5 from .common
import InfoExtractor
class IwaraBaseIE(InfoExtractor):
    # Shared host pattern: bare, www. or the age-restricted ecchi. subdomain.
    _BASE_REGEX = r'(?P<base_url>https?://(?:www\.|ecchi\.)?iwara\.tv)'

    def _extract_playlist(self, base_url, webpage):
        """Yield a url_result for every video link found on a listing page."""
        video_paths = re.findall(r'class="title">\s*<a[^<]+href="([^"]+)', webpage)
        for video_path in video_paths:
            yield self.url_result(urljoin(base_url, video_path))
# NOTE(review): this block is a garbled extraction of the IwaraIE video
# extractor — original source line numbers are fused into the code and many
# lines (the '_TESTS = [{' scaffolding, 'formats = []', 'formats.append({',
# 'return {' wrappers and closing braces) are missing from this chunk.
# Restore from the upstream file before touching logic; the comments below
# only annotate the fragments that are still visible.
25 class IwaraIE(IwaraBaseIE
):
# Accepts /videos/<alphanumeric id> on any host matched by _BASE_REGEX.
26 _VALID_URL
= fr
'{IwaraBaseIE._BASE_REGEX}/videos/(?P<id>[a-zA-Z0-9]+)'
# _TESTS fragments: a natively-hosted video, a GoogleDrive-hosted embed and
# a YouTube-hosted embed (list/dict delimiters lost in extraction).
28 'url': 'http://iwara.tv/videos/amVwUl1EHpAD9RD',
31 'id': 'amVwUl1EHpAD9RD',
33 'title': '【MMD R-18】ガールフレンド carry_me_off',
35 'thumbnail': 'https://i.iwara.tv/sites/default/files/videos/thumbnails/7951/thumbnail-7951_0001.png',
36 'uploader': 'Reimu丨Action',
37 'upload_date': '20150828',
38 'description': 'md5:1d4905ce48c66c9299c617f08e106e0f',
41 'url': 'http://ecchi.iwara.tv/videos/Vb4yf2yZspkzkBO',
42 'md5': '7e5f1f359cd51a027ba4a7b7710a50f0',
44 'id': '0B1LvuHnL-sRFNXB1WHNqbGw4SXc',
46 'title': '[3D Hentai] Kyonyu × Genkai × Emaki Shinobi Girls.mp4',
49 'add_ie': ['GoogleDrive'],
51 'url': 'http://www.iwara.tv/videos/nawkaumd6ilezzgq',
57 'title': '[MMD] Do It Again Ver.2 [1080p 60FPS] (Motion,Camera,Wav+DL)',
58 'description': 'md5:590c12c0df1443d833fbebe05da8c47a',
59 'upload_date': '20160910',
60 'uploader': 'aMMDsork',
61 'uploader_id': 'UCVOFyOSCyFkXTYYHITtqB7A',
63 'add_ie': ['Youtube'],
# Fetch the video page (following redirects), read per-format data from the
# legacy JSON API, and fall back to an embedded iframe when the API returns
# nothing.
66 def _real_extract(self
, url
):
67 video_id
= self
._match
_id
(url
)
# _download_webpage_handle also returns the response handle so the final
# (post-redirect) URL can be inspected below.
69 webpage
, urlh
= self
._download
_webpage
_handle
(url
, video_id
)
71 hostname
= urllib
.parse
.urlparse(urlh
.geturl()).hostname
72 # ecchi is 'sexy' in Japanese
73 age_limit
= 18 if hostname
.split('.')[0] == 'ecchi' else 0
# Per-format list from the legacy API endpoint.
75 video_data
= self
._download
_json
('http://www.iwara.tv/api/video/%s' % video_id
, video_id
)
# Embed fallback: hand the iframe URL off to another extractor via a
# url_transparent result (presumably guarded by 'if not video_data:' with a
# 'return {' wrapper — those lines are missing from this chunk; confirm
# against upstream).
78 iframe_url
= self
._html
_search
_regex
(
79 r
'<iframe[^>]+src=([\'"])(?P<url>[^\'"]+)\
1',
80 webpage, 'iframe URL
', group='url
')
82 '_type
': 'url_transparent
',
84 'age_limit
': age_limit,
# Page-level metadata scraped straight from the HTML.
87 title = remove_end(self._html_extract_title(webpage), ' | Iwara
')
89 thumbnail = self._html_search_regex(
90 r'poster
=[\'"]([^\'"]+)', webpage, 'thumbnail
', default=None)
92 uploader = self._html_search_regex(
93 r'class="username">([^
<]+)', webpage, 'uploader
', fatal=False)
# '作成日' is Japanese for 'creation date'.
95 upload_date = unified_strdate(self._html_search_regex(
96 r'作成日
:([^\s
]+)', webpage, 'upload_date
', fatal=False))
98 description = strip_or_none(self._search_regex(
99 r'<p
>(.+?
(?
=</div
))', webpage, 'description
', fatal=False,
# Build the formats list from the API entries; height is parsed out of the
# 'resolution' label and width assumes a 16:9 aspect ratio. (The
# 'formats = []' initializer and the 'formats.append({' wrapper are among
# the missing lines.)
103 for a_format in video_data:
104 format_uri = url_or_none(a_format.get('uri
'))
107 format_id = a_format.get('resolution
')
108 height = int_or_none(self._search_regex(
109 r'(\d
+)p
', format_id, 'height
', default=None))
111 'url
': self._proto_relative_url(format_uri, 'https
:'),
112 'format_id
': format_id,
113 'ext
': mimetype2ext(a_format.get('mime
')) or 'mp4
',
115 'width
': int_or_none(height / 9.0 * 16.0 if height else None),
116 'quality
': 1 if format_id == 'Source
' else 0,
# NOTE(review): _sort_formats is a deprecated helper in current yt-dlp —
# confirm the targeted yt-dlp version before keeping this call.
119 self._sort_formats(formats)
# Tail of the final info dict (the 'return {' line and the 'id'/'title'/
# 'formats' keys are missing from this chunk).
124 'age_limit
': age_limit,
126 'thumbnail
': self._proto_relative_url(thumbnail, 'https
:'),
127 'uploader
': uploader,
128 'upload_date
': upload_date,
129 'description
': description,
# NOTE(review): garbled extraction of the playlist extractor — original line
# numbers are fused into the code, and the '_TESTS = [{' scaffolding plus
# the final 'return {' wrapper are missing. Annotation only; restore from
# upstream before editing logic.
133 class IwaraPlaylistIE(IwaraBaseIE):
# Accepts /playlist/<slug>; the slug may be percent-encoded (see the
# unquote call below).
134 _VALID_URL = fr'{IwaraBaseIE._BASE_REGEX}
/playlist
/(?P
<id>[^
/?
#&]+)'
135 IE_NAME
= 'iwara:playlist'
# _TESTS fragments (list/dict delimiters lost in extraction).
138 'url': 'https://ecchi.iwara.tv/playlist/best-enf',
141 'uploader': 'Jared98112',
144 'playlist_mincount': 1097,
147 'url': 'https://ecchi.iwara.tv/playlist/%E3%83%97%E3%83%AC%E3%82%A4%E3%83%AA%E3%82%B9%E3%83%88-2',
151 'uploader': 'mainyu',
153 'playlist_mincount': 91,
156 def _real_extract(self
, url
):
157 playlist_id
, base_url
= self
._match
_valid
_url
(url
).group('id', 'base_url')
# Decode percent-encoded slugs (e.g. Japanese playlist names) so the id is
# human-readable.
158 playlist_id
= urllib
.parse
.unquote(playlist_id
)
159 webpage
= self
._download
_webpage
(url
, playlist_id
)
# Keys of the returned playlist info dict; entries are delegated to
# IwaraBaseIE._extract_playlist (the enclosing 'return {' line is missing
# from this chunk).
164 'title': self
._html
_search
_regex
(r
'class="title"[^>]*>([^<]+)', webpage
, 'title', fatal
=False),
165 'uploader': self
._html
_search
_regex
(r
'<h2>([^<]+)', webpage
, 'uploader', fatal
=False),
166 'entries': self
._extract
_playlist
(base_url
, webpage
),
# NOTE(review): garbled extraction of the user extractor — original line
# numbers are fused into the code; the '_TESTS = [{' scaffolding, the
# 'if not videos_url:' guard with its early 'return', the argument lines of
# the paged _extract_playlist call and the loop's 'break' are missing.
# Annotation only; restore from upstream before editing logic.
170 class IwaraUserIE(IwaraBaseIE
):
171 _VALID_URL
= fr
'{IwaraBaseIE._BASE_REGEX}/users/(?P<id>[^/?#&]+)'
172 IE_NAME
= 'iwara:user'
# _TESTS fragments: single-page profile, tiny profile, paginated profile,
# and a profile with non-ASCII characters in the URL.
175 'note': 'number of all videos page is just 1 page. less than 40 videos',
176 'url': 'https://ecchi.iwara.tv/users/infinityyukarip',
178 'title': 'Uploaded videos from Infinity_YukariP',
179 'id': 'infinityyukarip',
180 'uploader': 'Infinity_YukariP',
181 'uploader_id': 'infinityyukarip',
183 'playlist_mincount': 39,
185 'note': 'no even all videos page. probably less than 10 videos',
186 'url': 'https://ecchi.iwara.tv/users/mmd-quintet',
188 'title': 'Uploaded videos from mmd quintet',
190 'uploader': 'mmd quintet',
191 'uploader_id': 'mmd-quintet',
193 'playlist_mincount': 6,
195 'note': 'has paging. more than 40 videos',
196 'url': 'https://ecchi.iwara.tv/users/theblackbirdcalls',
198 'title': 'Uploaded videos from TheBlackbirdCalls',
199 'id': 'theblackbirdcalls',
200 'uploader': 'TheBlackbirdCalls',
201 'uploader_id': 'theblackbirdcalls',
203 'playlist_mincount': 420,
205 'note': 'foreign chars in URL. there must be foreign characters in URL',
206 'url': 'https://ecchi.iwara.tv/users/ぶた丼',
208 'title': 'Uploaded videos from ぶた丼',
211 'uploader_id': 'ぶた丼',
213 'playlist_mincount': 170,
# Generator: yields entries from the profile page itself, then walks the
# paginated "all videos" listing when the profile links to one.
216 def _entries(self
, playlist_id
, base_url
):
217 webpage
= self
._download
_webpage
(
218 f
'{base_url}/users/{playlist_id}', playlist_id
)
# Optional link to the full /users/<id>/videos listing; None when the
# profile page has no such link.
219 videos_url
= self
._search
_regex
(r
'<a href="(/users/[^/]+/videos)(?:\?[^"]+)?">', webpage
, 'all videos url', default
=None)
# Entries visible on the profile page (presumably followed by an early
# 'return' when videos_url is None — that guard is missing here; confirm
# against upstream).
221 yield from self
._extract
_playlist
(base_url
, webpage
)
224 videos_url
= urljoin(base_url
, videos_url
)
# Walk listing pages. The site's 'page' query parameter is 0-based: page n
# sends page=n-1, and page 1 sends no parameter at all.
226 for n
in itertools
.count(1):
227 page
= self
._download
_webpage
(
228 videos_url
, playlist_id
, note
=f
'Downloading playlist page {n}',
229 query
={'page': str(n - 1)}
if n
> 1 else {})
230 yield from self
._extract
_playlist
(
# Stop when the current page carries no link to the next one (the loop's
# 'break' line and the call arguments above are missing from this chunk).
233 if f
'page={n}' not in page
:
236 def _real_extract(self
, url
):
237 playlist_id
, base_url
= self
._match
_valid
_url
(url
).group('id', 'base_url')
# Decode percent-encoded user slugs (e.g. 'ぶた丼') for use as playlist id.
238 playlist_id
= urllib
.parse
.unquote(playlist_id
)
240 return self
.playlist_result(
241 self
._entries
(playlist_id
, base_url
), playlist_id
)