]> jfr.im git - yt-dlp.git/blame - yt_dlp/extractor/iwara.py
[extractor] Use classmethod/property where possible
[yt-dlp.git] / yt_dlp / extractor / iwara.py
CommitLineData
8eb7ba82 1import re
ff4d7860 2import urllib
001a5fd3
YCH
3
4from .common import InfoExtractor
caf0f5f8
YCH
5from ..utils import (
6 int_or_none,
7 mimetype2ext,
8 remove_end,
4ecf300d 9 url_or_none,
ff4d7860 10 urljoin,
8eb7ba82
B
11 unified_strdate,
12 strip_or_none,
caf0f5f8 13)
001a5fd3
YCH
14
15
class IwaraBaseIE(InfoExtractor):
    # Matches both the plain and the adult ("ecchi") subdomain; the scheme/host
    # part is captured as "base_url" for reuse by subclasses.
    _BASE_REGEX = r'(?P<base_url>https?://(?:www\.|ecchi\.)?iwara\.tv)'

    def _extract_playlist(self, base_url, webpage):
        """Yield url_result entries for every video link on a listing page."""
        # Listing pages render each video as an <a> inside a "title" element.
        for match in re.finditer(r'class="title">\s*<a[^<]+href="([^"]+)', webpage):
            yield self.url_result(urljoin(base_url, match.group(1)))
23
24class IwaraIE(IwaraBaseIE):
25 _VALID_URL = fr'{IwaraBaseIE._BASE_REGEX}/videos/(?P<id>[a-zA-Z0-9]+)'
001a5fd3
YCH
26 _TESTS = [{
27 'url': 'http://iwara.tv/videos/amVwUl1EHpAD9RD',
caf0f5f8 28 # md5 is unstable
001a5fd3
YCH
29 'info_dict': {
30 'id': 'amVwUl1EHpAD9RD',
31 'ext': 'mp4',
32 'title': '【MMD R-18】ガールフレンド carry_me_off',
33 'age_limit': 18,
8eb7ba82
B
34 'thumbnail': 'https://i.iwara.tv/sites/default/files/videos/thumbnails/7951/thumbnail-7951_0001.png',
35 'uploader': 'Reimu丨Action',
36 'upload_date': '20150828',
37 'description': 'md5:1d4905ce48c66c9299c617f08e106e0f',
001a5fd3
YCH
38 },
39 }, {
40 'url': 'http://ecchi.iwara.tv/videos/Vb4yf2yZspkzkBO',
41 'md5': '7e5f1f359cd51a027ba4a7b7710a50f0',
42 'info_dict': {
43 'id': '0B1LvuHnL-sRFNXB1WHNqbGw4SXc',
44 'ext': 'mp4',
caf0f5f8 45 'title': '[3D Hentai] Kyonyu × Genkai × Emaki Shinobi Girls.mp4',
001a5fd3
YCH
46 'age_limit': 18,
47 },
48 'add_ie': ['GoogleDrive'],
49 }, {
50 'url': 'http://www.iwara.tv/videos/nawkaumd6ilezzgq',
caf0f5f8 51 # md5 is unstable
001a5fd3
YCH
52 'info_dict': {
53 'id': '6liAP9s2Ojc',
54 'ext': 'mp4',
caf0f5f8 55 'age_limit': 18,
001a5fd3
YCH
56 'title': '[MMD] Do It Again Ver.2 [1080p 60FPS] (Motion,Camera,Wav+DL)',
57 'description': 'md5:590c12c0df1443d833fbebe05da8c47a',
58 'upload_date': '20160910',
59 'uploader': 'aMMDsork',
60 'uploader_id': 'UCVOFyOSCyFkXTYYHITtqB7A',
61 },
62 'add_ie': ['Youtube'],
63 }]
64
65 def _real_extract(self, url):
66 video_id = self._match_id(url)
67
68 webpage, urlh = self._download_webpage_handle(url, video_id)
69
ff4d7860 70 hostname = urllib.parse.urlparse(urlh.geturl()).hostname
001a5fd3
YCH
71 # ecchi is 'sexy' in Japanese
72 age_limit = 18 if hostname.split('.')[0] == 'ecchi' else 0
73
caf0f5f8 74 video_data = self._download_json('http://www.iwara.tv/api/video/%s' % video_id, video_id)
001a5fd3 75
caf0f5f8 76 if not video_data:
001a5fd3
YCH
77 iframe_url = self._html_search_regex(
78 r'<iframe[^>]+src=([\'"])(?P<url>[^\'"]+)\1',
79 webpage, 'iframe URL', group='url')
80 return {
81 '_type': 'url_transparent',
82 'url': iframe_url,
83 'age_limit': age_limit,
84 }
85
04f3fd2c 86 title = remove_end(self._html_extract_title(webpage), ' | Iwara')
001a5fd3 87
63b1ad0f 88 thumbnail = self._html_search_regex(
8eb7ba82
B
89 r'poster=[\'"]([^\'"]+)', webpage, 'thumbnail', default=None)
90
91 uploader = self._html_search_regex(
92 r'class="username">([^<]+)', webpage, 'uploader', fatal=False)
93
94 upload_date = unified_strdate(self._html_search_regex(
95 r'作成日:([^\s]+)', webpage, 'upload_date', fatal=False))
96
97 description = strip_or_none(self._search_regex(
98 r'<p>(.+?(?=</div))', webpage, 'description', fatal=False,
99 flags=re.DOTALL))
63b1ad0f 100
caf0f5f8
YCH
101 formats = []
102 for a_format in video_data:
4ecf300d
S
103 format_uri = url_or_none(a_format.get('uri'))
104 if not format_uri:
105 continue
caf0f5f8
YCH
106 format_id = a_format.get('resolution')
107 height = int_or_none(self._search_regex(
108 r'(\d+)p', format_id, 'height', default=None))
109 formats.append({
4ecf300d 110 'url': self._proto_relative_url(format_uri, 'https:'),
caf0f5f8
YCH
111 'format_id': format_id,
112 'ext': mimetype2ext(a_format.get('mime')) or 'mp4',
113 'height': height,
2ab2c0d1 114 'width': int_or_none(height / 9.0 * 16.0 if height else None),
caf0f5f8
YCH
115 'quality': 1 if format_id == 'Source' else 0,
116 })
117
118 self._sort_formats(formats)
119
120 return {
001a5fd3
YCH
121 'id': video_id,
122 'title': title,
123 'age_limit': age_limit,
caf0f5f8 124 'formats': formats,
63b1ad0f 125 'thumbnail': self._proto_relative_url(thumbnail, 'https:'),
8eb7ba82
B
126 'uploader': uploader,
127 'upload_date': upload_date,
128 'description': description,
caf0f5f8 129 }
ff4d7860 130
131
class IwaraPlaylistIE(IwaraBaseIE):
    _VALID_URL = fr'{IwaraBaseIE._BASE_REGEX}/playlist/(?P<id>[^/?#&]+)'
    IE_NAME = 'iwara:playlist'

    _TESTS = [{
        'url': 'https://ecchi.iwara.tv/playlist/best-enf',
        'info_dict': {
            'title': 'Best enf',
            'uploader': 'Jared98112',
            'id': 'best-enf',
        },
        'playlist_mincount': 1097,
    }, {
        # urlencoded
        'url': 'https://ecchi.iwara.tv/playlist/%E3%83%97%E3%83%AC%E3%82%A4%E3%83%AA%E3%82%B9%E3%83%88-2',
        'info_dict': {
            'id': 'プレイリスト-2',
            'title': 'プレイリスト',
            'uploader': 'mainyu',
        },
        'playlist_mincount': 91,
    }]

    def _real_extract(self, url):
        """Build a playlist from a single iwara playlist page."""
        mobj = self._match_valid_url(url)
        base_url = mobj.group('base_url')
        # Playlist slugs may be percent-encoded (e.g. Japanese titles);
        # decode so the ID matches what the site displays.
        playlist_id = urllib.parse.unquote(mobj.group('id'))
        webpage = self._download_webpage(url, playlist_id)

        title = self._html_search_regex(
            r'class="title"[^>]*>([^<]+)', webpage, 'title', fatal=False)
        uploader = self._html_search_regex(
            r'<h2>([^<]+)', webpage, 'uploader', fatal=False)

        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': title,
            'uploader': uploader,
            'entries': self._extract_playlist(base_url, webpage),
        }
167
168
class IwaraUserIE(IwaraBaseIE):
    _VALID_URL = fr'{IwaraBaseIE._BASE_REGEX}/users/(?P<id>[^/?#&]+)'
    IE_NAME = 'iwara:user'

    _TESTS = [{
        'url': 'https://ecchi.iwara.tv/users/CuteMMD',
        'info_dict': {
            'id': 'CuteMMD',
        },
        'playlist_mincount': 198,
    }, {
        # urlencoded
        'url': 'https://ecchi.iwara.tv/users/%E5%92%95%E5%98%BF%E5%98%BF',
        'info_dict': {
            'id': '咕嘿嘿',
        },
        'playlist_mincount': 141,
    }]

    def _entries(self, playlist_id, base_url, webpage):
        """Yield entries from the first (already fetched) page, then from
        every further page linked by the pager."""
        yield from self._extract_playlist(base_url, webpage)

        pager_paths = re.findall(
            r'class="pager-item"[^>]*>\s*<a[^<]+href="([^"]+)', webpage)

        # Page 1 is `webpage`, so the pager links start at page 2.
        for page_number, page_path in enumerate(pager_paths, 2):
            page = self._download_webpage(
                urljoin(base_url, page_path), playlist_id,
                note=f'Downloading playlist page {page_number}')
            yield from self._extract_playlist(base_url, page)

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        base_url = mobj.group('base_url')
        # User names may be percent-encoded in the URL; decode for the ID.
        playlist_id = urllib.parse.unquote(mobj.group('id'))

        webpage = self._download_webpage(
            f'{base_url}/users/{playlist_id}/videos', playlist_id)

        return self.playlist_result(
            self._entries(playlist_id, base_url, webpage), playlist_id)