jfr.im git - yt-dlp.git/commitdiff
[extractor/iwara] Overhaul extractors (#6557)
authorLesmiscore <redacted>
Sat, 18 Mar 2023 14:29:02 +0000 (23:29 +0900)
committerGitHub <redacted>
Sat, 18 Mar 2023 14:29:02 +0000 (23:29 +0900)
Authored by: Lesmiscore

yt_dlp/extractor/iwara.py

index ec3e59c6d0024c13ee312a306c85736367ad5fe5..62a179700aea0edd12b9b944d6ece62a34dc5d6d 100644 (file)
-import itertools
-import re
+import functools
 import urllib.parse
+import hashlib
 
 from .common import InfoExtractor
 from ..utils import (
+    OnDemandPagedList,
     int_or_none,
     mimetype2ext,
-    remove_end,
-    strip_or_none,
-    unified_strdate,
-    url_or_none,
-    urljoin,
+    traverse_obj,
+    unified_timestamp,
 )
 
 
-class IwaraBaseIE(InfoExtractor):
-    _BASE_REGEX = r'(?P<base_url>https?://(?:www\.|ecchi\.)?iwara\.tv)'
-
-    def _extract_playlist(self, base_url, webpage):
-        for path in re.findall(r'class="title">\s*<a[^<]+href="([^"]+)', webpage):
-            yield self.url_result(urljoin(base_url, path))
-
-
-class IwaraIE(IwaraBaseIE):
-    _VALID_URL = fr'{IwaraBaseIE._BASE_REGEX}/videos/(?P<id>[a-zA-Z0-9]+)'
+class IwaraIE(InfoExtractor):
+    IE_NAME = 'iwara'
+    _VALID_URL = r'https?://(?:www\.)?iwara\.tv/video/(?P<id>[a-zA-Z0-9]+)'
     _TESTS = [{
-        'url': 'http://iwara.tv/videos/amVwUl1EHpAD9RD',
-        # md5 is unstable
+        # this video cannot be played because of migration
+        'only_matching': True,
+        'url': 'https://www.iwara.tv/video/k2ayoueezfkx6gvq',
         'info_dict': {
-            'id': 'amVwUl1EHpAD9RD',
+            'id': 'k2ayoueezfkx6gvq',
             'ext': 'mp4',
-            'title': '【MMD R-18】ガールフレンド carry_me_off',
             'age_limit': 18,
-            'thumbnail': 'https://i.iwara.tv/sites/default/files/videos/thumbnails/7951/thumbnail-7951_0001.png',
-            'uploader': 'Reimu丨Action',
-            'upload_date': '20150828',
-            'description': 'md5:1d4905ce48c66c9299c617f08e106e0f',
+            'title': 'Defeat of Irybelda - アイリベルダの敗北',
+            'description': 'md5:70278abebe706647a8b4cb04cf23e0d3',
+            'uploader': 'Inwerwm',
+            'uploader_id': 'inwerwm',
+            'tags': 'count:1',
+            'like_count': 6133,
+            'view_count': 1050343,
+            'comment_count': 1,
+            'timestamp': 1677843869,
+            'modified_timestamp': 1679056362,
         },
     }, {
-        'url': 'http://ecchi.iwara.tv/videos/Vb4yf2yZspkzkBO',
-        'md5': '7e5f1f359cd51a027ba4a7b7710a50f0',
+        'url': 'https://iwara.tv/video/1ywe1sbkqwumpdxz5/',
+        'md5': '20691ce1473ec2766c0788e14c60ce66',
         'info_dict': {
-            'id': '0B1LvuHnL-sRFNXB1WHNqbGw4SXc',
+            'id': '1ywe1sbkqwumpdxz5',
             'ext': 'mp4',
-            'title': '[3D Hentai] Kyonyu × Genkai × Emaki Shinobi Girls.mp4',
             'age_limit': 18,
+            'title': 'Aponia 阿波尼亚SEX  Party Tonight 手动脱衣 大奶 裸腿',
+            'description': 'md5:0c4c310f2e0592d68b9f771d348329ca',
+            'uploader': '龙也zZZ',
+            'uploader_id': 'user792540',
+            'tags': [
+                'uncategorized'
+            ],
+            'like_count': 1809,
+            'view_count': 25156,
+            'comment_count': 1,
+            'timestamp': 1678732213,
+            'modified_timestamp': 1679110271,
         },
-        'add_ie': ['GoogleDrive'],
-    }, {
-        'url': 'http://www.iwara.tv/videos/nawkaumd6ilezzgq',
-        # md5 is unstable
-        'info_dict': {
-            'id': '6liAP9s2Ojc',
-            'ext': 'mp4',
-            'age_limit': 18,
-            'title': '[MMD] Do It Again Ver.2 [1080p 60FPS] (Motion,Camera,Wav+DL)',
-            'description': 'md5:590c12c0df1443d833fbebe05da8c47a',
-            'upload_date': '20160910',
-            'uploader': 'aMMDsork',
-            'uploader_id': 'UCVOFyOSCyFkXTYYHITtqB7A',
-        },
-        'add_ie': ['Youtube'],
     }]
 
+    def _extract_formats(self, video_id, fileurl):
+        up = urllib.parse.urlparse(fileurl)
+        q = urllib.parse.parse_qs(up.query)
+        paths = up.path.rstrip('/').split('/')
+        # https://github.com/yt-dlp/yt-dlp/issues/6549#issuecomment-1473771047
+        x_version = hashlib.sha1('_'.join((paths[-1], q['expires'][0], '5nFp9kmbNnHdAFhaqMvt')).encode()).hexdigest()
+
+        files = self._download_json(fileurl, video_id, headers={'X-Version': x_version})
+        for fmt in files:
+            yield traverse_obj(fmt, {
+                'format_id': 'name',
+                'url': ('src', ('view', 'download'), {self._proto_relative_url}),
+                'ext': ('type', {mimetype2ext}),
+                'quality': ('name', {lambda x: int_or_none(x) or 1e4}),
+                'height': ('name', {int_or_none}),
+            }, get_all=False)
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
-
-        webpage, urlh = self._download_webpage_handle(url, video_id)
-
-        hostname = urllib.parse.urlparse(urlh.geturl()).hostname
-        # ecchi is 'sexy' in Japanese
-        age_limit = 18 if hostname.split('.')[0] == 'ecchi' else 0
-
-        video_data = self._download_json('http://www.iwara.tv/api/video/%s' % video_id, video_id)
-
-        if not video_data:
-            iframe_url = self._html_search_regex(
-                r'<iframe[^>]+src=([\'"])(?P<url>[^\'"]+)\1',
-                webpage, 'iframe URL', group='url')
-            return {
-                '_type': 'url_transparent',
-                'url': iframe_url,
-                'age_limit': age_limit,
-            }
-
-        title = remove_end(self._html_extract_title(webpage), ' | Iwara')
-
-        thumbnail = self._html_search_regex(
-            r'poster=[\'"]([^\'"]+)', webpage, 'thumbnail', default=None)
-
-        uploader = self._html_search_regex(
-            r'class="username">([^<]+)', webpage, 'uploader', fatal=False)
-
-        upload_date = unified_strdate(self._html_search_regex(
-            r'作成日:([^\s]+)', webpage, 'upload_date', fatal=False))
-
-        description = strip_or_none(self._search_regex(
-            r'<p>(.+?(?=</div))', webpage, 'description', fatal=False,
-            flags=re.DOTALL))
-
-        formats = []
-        for a_format in video_data:
-            format_uri = url_or_none(a_format.get('uri'))
-            if not format_uri:
-                continue
-            format_id = a_format.get('resolution')
-            height = int_or_none(self._search_regex(
-                r'(\d+)p', format_id, 'height', default=None))
-            formats.append({
-                'url': self._proto_relative_url(format_uri, 'https:'),
-                'format_id': format_id,
-                'ext': mimetype2ext(a_format.get('mime')) or 'mp4',
-                'height': height,
-                'width': int_or_none(height / 9.0 * 16.0 if height else None),
-                'quality': 1 if format_id == 'Source' else 0,
-            })
+        video_data = self._download_json(f'http://api.iwara.tv/video/{video_id}', video_id)
 
         return {
             'id': video_id,
-            'title': title,
-            'age_limit': age_limit,
-            'formats': formats,
-            'thumbnail': self._proto_relative_url(thumbnail, 'https:'),
-            'uploader': uploader,
-            'upload_date': upload_date,
-            'description': description,
+            'age_limit': 18 if video_data.get('rating') == 'ecchi' else 0,  # ecchi is 'sexy' in Japanese
+            **traverse_obj(video_data, {
+                'title': 'title',
+                'description': 'body',
+                'uploader': ('user', 'name'),
+                'uploader_id': ('user', 'username'),
+                'tags': ('tags', ..., 'id'),
+                'like_count': 'numLikes',
+                'view_count': 'numViews',
+                'comment_count': 'numComments',
+                'timestamp': ('createdAt', {unified_timestamp}),
+                'modified_timestamp': ('updatedAt', {unified_timestamp}),
+                'thumbnail': ('file', 'id', {str}, {
+                    lambda x: f'https://files.iwara.tv/image/thumbnail/{x}/thumbnail-00.jpg'}),
+            }),
+            'formats': list(self._extract_formats(video_id, video_data.get('fileUrl'))),
         }
 
 
-class IwaraPlaylistIE(IwaraBaseIE):
-    _VALID_URL = fr'{IwaraBaseIE._BASE_REGEX}/playlist/(?P<id>[^/?#&]+)'
-    IE_NAME = 'iwara:playlist'
+class IwaraUserIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?iwara\.tv/profile/(?P<id>[^/?#&]+)'
+    IE_NAME = 'iwara:user'
+    _PER_PAGE = 32
 
     _TESTS = [{
-        'url': 'https://ecchi.iwara.tv/playlist/best-enf',
+        'url': 'https://iwara.tv/profile/user792540/videos',
         'info_dict': {
-            'title': 'Best enf',
-            'uploader': 'Jared98112',
-            'id': 'best-enf',
+            'id': 'user792540',
         },
-        'playlist_mincount': 1097,
+        'playlist_mincount': 80,
     }, {
-        # urlencoded
-        'url': 'https://ecchi.iwara.tv/playlist/%E3%83%97%E3%83%AC%E3%82%A4%E3%83%AA%E3%82%B9%E3%83%88-2',
+        'url': 'https://iwara.tv/profile/theblackbirdcalls/videos',
         'info_dict': {
-            'id': 'プレイリスト-2',
-            'title': 'プレイリスト',
-            'uploader': 'mainyu',
+            'id': 'theblackbirdcalls',
         },
-        'playlist_mincount': 91,
+        'playlist_mincount': 723,
+    }, {
+        'url': 'https://iwara.tv/profile/user792540',
+        'only_matching': True,
+    }, {
+        'url': 'https://iwara.tv/profile/theblackbirdcalls',
+        'only_matching': True,
     }]
 
+    def _entries(self, playlist_id, user_id, page):
+        videos = self._download_json(
+            'https://api.iwara.tv/videos', playlist_id,
+            note=f'Downloading page {page}',
+            query={
+                'page': page,
+                'sort': 'date',
+                'user': user_id,
+                'limit': self._PER_PAGE,
+            })
+        for x in traverse_obj(videos, ('results', ..., 'id')):
+            yield self.url_result(f'https://iwara.tv/video/{x}')
+
     def _real_extract(self, url):
-        playlist_id, base_url = self._match_valid_url(url).group('id', 'base_url')
-        playlist_id = urllib.parse.unquote(playlist_id)
-        webpage = self._download_webpage(url, playlist_id)
+        playlist_id = self._match_id(url)
+        user_info = self._download_json(
+            f'https://api.iwara.tv/profile/{playlist_id}', playlist_id,
+            note='Requesting user info')
+        user_id = traverse_obj(user_info, ('user', 'id'))
 
-        return {
-            '_type': 'playlist',
-            'id': playlist_id,
-            'title': self._html_search_regex(r'class="title"[^>]*>([^<]+)', webpage, 'title', fatal=False),
-            'uploader': self._html_search_regex(r'<h2>([^<]+)', webpage, 'uploader', fatal=False),
-            'entries': self._extract_playlist(base_url, webpage),
-        }
+        return self.playlist_result(
+            OnDemandPagedList(
+                functools.partial(self._entries, playlist_id, user_id),
+                self._PER_PAGE),
+            playlist_id, traverse_obj(user_info, ('user', 'name')))
 
 
-class IwaraUserIE(IwaraBaseIE):
-    _VALID_URL = fr'{IwaraBaseIE._BASE_REGEX}/users/(?P<id>[^/?#&]+)'
-    IE_NAME = 'iwara:user'
+class IwaraPlaylistIE(InfoExtractor):
+    # the ID is a UUID, but I don't think it's necessary to write a concrete regex
+    _VALID_URL = r'https?://(?:www\.)?iwara\.tv/playlist/(?P<id>[0-9a-f-]+)'
+    IE_NAME = 'iwara:playlist'
+    _PER_PAGE = 32
 
     _TESTS = [{
-        'note': 'number of all videos page is just 1 page. less than 40 videos',
-        'url': 'https://ecchi.iwara.tv/users/infinityyukarip',
-        'info_dict': {
-            'title': 'Uploaded videos from Infinity_YukariP',
-            'id': 'infinityyukarip',
-            'uploader': 'Infinity_YukariP',
-            'uploader_id': 'infinityyukarip',
-        },
-        'playlist_mincount': 39,
-    }, {
-        'note': 'no even all videos page. probably less than 10 videos',
-        'url': 'https://ecchi.iwara.tv/users/mmd-quintet',
-        'info_dict': {
-            'title': 'Uploaded videos from mmd quintet',
-            'id': 'mmd-quintet',
-            'uploader': 'mmd quintet',
-            'uploader_id': 'mmd-quintet',
-        },
-        'playlist_mincount': 6,
-    }, {
-        'note': 'has paging. more than 40 videos',
-        'url': 'https://ecchi.iwara.tv/users/theblackbirdcalls',
+        'url': 'https://iwara.tv/playlist/458e5486-36a4-4ac0-b233-7e9eef01025f',
         'info_dict': {
-            'title': 'Uploaded videos from TheBlackbirdCalls',
-            'id': 'theblackbirdcalls',
-            'uploader': 'TheBlackbirdCalls',
-            'uploader_id': 'theblackbirdcalls',
+            'id': '458e5486-36a4-4ac0-b233-7e9eef01025f',
         },
-        'playlist_mincount': 420,
-    }, {
-        'note': 'foreign chars in URL. there must be foreign characters in URL',
-        'url': 'https://ecchi.iwara.tv/users/ぶた丼',
-        'info_dict': {
-            'title': 'Uploaded videos from ぶた丼',
-            'id': 'ぶた丼',
-            'uploader': 'ぶた丼',
-            'uploader_id': 'ぶた丼',
-        },
-        'playlist_mincount': 170,
+        'playlist_mincount': 3,
     }]
 
-    def _entries(self, playlist_id, base_url):
-        webpage = self._download_webpage(
-            f'{base_url}/users/{playlist_id}', playlist_id)
-        videos_url = self._search_regex(r'<a href="(/users/[^/]+/videos)(?:\?[^"]+)?">', webpage, 'all videos url', default=None)
-        if not videos_url:
-            yield from self._extract_playlist(base_url, webpage)
-            return
-
-        videos_url = urljoin(base_url, videos_url)
-
-        for n in itertools.count(1):
-            page = self._download_webpage(
-                videos_url, playlist_id, note=f'Downloading playlist page {n}',
-                query={'page': str(n - 1)} if n > 1 else {})
-            yield from self._extract_playlist(
-                base_url, page)
-
-            if f'page={n}' not in page:
-                break
+    def _entries(self, playlist_id, first_page, page):
+        videos = self._download_json(
+            'https://api.iwara.tv/videos', playlist_id, f'Downloading page {page}',
+            query={'page': page, 'limit': self._PER_PAGE}) if page else first_page
+        for x in traverse_obj(videos, ('results', ..., 'id')):
+            yield self.url_result(f'https://iwara.tv/video/{x}')
 
     def _real_extract(self, url):
-        playlist_id, base_url = self._match_valid_url(url).group('id', 'base_url')
-        playlist_id = urllib.parse.unquote(playlist_id)
+        playlist_id = self._match_id(url)
+        page_0 = self._download_json(
+            f'https://api.iwara.tv/playlist/{playlist_id}?page=0&limit={self._PER_PAGE}', playlist_id,
+            note='Requesting playlist info')
 
         return self.playlist_result(
-            self._entries(playlist_id, base_url), playlist_id)
+            OnDemandPagedList(
+                functools.partial(self._entries, playlist_id, page_0),
+                self._PER_PAGE),
+            playlist_id, traverse_obj(page_0, ('title', 'name')))