jfr.im git - yt-dlp.git/commitdiff
[iwara] Add playlist extractors (#3639)
author i6t <redacted>
Wed, 4 May 2022 15:49:46 +0000 (00:49 +0900)
committer GitHub <redacted>
Wed, 4 May 2022 15:49:46 +0000 (08:49 -0700)
Authored by: i6t

yt_dlp/extractor/extractors.py
yt_dlp/extractor/iwara.py

index c29a78deb0cc8791e9157c367866275a59f5783d..2c09a161ec0405cb6fbebff6aeb96f8b783cae9e 100644 (file)
     IviCompilationIE
 )
 from .ivideon import IvideonIE
-from .iwara import IwaraIE
+from .iwara import (
+    IwaraIE,
+    IwaraPlaylistIE,
+    IwaraUserIE,
+)
 from .izlesene import IzleseneIE
 from .jable import (
     JableIE,
index 974b4be7d8eca246cad9a4d8536fc690df7b943b..4b88da35f421acec417253e5c8f9109e706b26b7 100644 (file)
@@ -1,19 +1,28 @@
 import re
+import urllib
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_urlparse
 from ..utils import (
     int_or_none,
     mimetype2ext,
     remove_end,
     url_or_none,
+    urljoin,
     unified_strdate,
     strip_or_none,
 )
 
 
-class IwaraIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.|ecchi\.)?iwara\.tv/videos/(?P<id>[a-zA-Z0-9]+)'
+class IwaraBaseIE(InfoExtractor):
+    _BASE_REGEX = r'(?P<base_url>https?://(?:www\.|ecchi\.)?iwara\.tv)'
+
+    def _extract_playlist(self, base_url, webpage):
+        for path in re.findall(r'class="title">\s*<a[^<]+href="([^"]+)', webpage):
+            yield self.url_result(urljoin(base_url, path))
+
+
+class IwaraIE(IwaraBaseIE):
+    _VALID_URL = fr'{IwaraBaseIE._BASE_REGEX}/videos/(?P<id>[a-zA-Z0-9]+)'
     _TESTS = [{
         'url': 'http://iwara.tv/videos/amVwUl1EHpAD9RD',
         # md5 is unstable
@@ -58,7 +67,7 @@ def _real_extract(self, url):
 
         webpage, urlh = self._download_webpage_handle(url, video_id)
 
-        hostname = compat_urllib_parse_urlparse(urlh.geturl()).hostname
+        hostname = urllib.parse.urlparse(urlh.geturl()).hostname
         # ecchi is 'sexy' in Japanese
         age_limit = 18 if hostname.split('.')[0] == 'ecchi' else 0
 
@@ -118,3 +127,81 @@ def _real_extract(self, url):
             'upload_date': upload_date,
             'description': description,
         }
+
+
+class IwaraPlaylistIE(IwaraBaseIE):
+    _VALID_URL = fr'{IwaraBaseIE._BASE_REGEX}/playlist/(?P<id>[^/?#&]+)'
+    IE_NAME = 'iwara:playlist'
+
+    _TESTS = [{
+        'url': 'https://ecchi.iwara.tv/playlist/best-enf',
+        'info_dict': {
+            'title': 'Best enf',
+            'uploader': 'Jared98112',
+            'id': 'best-enf',
+        },
+        'playlist_mincount': 1097,
+    }, {
+        # urlencoded
+        'url': 'https://ecchi.iwara.tv/playlist/%E3%83%97%E3%83%AC%E3%82%A4%E3%83%AA%E3%82%B9%E3%83%88-2',
+        'info_dict': {
+            'id': 'プレイリスト-2',
+            'title': 'プレイリスト',
+            'uploader': 'mainyu',
+        },
+        'playlist_mincount': 91,
+    }]
+
+    def _real_extract(self, url):
+        playlist_id, base_url = self._match_valid_url(url).group('id', 'base_url')
+        playlist_id = urllib.parse.unquote(playlist_id)
+        webpage = self._download_webpage(url, playlist_id)
+
+        return {
+            '_type': 'playlist',
+            'id': playlist_id,
+            'title': self._html_search_regex(r'class="title"[^>]*>([^<]+)', webpage, 'title', fatal=False),
+            'uploader': self._html_search_regex(r'<h2>([^<]+)', webpage, 'uploader', fatal=False),
+            'entries': self._extract_playlist(base_url, webpage),
+        }
+
+
+class IwaraUserIE(IwaraBaseIE):
+    _VALID_URL = fr'{IwaraBaseIE._BASE_REGEX}/users/(?P<id>[^/?#&]+)'
+    IE_NAME = 'iwara:user'
+
+    _TESTS = [{
+        'url': 'https://ecchi.iwara.tv/users/CuteMMD',
+        'info_dict': {
+            'id': 'CuteMMD',
+        },
+        'playlist_mincount': 198,
+    }, {
+        # urlencoded
+        'url': 'https://ecchi.iwara.tv/users/%E5%92%95%E5%98%BF%E5%98%BF',
+        'info_dict': {
+            'id': '咕嘿嘿',
+        },
+        'playlist_mincount': 141,
+    }]
+
+    def _entries(self, playlist_id, base_url, webpage):
+        yield from self._extract_playlist(base_url, webpage)
+
+        page_urls = re.findall(
+            r'class="pager-item"[^>]*>\s*<a[^<]+href="([^"]+)', webpage)
+
+        for n, path in enumerate(page_urls, 2):
+            yield from self._extract_playlist(
+                base_url, self._download_webpage(
+                    urljoin(base_url, path), playlist_id, note=f'Downloading playlist page {n}'))
+
+    def _real_extract(self, url):
+        playlist_id, base_url = self._match_valid_url(url).group('id', 'base_url')
+        playlist_id = urllib.parse.unquote(playlist_id)
+
+        webpage = self._download_webpage(
+            f'{base_url}/users/{playlist_id}/videos', playlist_id)
+
+        return self.playlist_result(
+            self._entries(playlist_id, base_url, webpage), playlist_id)