jfr.im git - yt-dlp.git/blobdiff - yt_dlp/extractor/fourtube.py
[cleanup] Add more ruff rules (#10149)
[yt-dlp.git] / yt_dlp / extractor / fourtube.py
index c6af100f3728cb8f011a6a374415f33e0b39d0ff..ba94b5bdc917c20dcc2677b47989e6e8a3849367 100644 (file)
@@ -1,12 +1,8 @@
+import base64
 import re
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_b64decode,
-    compat_str,
-    compat_urllib_parse_unquote,
-    compat_urlparse,
-)
 from ..utils import (
     int_or_none,
     parse_duration,
 
 class FourTubeBaseIE(InfoExtractor):
     def _extract_formats(self, url, video_id, media_id, sources):
-        token_url = 'https://%s/%s/desktop/%s' % (
+        token_url = 'https://{}/{}/desktop/{}'.format(
             self._TKN_HOST, media_id, '+'.join(sources))
 
-        parsed_url = compat_urlparse.urlparse(url)
+        parsed_url = urllib.parse.urlparse(url)
         tokens = self._download_json(token_url, video_id, data=b'', headers={
-            'Origin': '%s://%s' % (parsed_url.scheme, parsed_url.hostname),
+            'Origin': f'{parsed_url.scheme}://{parsed_url.hostname}',
             'Referer': url,
         })
-        formats = [{
-            'url': tokens[format]['token'],
-            'format_id': format + 'p',
-            'resolution': format + 'p',
-            'quality': int(format),
-        } for format in sources]
-        self._sort_formats(formats)
-        return formats
+        return [{
+            'url': tokens[res]['token'],
+            'format_id': res + 'p',
+            'resolution': res + 'p',
+            'quality': int(res),
+        } for res in sources]
 
     def _real_extract(self, url):
         mobj = self._match_valid_url(url)
@@ -90,9 +84,9 @@ def _real_extract(self, url):
             params_js = self._search_regex(
                 r'\$\.ajax\(url,\ opts\);\s*\}\s*\}\)\(([0-9,\[\] ]+)\)',
                 player_js, 'initialization parameters')
-            params = self._parse_json('[%s]' % params_js, video_id)
+            params = self._parse_json(f'[{params_js}]', video_id)
             media_id = params[0]
-            sources = ['%s' % p for p in params[2]]
+            sources = [f'{p}' for p in params[2]]
 
         formats = self._extract_formats(url, video_id, media_id, sources)
 
@@ -235,20 +229,20 @@ def _real_extract(self, url):
             self._search_regex(
                 r'INITIALSTATE\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
                 webpage, 'data', group='value'), video_id,
-            transform_source=lambda x: compat_urllib_parse_unquote(
-                compat_b64decode(x).decode('utf-8')))['page']['video']
+            transform_source=lambda x: urllib.parse.unquote(
+                base64.b64decode(x).decode('utf-8')))['page']['video']
 
         title = video['title']
         media_id = video['mediaId']
-        sources = [compat_str(e['height'])
+        sources = [str(e['height'])
                    for e in video['encodings'] if e.get('height')]
         formats = self._extract_formats(url, video_id, media_id, sources)
 
         thumbnail = url_or_none(video.get('masterThumb'))
-        uploader = try_get(video, lambda x: x['user']['username'], compat_str)
+        uploader = try_get(video, lambda x: x['user']['username'], str)
         uploader_id = str_or_none(try_get(
             video, lambda x: x['user']['id'], int))
-        channel = try_get(video, lambda x: x['channel']['name'], compat_str)
+        channel = try_get(video, lambda x: x['channel']['name'], str)
         channel_id = str_or_none(try_get(
             video, lambda x: x['channel']['id'], int))
         like_count = int_or_none(video.get('likes'))