jfr.im git - yt-dlp.git/commitdiff
[cleanup] Misc
author pukkandan <redacted>
Mon, 20 Jun 2022 06:14:55 +0000 (11:44 +0530)
committer pukkandan <redacted>
Mon, 20 Jun 2022 06:14:55 +0000 (11:44 +0530)
yt_dlp/YoutubeDL.py
yt_dlp/__init__.py
yt_dlp/extractor/_extractors.py
yt_dlp/extractor/abematv.py
yt_dlp/extractor/common.py
yt_dlp/extractor/dailywire.py
yt_dlp/extractor/generic.py
yt_dlp/extractor/iqiyi.py
yt_dlp/utils.py

index 2a4c8c883ad52cf72931b27d06b6857d45fb74de..7e065daa1f02e3fc3d6e492841c193414cbdf82d 100644 (file)
@@ -58,6 +58,7 @@
 from .update import detect_variant
 from .utils import (
     DEFAULT_OUTTMPL,
+    IDENTITY,
     LINK_TEMPLATES,
     NO_DEFAULT,
     NUMBER_RE,
@@ -1002,7 +1003,7 @@ def parse_outtmpl(self):
         return self.params['outtmpl']
 
     def _parse_outtmpl(self):
-        sanitize = lambda x: x
+        sanitize = IDENTITY
         if self.params.get('restrictfilenames'):  # Remove spaces in the default template
             sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')
 
@@ -2983,13 +2984,12 @@ def existing_video_file(*filepaths):
                         info_dict['ext'] = os.path.splitext(file)[1][1:]
                     return file
 
-                success = True
-                merger, fd = FFmpegMergerPP(self), None
+                fd, success = None, True
                 if info_dict.get('protocol') or info_dict.get('url'):
                     fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
                     if fd is not FFmpegFD and (
                             info_dict.get('section_start') or info_dict.get('section_end')):
-                        msg = ('This format cannot be partially downloaded' if merger.available
+                        msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
                                else 'You have requested downloading the video partially, but ffmpeg is not installed')
                         self.report_error(f'{msg}. Aborting')
                         return
@@ -3048,6 +3048,7 @@ def correct_ext(filename, ext=new_ext):
                     dl_filename = existing_video_file(full_filename, temp_filename)
                     info_dict['__real_download'] = False
 
+                    merger = FFmpegMergerPP(self)
                     downloaded = []
                     if dl_filename is not None:
                         self.report_file_already_downloaded(dl_filename)
index db34fe12a612df896601b0ee95790e8996b1aae3..032856eb8a74e4bb173f21ef99193d10a6298389 100644 (file)
@@ -12,6 +12,7 @@
 from .compat import compat_getpass, compat_shlex_quote
 from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS
 from .downloader import FileDownloader
+from .downloader.external import get_external_downloader
 from .extractor import list_extractor_classes
 from .extractor.adobepass import MSO_INFO
 from .extractor.common import InfoExtractor
@@ -39,6 +40,7 @@
     download_range_func,
     expand_path,
     float_or_none,
+    format_field,
     int_or_none,
     match_filter_func,
     parse_duration,
@@ -399,6 +401,10 @@ def metadataparser_actions(f):
     if opts.no_sponsorblock:
         opts.sponsorblock_mark = opts.sponsorblock_remove = set()
 
+    for proto, path in opts.external_downloader.items():
+        if get_external_downloader(path) is None:
+            raise ValueError(
+                f'No such {format_field(proto, None, "%s ", ignore="default")}external downloader "{path}"')
     warnings, deprecation_warnings = [], []
 
     # Common mistake: -f best
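For illustration, a rough sketch of what this start-up check does, assuming get_external_downloader() resolves an executable name such as aria2c to its downloader class and returns None for unrecognised names (which is what the check above relies on); the option values here are made up:

    from yt_dlp.downloader.external import get_external_downloader
    from yt_dlp.utils import format_field

    # Made-up mapping in the shape of opts.external_downloader: protocol -> executable
    external_downloader = {'dash': 'aria2c', 'default': 'not-a-real-downloader'}
    for proto, path in external_downloader.items():
        if get_external_downloader(path) is None:
            # format_field() drops the protocol prefix for the generic 'default' key
            raise ValueError(
                f'No such {format_field(proto, None, "%s ", ignore="default")}external downloader "{path}"')
    # -> ValueError: No such external downloader "not-a-real-downloader"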
index 383a05a569e9c0f89073eed25753eb571d51f833..3e63df6cb1530a04bcea9e0e09a846930d5541ab 100644 (file)
     IqIE,
     IqAlbumIE
 )
-
 from .itprotv import (
     ITProTVIE,
     ITProTVCourseIE
 )
-
 from .itv import (
     ITVIE,
     ITVBTCCIE,
index 1b9deeae84f09ead8b895258b254df64d6f1d1fc..81a6542c3a77307b59267c07ea692b304097d21c 100644 (file)
@@ -16,7 +16,7 @@
 from ..utils import (
     ExtractorError,
     bytes_to_intlist,
-    decode_base,
+    decode_base_n,
     int_or_none,
     intlist_to_bytes,
     request_to_url,
@@ -123,7 +123,7 @@ def _get_videokey_from_ticket(self, ticket):
                 'Content-Type': 'application/json',
             })
 
-        res = decode_base(license_response['k'], self.STRTABLE)
+        res = decode_base_n(license_response['k'], table=self.STRTABLE)
         encvideokey = bytes_to_intlist(struct.pack('>QQ', res >> 64, res & 0xffffffffffffffff))
 
         h = hmac.new(
index 3e3e557985ca17d6cdb405e9345318c3feba5234..3e8ba5bdd3eed3a1b95b11be3ccc7a51709fd552 100644 (file)
@@ -2817,7 +2817,7 @@ def extract_Initialization(source):
                     base_url = ''
                     for element in (representation, adaptation_set, period, mpd_doc):
                         base_url_e = element.find(_add_ns('BaseURL'))
-                        if base_url_e is not None:
+                        if base_url_e is not None and base_url_e.text:
                             base_url = base_url_e.text + base_url
                             if re.match(r'^https?://', base_url):
                                 break
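The BaseURL loop above prepends each ancestor's BaseURL until an absolute URL is formed; a minimal standalone sketch of that resolution order, using made-up plain strings in place of the XML elements:

    import re

    base_url = ''
    # Innermost to outermost, mirroring (representation, adaptation_set, period, mpd_doc)
    for text in ('segments/', 'video/', 'https://cdn.example.com/'):
        base_url = text + base_url
        if re.match(r'^https?://', base_url):
            break
    # base_url == 'https://cdn.example.com/video/segments/'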
index 5a147618361e310684f0a3746090013060ef2cfc..1f27797ada64163da1c1ec6961446cd3b9f21728 100644 (file)
@@ -73,9 +73,7 @@ def _real_extract(self, url):
             'display_id': slug,
             'title': traverse_obj(episode_info, 'title', 'name'),
             'description': episode_info.get('description'),
-            'creator': join_nonempty(
-                traverse_obj(episode_info, ('createdBy','firstName')), traverse_obj(episode_info, ('createdBy','lastName')), 
-                delim=' '),
+            'creator': join_nonempty(('createdBy', 'firstName'), ('createdBy', 'lastName'), from_dict=episode_info, delim=' '),
             'duration': float_or_none(episode_info.get('duration')),
             'is_live': episode_info.get('isLive'),
             'thumbnail': traverse_obj(episode_info, 'thumbnail', 'image', expected_type=url_or_none),
index 845ce5298c8c0fc293e38456510168d9a5b8337f..b8c5be7a08947b7341297895e2b85af9413e8b22 100644 (file)
@@ -3116,6 +3116,7 @@ def _real_extract(self, url):
         wistia_urls = WistiaIE._extract_urls(webpage)
         if wistia_urls:
             playlist = self.playlist_from_matches(wistia_urls, video_id, video_title, ie=WistiaIE.ie_key())
+            playlist['entries'] = list(playlist['entries'])
             for entry in playlist['entries']:
                 entry.update({
                     '_type': 'url_transparent',
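Materialising playlist['entries'] matters because playlist_from_matches may hand back a lazy iterator; updating dicts while consuming a generator leaves nothing for whoever iterates the playlist later. A minimal sketch of the pitfall, independent of yt-dlp:

    # Lazy generator: the in-place updates are consumed along with the iterator
    entries = ({'id': i} for i in range(2))
    for entry in entries:
        entry.update({'_type': 'url_transparent'})
    assert list(entries) == []          # nothing left for the next consumer

    # Materialised first: the same loop mutates dicts that stay visible afterwards
    entries = [{'id': i} for i in range(2)]
    for entry in entries:
        entry.update({'_type': 'url_transparent'})
    assert all(e['_type'] == 'url_transparent' for e in entries)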
index 8417c43c3d5fd832afed3ab664bc02cf9eed5256..059b62e2abe5df7aadb128089a797003803e3378 100644 (file)
@@ -37,7 +37,7 @@ def md5_text(text):
     return hashlib.md5(text.encode('utf-8')).hexdigest()
 
 
-class IqiyiSDK(object):
+class IqiyiSDK:
     def __init__(self, target, ip, timestamp):
         self.target = target
         self.ip = ip
@@ -131,7 +131,7 @@ def split_time_ip_sum(self):
         self.target = self.digit_sum(self.timestamp) + chunks[0] + compat_str(sum(ip))
 
 
-class IqiyiSDKInterpreter(object):
+class IqiyiSDKInterpreter:
     def __init__(self, sdk_code):
         self.sdk_code = sdk_code
 
index 6abdca788053b8ae6b7ddc09ddf37b96e971e556..b9c579cb6793fd90fe6784071ed8fe0420fa9132 100644 (file)
@@ -146,6 +146,7 @@ def random_user_agent():
 
 
 NO_DEFAULT = object()
+IDENTITY = lambda x: x
 
 ENGLISH_MONTH_NAMES = [
     'January', 'February', 'March', 'April', 'May', 'June',
@@ -4744,22 +4745,42 @@ def pkcs1pad(data, length):
     return [0, 2] + pseudo_random + [0] + data
 
 
-def encode_base_n(num, n, table=None):
-    FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
-    if not table:
-        table = FULL_TABLE[:n]
+def _base_n_table(n, table):
+    if not table and not n:
+        raise ValueError('Either table or n must be specified')
+    elif not table:
+        table = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'[:n]
+    if not n or n == len(table):
+        return table
+    raise ValueError(f'base {n} exceeds table length {len(table)}')
 
-    if n > len(table):
-        raise ValueError('base %d exceeds table length %d' % (n, len(table)))
 
-    if num == 0:
+def encode_base_n(num, n=None, table=None):
+    """Convert given int to a base-n string"""
+    table = _base_n_table(n, table)
+    if not num:
         return table[0]
 
-    ret = ''
+    result, base = '', len(table)
     while num:
-        ret = table[num % n] + ret
-        num = num // n
-    return ret
+        result = table[num % base] + result
+        num = num // base
+    return result
+
+
+def decode_base_n(string, n=None, table=None):
+    """Convert given base-n string to int"""
+    table = {char: index for index, char in enumerate(_base_n_table(n, table))}
+    result, base = 0, len(table)
+    for char in string:
+        result = result * base + table[char]
+    return result
+
+
+def decode_base(value, digits):
+    write_string('DeprecationWarning: yt_dlp.utils.decode_base is deprecated '
+                 'and may be removed in a future version. Use yt_dlp.decode_base_n instead')
+    return decode_base_n(value, table=digits)
 
 
 def decode_packed_codes(code):
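A quick round-trip of the new helpers, assuming the default 62-character table (digits, then lowercase, then uppercase) sliced to the requested base:

    from yt_dlp.utils import encode_base_n, decode_base_n

    assert encode_base_n(255, 16) == 'ff'                  # default table sliced to '0123456789abcdef'
    assert decode_base_n('ff', 16) == 255                  # inverse of encode_base_n
    assert decode_base_n('ff', table='0123456789abcdef') == 255   # explicit table; n may be omitted
    assert encode_base_n(0, 16) == '0'                     # zero maps to the first table character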
@@ -5062,11 +5083,11 @@ def to_high_limit_path(path):
     return path
 
 
-def format_field(obj, field=None, template='%s', ignore=NO_DEFAULT, default='', func=None):
+def format_field(obj, field=None, template='%s', ignore=NO_DEFAULT, default='', func=IDENTITY):
     val = traverse_obj(obj, *variadic(field))
-    if (not val and val != 0) if ignore is NO_DEFAULT else val in ignore:
+    if (not val and val != 0) if ignore is NO_DEFAULT else val in variadic(ignore):
         return default
-    return template % (func(val) if func else val)
+    return template % func(val)
 
 
 def clean_podcast_url(url):
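Two behavioural notes on the updated format_field(): func now defaults to IDENTITY rather than being applied conditionally, and ignore is passed through variadic(), so a single value (including a bare string) is treated as one exact value to skip instead of a container to test membership in. A short illustration with made-up data:

    from yt_dlp.utils import format_field

    assert format_field({'height': 1080}, 'height', '%sp') == '1080p'
    assert format_field({'height': None}, 'height', '%sp', default='?') == '?'
    # A bare string given as `ignore` now matches only that exact value:
    assert format_field('default', None, '%s ', ignore='default') == ''
    assert format_field('dash', None, '%s ', ignore='default') == 'dash '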
@@ -5207,10 +5228,8 @@ def _traverse_obj(obj, path, _current_depth=0):
 
     if isinstance(expected_type, type):
         type_test = lambda val: val if isinstance(val, expected_type) else None
-    elif expected_type is not None:
-        type_test = expected_type
     else:
-        type_test = lambda val: val
+        type_test = expected_type or IDENTITY
 
     for path in path_list:
         depth = 0
@@ -5243,17 +5262,6 @@ def variadic(x, allowed_types=(str, bytes, dict)):
     return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,)
 
 
-def decode_base(value, digits):
-    # This will convert given base-x string to scalar (long or int)
-    table = {char: index for index, char in enumerate(digits)}
-    result = 0
-    base = len(digits)
-    for chr in value:
-        result *= base
-        result += table[chr]
-    return result
-
-
 def time_seconds(**kwargs):
     t = datetime.datetime.now(datetime.timezone(datetime.timedelta(**kwargs)))
     return t.timestamp()
@@ -5327,7 +5335,7 @@ def number_of_digits(number):
 
 def join_nonempty(*values, delim='-', from_dict=None):
     if from_dict is not None:
-        values = map(from_dict.get, values)
+        values = (traverse_obj(from_dict, variadic(v)) for v in values)
     return delim.join(map(str, filter(None, values)))
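With from_dict, the field names now go through traverse_obj(), so nested key paths like the ('createdBy', 'firstName') tuples used in the dailywire change above resolve correctly, while plain top-level keys keep their old meaning. A small illustration with made-up data:

    from yt_dlp.utils import join_nonempty

    episode_info = {'title': 'Pilot', 'createdBy': {'firstName': 'Jane', 'lastName': 'Doe'}}

    assert join_nonempty(('createdBy', 'firstName'), ('createdBy', 'lastName'),
                         from_dict=episode_info, delim=' ') == 'Jane Doe'
    # Plain keys still work; empty/missing values are dropped as before
    assert join_nonempty('title', 'nonexistent', from_dict=episode_info) == 'Pilot'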