import asyncio
import atexit
import base64
import binascii
import calendar
import codecs
import collections
import collections.abc
import contextlib
import datetime
import email.header
import email.utils
import errno
import gzip
import hashlib
import hmac
import html.entities
import html.parser
import http.client
import http.cookiejar
import inspect
import io
import itertools
import json
import locale
import math
import mimetypes
import operator
import os
import platform
import random
import re
import shlex
import socket
import ssl
import struct
import subprocess
import sys
import tempfile
import time
import traceback
import types
import unicodedata
import urllib.error
import urllib.parse
import urllib.request
import xml.etree.ElementTree
import zlib

from .compat import functools  # isort: split
from .compat import (
    compat_etree_fromstring,
    compat_expanduser,
    compat_HTMLParseError,
    compat_os_name,
    compat_shlex_quote,
)
from .dependencies import brotli, certifi, websockets, xattr
from .socks import ProxyType, sockssocket


def register_socks_protocols():
    # "Register" SOCKS protocols
    # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
    # URLs with protocols not in urlparse.uses_netloc are not handled correctly
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in urllib.parse.uses_netloc:
            urllib.parse.uses_netloc.append(scheme)


# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))


def random_user_agent():
    _USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
    _CHROME_VERSIONS = (
        '90.0.4430.212',
        '90.0.4430.24',
        '90.0.4430.70',
        '90.0.4430.72',
        '90.0.4430.85',
        '90.0.4430.93',
        '91.0.4472.101',
        '91.0.4472.106',
        '91.0.4472.114',
        '91.0.4472.124',
        '91.0.4472.164',
        '91.0.4472.19',
        '91.0.4472.77',
        '92.0.4515.107',
        '92.0.4515.115',
        '92.0.4515.131',
        '92.0.4515.159',
        '92.0.4515.43',
        '93.0.4556.0',
        '93.0.4577.15',
        '93.0.4577.63',
        '93.0.4577.82',
        '94.0.4606.41',
        '94.0.4606.54',
        '94.0.4606.61',
        '94.0.4606.71',
        '94.0.4606.81',
        '94.0.4606.85',
        '95.0.4638.17',
        '95.0.4638.50',
        '95.0.4638.54',
        '95.0.4638.69',
        '95.0.4638.74',
        '96.0.4664.18',
        '96.0.4664.45',
        '96.0.4664.55',
        '96.0.4664.93',
        '97.0.4692.20',
    )
    return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)


SUPPORTED_ENCODINGS = [
    'gzip', 'deflate'
]
if brotli:
    SUPPORTED_ENCODINGS.append('br')

std_headers = {
    'User-Agent': random_user_agent(),
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-us,en;q=0.5',
    'Sec-Fetch-Mode': 'navigate',
}


USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}


NO_DEFAULT = object()
IDENTITY = lambda x: x

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
    # these follow the genitive grammatical case (dopełniacz)
    # some websites might be using nominative, which will require another month list
    # https://en.wikibooks.org/wiki/Polish/Noun_cases
    'pl': ['stycznia', 'lutego', 'marca', 'kwietnia', 'maja', 'czerwca',
           'lipca', 'sierpnia', 'września', 'października', 'listopada', 'grudnia'],
}

# From https://github.com/python/cpython/blob/3.11/Lib/email/_parseaddr.py#L36-L42
TIMEZONE_NAMES = {
    'UT': 0, 'UTC': 0, 'GMT': 0, 'Z': 0,
    'AST': -4, 'ADT': -3,  # Atlantic (used in Canada)
    'EST': -5, 'EDT': -4,  # Eastern
    'CST': -6, 'CDT': -5,  # Central
    'MST': -7, 'MDT': -6,  # Mountain
    'PST': -8, 'PDT': -7   # Pacific
}

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))

DATE_FORMATS = (
    '%d %B %Y',
    '%d %b %Y',
    '%B %d %Y',
    '%B %dst %Y',
    '%B %dnd %Y',
    '%B %drd %Y',
    '%B %dth %Y',
    '%b %d %Y',
    '%b %dst %Y',
    '%b %dnd %Y',
    '%b %drd %Y',
    '%b %dth %Y',
    '%b %dst %Y %I:%M',
    '%b %dnd %Y %I:%M',
    '%b %drd %Y %I:%M',
    '%b %dth %Y %I:%M',
    '%Y %m %d',
    '%Y-%m-%d',
    '%Y.%m.%d.',
    '%Y/%m/%d',
    '%Y/%m/%d %H:%M',
    '%Y/%m/%d %H:%M:%S',
    '%Y%m%d%H%M',
    '%Y%m%d%H%M%S',
    '%Y%m%d',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S.%f',
    '%Y-%m-%d %H:%M:%S:%f',
    '%d.%m.%Y %H:%M',
    '%d.%m.%Y %H.%M',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%Y-%m-%dT%H:%M',
    '%b %d %Y at %H:%M',
    '%b %d %Y at %H:%M:%S',
    '%B %d %Y at %H:%M',
    '%B %d %Y at %H:%M:%S',
    '%H:%M %d-%b-%Y',
)

DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
    '%d-%m-%Y %H:%M',
])

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])

PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>\s*(?P<json_ld>{.+?}|\[.+?\])\s*</script>'

NUMBER_RE = r'\d+(?:\.\d+)?'


@functools.cache
def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref


def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    tf = tempfile.NamedTemporaryFile(
        prefix=f'{os.path.basename(fn)}.', dir=os.path.dirname(fn),
        suffix='.tmp', delete=False, mode='w', encoding='utf-8')

    try:
        with tf:
            json.dump(obj, tf, ensure_ascii=False)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            with contextlib.suppress(OSError):
                os.unlink(fn)
        with contextlib.suppress(OSError):
            mask = os.umask(0)
            os.umask(mask)
            os.chmod(tf.name, 0o666 & ~mask)
        os.rename(tf.name, fn)
    except Exception:
        with contextlib.suppress(OSError):
            os.remove(tf.name)
        raise


def find_xpath_attr(node, xpath, key, val=None):
    """ Find the xpath xpath[@key=val] """
    assert re.match(r'^[a-zA-Z_-]+$', key)
    expr = xpath + ('[@%s]' % key if val is None else f"[@{key}='{val}']")
    return node.find(expr)

# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter


def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)

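# A minimal usage sketch (illustrative comment, not part of the original module),
# assuming a made-up namespace map; the prefix is expanded into ElementTree's
# `{uri}tag` notation:
#   >>> xpath_with_ns('ns:media/ns:title', {'ns': 'http://example.com/ns'})
#   '{http://example.com/ns}media/{http://example.com/ns}title'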
def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(xpath)

    if isinstance(xpath, str):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n


def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text


def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = f'{xpath}[@{key}]' if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]

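# Illustrative sketch (not part of the original module) of the xpath_* helpers
# above, using a made-up document; `fatal` and `default` behave as documented
# in xpath_element():
#   >>> doc = compat_etree_fromstring('<root><video id="v1">Title</video></root>')
#   >>> xpath_text(doc, 'video')
#   'Title'
#   >>> xpath_attr(doc, 'video', 'id')
#   'v1'
#   >>> xpath_text(doc, 'missing', default=None) is None
#   True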
def get_element_by_id(id, html, **kwargs):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html, **kwargs)


def get_element_html_by_id(id, html, **kwargs):
    """Return the html of the tag with the specified ID in the passed HTML document"""
    return get_element_html_by_attribute('id', id, html, **kwargs)


def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_html_by_class(class_name, html):
    """Return the html of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_html_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_by_attribute(attribute, value, html, **kwargs):
    retval = get_elements_by_attribute(attribute, value, html, **kwargs)
    return retval[0] if retval else None


def get_element_html_by_attribute(attribute, value, html, **kargs):
    retval = get_elements_html_by_attribute(attribute, value, html, **kargs)
    return retval[0] if retval else None


def get_elements_by_class(class_name, html, **kargs):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_by_attribute(
        'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_html_by_class(class_name, html):
    """Return the html of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_html_by_attribute(
        'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_by_attribute(*args, **kwargs):
    """Return the content of all tags with the specified attribute in the passed HTML document as a list"""
    return [content for content, _ in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_html_by_attribute(*args, **kwargs):
    """Return the html of all tags with the specified attribute in the passed HTML document as a list"""
    return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_text_and_html_by_attribute(attribute, value, html, *, tag=r'[\w:.-]+', escape_value=True):
    """
    Return the text (content) and the html (whole) of the tag with the specified
    attribute in the passed HTML document
    """
    if not value:
        return

    quote = '' if re.match(r'''[\s"'`=<>]''', value) else '?'

    value = re.escape(value) if escape_value else value

    partial_element_re = rf'''(?x)
        <(?P<tag>{tag})
         (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
         \s{re.escape(attribute)}\s*=\s*(?P<_q>['"]{quote})(?-x:{value})(?P=_q)
        '''

    for m in re.finditer(partial_element_re, html):
        content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():])

        yield (
            unescapeHTML(re.sub(r'^(?P<q>["\'])(?P<content>.*)(?P=q)$', r'\g<content>', content, flags=re.DOTALL)),
            whole
        )

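# Illustrative sketch (not part of the original module): the class helpers
# above match whole class tokens, not substrings:
#   >>> get_element_by_class('title', '<p class="title main">Foo</p>')
#   'Foo'
#   >>> get_element_html_by_class('title', '<p class="title main">Foo</p>')
#   '<p class="title main">Foo</p>'
#   >>> get_element_by_class('tit', '<p class="title main">Foo</p>') is None
#   True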
class HTMLBreakOnClosingTagParser(html.parser.HTMLParser):
    """
    HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
    closing tag for the first opening tag it has encountered, and can be used
    as a context manager
    """

    class HTMLBreakOnClosingTagException(Exception):
        pass

    def __init__(self):
        self.tagstack = collections.deque()
        html.parser.HTMLParser.__init__(self)

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def close(self):
        # handle_endtag does not return upon raising HTMLBreakOnClosingTagException,
        # so data remains buffered; we no longer have any interest in it, thus
        # override this method to discard it
        pass

    def handle_starttag(self, tag, _):
        self.tagstack.append(tag)

    def handle_endtag(self, tag):
        if not self.tagstack:
            raise compat_HTMLParseError('no tags in the stack')
        while self.tagstack:
            inner_tag = self.tagstack.pop()
            if inner_tag == tag:
                break
        else:
            raise compat_HTMLParseError(f'matching opening tag for closing {tag} tag not found')
        if not self.tagstack:
            raise self.HTMLBreakOnClosingTagException()


# XXX: This should be far less strict
def get_element_text_and_html_by_tag(tag, html):
    """
    For the first element with the specified tag in the passed HTML document
    return its content (text) and the whole element (html)
    """
    def find_or_raise(haystack, needle, exc):
        try:
            return haystack.index(needle)
        except ValueError:
            raise exc
    closing_tag = f'</{tag}>'
    whole_start = find_or_raise(
        html, f'<{tag}', compat_HTMLParseError(f'opening {tag} tag not found'))
    content_start = find_or_raise(
        html[whole_start:], '>', compat_HTMLParseError(f'malformed opening {tag} tag'))
    content_start += whole_start + 1
    with HTMLBreakOnClosingTagParser() as parser:
        parser.feed(html[whole_start:content_start])
        if not parser.tagstack or parser.tagstack[0] != tag:
            raise compat_HTMLParseError(f'parser did not match opening {tag} tag')
        offset = content_start
        while offset < len(html):
            next_closing_tag_start = find_or_raise(
                html[offset:], closing_tag,
                compat_HTMLParseError(f'closing {tag} tag not found'))
            next_closing_tag_end = next_closing_tag_start + len(closing_tag)
            try:
                parser.feed(html[offset:offset + next_closing_tag_end])
                offset += next_closing_tag_end
            except HTMLBreakOnClosingTagParser.HTMLBreakOnClosingTagException:
                return html[content_start:offset + next_closing_tag_start], \
                    html[whole_start:offset + next_closing_tag_end]
        raise compat_HTMLParseError('unexpected end of html')


class HTMLAttributeParser(html.parser.HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""

    def __init__(self):
        self.attrs = {}
        html.parser.HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)
        raise compat_HTMLParseError('done')


class HTMLListAttrsParser(html.parser.HTMLParser):
    """HTML parser to gather the attributes for the elements of a list"""

    def __init__(self):
        html.parser.HTMLParser.__init__(self)
        self.items = []
        self._level = 0

    def handle_starttag(self, tag, attrs):
        if tag == 'li' and self._level == 0:
            self.items.append(dict(attrs))
        self._level += 1

    def handle_endtag(self, tag):
        self._level -= 1


def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    """
    parser = HTMLAttributeParser()
    with contextlib.suppress(compat_HTMLParseError):
        parser.feed(html_element)
        parser.close()
    return parser.attrs


def parse_list(webpage):
    """Given a string for a series of HTML <li> elements,
    return a list of dictionaries with their attributes"""
    parser = HTMLListAttrsParser()
    parser.feed(webpage)
    parser.close()
    return parser.items


def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    html = re.sub(r'\s+', ' ', html)
    html = re.sub(r'(?u)\s?<\s?br\s?/?\s?>\s?', '\n', html)
    html = re.sub(r'(?u)<\s?/\s?p\s?>\s?<\s?p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()

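# Illustrative sketch (not part of the original module): <br> becomes a
# newline, remaining tags are stripped and entities decoded:
#   >>> clean_html('<p>Hello&nbsp;<b>world</b><br/>bye</p>')
#   'Hello\xa0world\nbye'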
class LenientJSONDecoder(json.JSONDecoder):
    def __init__(self, *args, transform_source=None, ignore_extra=False, **kwargs):
        self.transform_source, self.ignore_extra = transform_source, ignore_extra
        super().__init__(*args, **kwargs)

    def decode(self, s):
        if self.transform_source:
            s = self.transform_source(s)
        try:
            if self.ignore_extra:
                return self.raw_decode(s.lstrip())[0]
            return super().decode(s)
        except json.JSONDecodeError as e:
            if e.pos is not None:
                # Clamp the lower bound so that a small `e.pos` cannot produce
                # a negative index, which would slice from the end of the string
                raise type(e)(f'{e.msg} in {s[max(e.pos - 10, 0):e.pos + 10]!r}', s, e.pos)
            raise

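# Illustrative sketch (not part of the original module): tolerating trailing
# garbage after a JSON document, e.g. when scraping an inline `var x = {...};`:
#   >>> json.loads('{"a": 1}; trailing', cls=LenientJSONDecoder, ignore_extra=True)
#   {'a': 1}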
def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    if filename == '-':
        if sys.platform == 'win32':
            import msvcrt

            # stdout may be any IO stream, e.g. when using contextlib.redirect_stdout
            with contextlib.suppress(io.UnsupportedOperation):
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)

    for attempt in range(2):
        try:
            try:
                if sys.platform == 'win32':
                    # FIXME: An exclusive lock also locks the file from being read.
                    # Since windows locks are mandatory, don't lock the file on windows (for now).
                    # Ref: https://github.com/yt-dlp/yt-dlp/issues/3124
                    raise LockingUnsupportedError()
                stream = locked_file(filename, open_mode, block=False).__enter__()
            except OSError:
                stream = open(filename, open_mode)
            return stream, filename
        except OSError as err:
            if attempt or err.errno in (errno.EACCES,):
                raise
            old_filename, filename = filename, sanitize_path(filename)
            if old_filename == filename:
                raise


def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp

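# Illustrative sketch (not part of the original module): unparsable input
# yields None rather than raising:
#   >>> timeconvert('Thu, 01 Jan 1970 00:00:00 +0000')
#   0
#   >>> timeconvert('not a date') is None
#   True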
def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
    """Sanitizes a string so it could be used as part of a filename.
    @param restricted   Use a stricter subset of allowed characters
    @param is_id        Whether this is an ID that should be kept unchanged if possible.
                        If unset, yt-dlp's new sanitization rules are in effect
    """
    if s == '':
        return ''

    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        elif not restricted and char == '\n':
            return '\0 '
        elif is_id is NO_DEFAULT and not restricted and char in '"*:<>?|/\\':
            # Replace with their full-width unicode counterparts
            return {'/': '\u29F8', '\\': '\u29f9'}.get(char, chr(ord(char) + 0xfee0))
        elif char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '\0_\0-' if restricted else '\0 \0-'
        elif char in '\\/|*<>':
            return '\0_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace() or ord(char) > 127):
            return '\0_'
        return char

    # Replace look-alike Unicode glyphs
    if restricted and (is_id is NO_DEFAULT or not is_id):
        s = unicodedata.normalize('NFKC', s)
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)  # Handle timestamps
    result = ''.join(map(replace_insane, s))
    if is_id is NO_DEFAULT:
        result = re.sub(r'(\0.)(?:(?=\1)..)+', r'\1', result)  # Remove repeated substitute chars
        STRIP_RE = r'(?:\0.|[ _-])*'
        result = re.sub(f'^\0.{STRIP_RE}|{STRIP_RE}\0.$', '', result)  # Remove substitute chars from start/end
    result = result.replace('\0', '') or '_'

    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result

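# Illustrative sketch (not part of the original module): by default, reserved
# characters are swapped for full-width look-alikes; restricted mode reduces
# everything to a safe ASCII subset:
#   >>> sanitize_filename('A/B: C?')
#   'A⧸B： C？'
#   >>> sanitize_filename('aäb 中 c', restricted=True)
#   'aab_c'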
def sanitize_path(s, force=False):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform == 'win32':
        force = False
        drive_or_unc, _ = os.path.splitdrive(s)
    elif force:
        drive_or_unc = ''
    else:
        return s

    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    elif force and s and s[0] == os.path.sep:
        sanitized_path.insert(0, os.path.sep)
    return os.path.join(*sanitized_path)


def sanitize_url(url, *, scheme='http'):
    # Prepend protocol-less URLs with `http:` scheme in order to mitigate
    # the number of unwanted failures due to missing protocol
    if url is None:
        return
    elif url.startswith('//'):
        return f'{scheme}:{url}'
    # Fix some common typos seen so far
    COMMON_TYPOS = (
        # https://github.com/ytdl-org/youtube-dl/issues/15649
        (r'^httpss://', r'https://'),
        # https://bx1.be/lives/direct-tv/
        (r'^rmtp([es]?)://', r'rtmp\1://'),
    )
    for mistake, fixup in COMMON_TYPOS:
        if re.match(mistake, url):
            return re.sub(mistake, fixup, url)
    return url

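# Illustrative sketch (not part of the original module):
#   >>> sanitize_url('//example.com/video')
#   'http://example.com/video'
#   >>> sanitize_url('httpss://example.com')
#   'https://example.com'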
def extract_basic_auth(url):
    parts = urllib.parse.urlsplit(url)
    if parts.username is None:
        return url, None
    url = urllib.parse.urlunsplit(parts._replace(netloc=(
        parts.hostname if parts.port is None
        else '%s:%d' % (parts.hostname, parts.port))))
    auth_payload = base64.b64encode(
        ('%s:%s' % (parts.username, parts.password or '')).encode())
    return url, f'Basic {auth_payload.decode()}'

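# Illustrative sketch (not part of the original module): credentials embedded
# in the URL are stripped and returned as a Basic auth header value:
#   >>> extract_basic_auth('http://user:pass@example.com/feed')
#   ('http://example.com/feed', 'Basic dXNlcjpwYXNz')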
def sanitized_Request(url, *args, **kwargs):
    url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
    if auth_header is not None:
        headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
        headers['Authorization'] = auth_header
    return urllib.request.Request(url, *args, **kwargs)


def expand_path(s):
    """Expand shell variables and ~"""
    return os.path.expandvars(compat_expanduser(s))


def orderedSet(iterable, *, lazy=False):
    """Remove all duplicates from the input iterable"""
    def _iter():
        seen = []  # Do not use set since the items can be unhashable
        for x in iterable:
            if x not in seen:
                seen.append(x)
                yield x

    return _iter() if lazy else list(_iter())

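# Illustrative sketch (not part of the original module): order-preserving
# dedup that also works for unhashable items such as dicts:
#   >>> orderedSet([1, 2, 1, 3, 2])
#   [1, 2, 3]
#   >>> orderedSet([{'a': 1}, {'a': 1}])
#   [{'a': 1}]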
def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in html.entities.name2codepoint:
        return chr(html.entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon.
    # E.g. '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in html.entities.html5:
        return html.entities.html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/ytdl-org/youtube-dl/issues/7518
        with contextlib.suppress(ValueError):
            return chr(int(numstr, base))

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert isinstance(s, str)

    return re.sub(
        r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)

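# Illustrative sketch (not part of the original module): named, decimal and
# hex entities are all handled; unknown names pass through literally:
#   >>> unescapeHTML('&amp; &#38; &#x26; &bogus;')
#   '& & & &bogus;'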
def escapeHTML(text):
    return (
        text
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;')
        .replace('"', '&quot;')
        .replace("'", '&#39;')
    )


def process_communicate_or_kill(p, *args, **kwargs):
    deprecation_warning(f'"{__name__}.process_communicate_or_kill" is deprecated and may be removed '
                        f'in a future version. Use "{__name__}.Popen.communicate_or_kill" instead')
    return Popen.communicate_or_kill(p, *args, **kwargs)


class Popen(subprocess.Popen):
    if sys.platform == 'win32':
        _startupinfo = subprocess.STARTUPINFO()
        _startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    else:
        _startupinfo = None

    @staticmethod
    def _fix_pyinstaller_ld_path(env):
        """Restore LD_LIBRARY_PATH when using PyInstaller
            Ref: https://github.com/pyinstaller/pyinstaller/blob/develop/doc/runtime-information.rst#ld_library_path--libpath-considerations
                 https://github.com/yt-dlp/yt-dlp/issues/4573
        """
        if not hasattr(sys, '_MEIPASS'):
            return

        def _fix(key):
            orig = env.get(f'{key}_ORIG')
            if orig is None:
                env.pop(key, None)
            else:
                env[key] = orig

        _fix('LD_LIBRARY_PATH')  # Linux
        _fix('DYLD_LIBRARY_PATH')  # macOS

    def __init__(self, *args, env=None, text=False, **kwargs):
        if env is None:
            env = os.environ.copy()
        self._fix_pyinstaller_ld_path(env)

        if text is True:
            kwargs['universal_newlines'] = True  # For 3.6 compatibility
            kwargs.setdefault('encoding', 'utf-8')
            kwargs.setdefault('errors', 'replace')
        super().__init__(*args, env=env, **kwargs, startupinfo=self._startupinfo)

    def communicate_or_kill(self, *args, **kwargs):
        try:
            return self.communicate(*args, **kwargs)
        except BaseException:  # Including KeyboardInterrupt
            self.kill(timeout=None)
            raise

    def kill(self, *, timeout=0):
        super().kill()
        if timeout != 0:
            self.wait(timeout=timeout)

    @classmethod
    def run(cls, *args, timeout=None, **kwargs):
        with cls(*args, **kwargs) as proc:
            default = '' if proc.text_mode else b''
            stdout, stderr = proc.communicate_or_kill(timeout=timeout)
            return stdout or default, stderr or default, proc.returncode

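# Illustrative sketch (not part of the original module): Popen.run() captures
# output and the return code in a single call:
#   >>> stdout, stderr, returncode = Popen.run(
#   ...     [sys.executable, '-c', 'print("hi")'],
#   ...     text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#   >>> stdout.strip(), returncode
#   ('hi', 0)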
def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding


def encodeFilename(s, for_subprocess=False):
    assert isinstance(s, str)
    return s


def decodeFilename(b, for_subprocess=False):
    return b


def encodeArgument(s):
    # Legacy code that uses byte strings
    # Uncomment the following line after fixing all post processors
    # assert isinstance(s, str), 'Internal error: %r should be of type %r, is %r' % (s, str, type(s))
    return s if isinstance(s, str) else s.decode('ascii')


def decodeArgument(b):
    return b


def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, str)
    return optval


_timetuple = collections.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))


def timetuple_from_msec(msec):
    secs, msec = divmod(msec, 1000)
    mins, secs = divmod(secs, 60)
    hrs, mins = divmod(mins, 60)
    return _timetuple(hrs, mins, secs, msec)


def formatSeconds(secs, delim=':', msec=False):
    time = timetuple_from_msec(secs * 1000)
    if time.hours:
        ret = '%d%s%02d%s%02d' % (time.hours, delim, time.minutes, delim, time.seconds)
    elif time.minutes:
        ret = '%d%s%02d' % (time.minutes, delim, time.seconds)
    else:
        ret = '%d' % time.seconds
    return '%s.%03d' % (ret, time.milliseconds) if msec else ret

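# Illustrative sketch (not part of the original module):
#   >>> timetuple_from_msec(3723500)
#   Time(hours=1, minutes=2, seconds=3, milliseconds=500)
#   >>> formatSeconds(3723, msec=True)
#   '1:02:03.000'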
def _ssl_load_windows_store_certs(ssl_context, storename):
    # Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
    try:
        certs = [cert for cert, encoding, trust in ssl.enum_certificates(storename)
                 if encoding == 'x509_asn' and (
                     trust is True or ssl.Purpose.SERVER_AUTH.oid in trust)]
    except PermissionError:
        return
    for cert in certs:
        with contextlib.suppress(ssl.SSLError):
            ssl_context.load_verify_locations(cadata=cert)


def make_HTTPS_handler(params, **kwargs):
    opts_check_certificate = not params.get('nocheckcertificate')
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = opts_check_certificate
    if params.get('legacyserverconnect'):
        context.options |= 4  # SSL_OP_LEGACY_SERVER_CONNECT
        # Allow use of weaker ciphers in Python 3.10+. See https://bugs.python.org/issue43998
        context.set_ciphers('DEFAULT')
    elif (
        sys.version_info < (3, 10)
        and ssl.OPENSSL_VERSION_INFO >= (1, 1, 1)
        and not ssl.OPENSSL_VERSION.startswith('LibreSSL')
    ):
        # Backport the default SSL ciphers and minimum TLS version settings from Python 3.10 [1].
        # This is to ensure consistent behavior across Python versions, and help avoid fingerprinting
        # in some situations [2][3].
        # Python 3.10 only supports OpenSSL 1.1.1+ [4]. Because this change is likely
        # untested on older versions, we only apply this to OpenSSL 1.1.1+ to be safe.
        # LibreSSL is excluded until further investigation due to cipher support issues [5][6].
        # 1. https://github.com/python/cpython/commit/e983252b516edb15d4338b0a47631b59ef1e2536
        # 2. https://github.com/yt-dlp/yt-dlp/issues/4627
        # 3. https://github.com/yt-dlp/yt-dlp/pull/5294
        # 4. https://peps.python.org/pep-0644/
        # 5. https://peps.python.org/pep-0644/#libressl-support
        # 6. https://github.com/yt-dlp/yt-dlp/commit/5b9f253fa0aee996cf1ed30185d4b502e00609c4#commitcomment-89054368
        context.set_ciphers('@SECLEVEL=2:ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES:DHE+AES:!aNULL:!eNULL:!aDSS:!SHA1:!AESCCM')
        context.minimum_version = ssl.TLSVersion.TLSv1_2

    context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
    if opts_check_certificate:
        if has_certifi and 'no-certifi' not in params.get('compat_opts', []):
            context.load_verify_locations(cafile=certifi.where())
        else:
            try:
                context.load_default_certs()
                # Work around the issue in load_default_certs when there are bad certificates. See:
                # https://github.com/yt-dlp/yt-dlp/issues/1060,
                # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
            except ssl.SSLError:
                # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
                if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
                    for storename in ('CA', 'ROOT'):
                        _ssl_load_windows_store_certs(context, storename)
                context.set_default_verify_paths()

    client_certfile = params.get('client_certificate')
    if client_certfile:
        try:
            context.load_cert_chain(
                client_certfile, keyfile=params.get('client_certificate_key'),
                password=params.get('client_certificate_password'))
        except ssl.SSLError:
            raise YoutubeDLError('Unable to load client certificate')

    # Some servers may reject requests if ALPN extension is not sent. See:
    # https://github.com/python/cpython/issues/85140
    # https://github.com/yt-dlp/yt-dlp/issues/3878
    with contextlib.suppress(NotImplementedError):
        context.set_alpn_protocols(['http/1.1'])

    return YoutubeDLHTTPSHandler(params, context=context, **kwargs)


def bug_reports_message(before=';'):
    from .update import REPOSITORY

    msg = (f'please report this issue on https://github.com/{REPOSITORY}/issues?q= , '
           'filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U')

    before = before.rstrip()
    if not before or before.endswith(('.', '!', '?')):
        msg = msg[0].title() + msg[1:]

    return (before + ' ' if before else '') + msg


class YoutubeDLError(Exception):
    """Base exception for YoutubeDL errors."""
    msg = None

    def __init__(self, msg=None):
        if msg is not None:
            self.msg = msg
        elif self.msg is None:
            self.msg = type(self).__name__
        super().__init__(self.msg)


network_exceptions = [urllib.error.URLError, http.client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
    network_exceptions.append(ssl.CertificateError)
network_exceptions = tuple(network_exceptions)


class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
        """
        if sys.exc_info()[0] in network_exceptions:
            expected = True

        self.orig_msg = str(msg)
        self.traceback = tb
        self.expected = expected
        self.cause = cause
        self.video_id = video_id
        self.ie = ie
        self.exc_info = sys.exc_info()  # preserve original exception
        if isinstance(self.exc_info[1], ExtractorError):
            self.exc_info = self.exc_info[1].exc_info
        super().__init__(self.__msg)

    @property
    def __msg(self):
        return ''.join((
            format_field(self.ie, None, '[%s] '),
            format_field(self.video_id, None, '%s: '),
            self.orig_msg,
            format_field(self.cause, None, ' (caused by %r)'),
            '' if self.expected else bug_reports_message()))

    def format_traceback(self):
        return join_nonempty(
            self.traceback and ''.join(traceback.format_tb(self.traceback)),
            self.cause and ''.join(traceback.format_exception(None, self.cause, self.cause.__traceback__)[1:]),
            delim='\n') or None

    def __setattr__(self, name, value):
        super().__setattr__(name, value)
        if getattr(self, 'msg', None) and name not in ('msg', 'args'):
            self.msg = self.__msg or type(self).__name__
            self.args = (self.msg, )  # Cannot be property


class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super().__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class GeoRestrictedError(ExtractorError):
    """Geographic restriction Error exception.

    This exception may be thrown when a video is not available from your
    geographic location due to geographic restrictions imposed by a website.
    """

    def __init__(self, msg, countries=None, **kwargs):
        kwargs['expected'] = True
        super().__init__(msg, **kwargs)
        self.countries = countries


class UserNotLive(ExtractorError):
    """Error when a channel/user is not live"""

    def __init__(self, msg=None, **kwargs):
        kwargs['expected'] = True
        super().__init__(msg or 'The channel is not currently live', **kwargs)


class DownloadError(YoutubeDLError):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super().__init__(msg)
        self.exc_info = exc_info


class EntryNotInPlaylist(YoutubeDLError):
    """Entry not in playlist exception.

    This exception will be thrown by YoutubeDL when a requested entry
    is not found in the playlist info_dict
    """
    msg = 'Entry not found in info'


class SameFileError(YoutubeDLError):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    msg = 'Fixed output name but more than one file to download'

    def __init__(self, filename=None):
        if filename is not None:
            self.msg += f': {filename}'
        super().__init__(self.msg)


class PostProcessingError(YoutubeDLError):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """


class DownloadCancelled(YoutubeDLError):
    """ Exception raised when the download queue should be interrupted """
    msg = 'The download was cancelled'


class ExistingVideoReached(DownloadCancelled):
    """ --break-on-existing triggered """
    msg = 'Encountered a video that is already in the archive, stopping due to --break-on-existing'


class RejectedVideoReached(DownloadCancelled):
    """ --break-on-reject triggered """
    msg = 'Encountered a video that did not match filter, stopping due to --break-on-reject'


class MaxDownloadsReached(DownloadCancelled):
    """ --max-downloads limit has been reached. """
    msg = 'Maximum number of downloads reached, stopping due to --max-downloads'


class ReExtractInfo(YoutubeDLError):
    """ Video info needs to be re-extracted. """

    def __init__(self, msg, expected=False):
        super().__init__(msg)
        self.expected = expected


class ThrottledDownload(ReExtractInfo):
    """ Download speed below --throttled-rate. """
    msg = 'The download speed is below throttle limit'

    def __init__(self):
        super().__init__(self.msg, expected=False)


class UnavailableVideoError(YoutubeDLError):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    msg = 'Unable to download video'

    def __init__(self, err=None):
        if err is not None:
            self.msg += f': {err}'
        super().__init__(self.msg)


class ContentTooShortError(YoutubeDLError):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        super().__init__(f'Downloaded {downloaded} bytes, expected {expected} bytes')
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected


class XAttrMetadataError(YoutubeDLError):
    def __init__(self, code=None, msg='Unknown error'):
        super().__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        if (self.code in (errno.ENOSPC, errno.EDQUOT)
                or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'


class XAttrUnavailableError(YoutubeDLError):
    pass


def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    hc = http_class(*args, **kwargs)
    source_address = ydl_handler._params.get('source_address')

    if source_address is not None:
        # This works around _create_connection() from socket, which would otherwise try all
        # address data from getaddrinfo(), including IPv6. This filters the result of
        # getaddrinfo() based on the source_address value.
        # This is based on the cpython socket.create_connection() function.
        # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
        def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
            host, port = address
            err = None
            addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
            af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
            ip_addrs = [addr for addr in addrs if addr[0] == af]
            if addrs and not ip_addrs:
                ip_version = 'v4' if af == socket.AF_INET else 'v6'
                raise OSError(
                    "No remote IP%s addresses available for connect, can't use '%s' as source address"
                    % (ip_version, source_address[0]))
            for res in ip_addrs:
                af, socktype, proto, canonname, sa = res
                sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                        sock.settimeout(timeout)
                    sock.bind(source_address)
                    sock.connect(sa)
                    err = None  # Explicitly break reference cycle
                    return sock
                except OSError as _:
                    err = _
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            else:
                raise OSError('getaddrinfo returns an empty list')
        if hasattr(hc, '_create_connection'):
            hc._create_connection = _create_connection
        hc.source_address = (source_address, 0)

    return hc


def handle_youtubedl_headers(headers):
    filtered_headers = headers

    if 'Youtubedl-no-compression' in filtered_headers:
        filtered_headers = {k: v for k, v in filtered_headers.items() if k.lower() != 'accept-encoding'}
        del filtered_headers['Youtubedl-no-compression']

    return filtered_headers

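# Illustrative sketch (not part of the original module): the internal marker
# header is consumed, and Accept-Encoding is dropped alongside it:
#   >>> handle_youtubedl_headers(
#   ...     {'Youtubedl-no-compression': '1', 'Accept-Encoding': 'gzip', 'User-Agent': 'UA'})
#   {'User-Agent': 'UA'}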
1341 class YoutubeDLHandler(urllib.request.HTTPHandler):
1342 """Handler for HTTP requests and responses.
1343
1344 This class, when installed with an OpenerDirector, automatically adds
1345 the standard headers to every HTTP request and handles gzipped and
1346 deflated responses from web servers. If compression is to be avoided in
1347 a particular request, the original request in the program code only has
1348 to include the HTTP header "Youtubedl-no-compression", which will be
1349 removed before making the real request.
1350
1351 Part of this code was copied from:
1352
1353 http://techknack.net/python-urllib2-handlers/
1354
1355 Andrew Rowls, the author of that code, agreed to release it to the
1356 public domain.
1357 """
1358
1359 def __init__(self, params, *args, **kwargs):
1360 urllib.request.HTTPHandler.__init__(self, *args, **kwargs)
1361 self._params = params
1362
1363 def http_open(self, req):
1364 conn_class = http.client.HTTPConnection
1365
1366 socks_proxy = req.headers.get('Ytdl-socks-proxy')
1367 if socks_proxy:
1368 conn_class = make_socks_conn_class(conn_class, socks_proxy)
1369 del req.headers['Ytdl-socks-proxy']
1370
1371 return self.do_open(functools.partial(
1372 _create_http_connection, self, conn_class, False),
1373 req)
1374
1375 @staticmethod
1376 def deflate(data):
1377 if not data:
1378 return data
1379 try:
1380 return zlib.decompress(data, -zlib.MAX_WBITS)
1381 except zlib.error:
1382 return zlib.decompress(data)
1383
1384 @staticmethod
1385 def brotli(data):
1386 if not data:
1387 return data
1388 return brotli.decompress(data)
1389
1390 def http_request(self, req):
1391 # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
1392 # always respected by websites, some tend to give out URLs with non percent-encoded
1393 # non-ASCII characters (see telemb.py, ard.py [#3412])
1394 # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
1395 # To work around aforementioned issue we will replace request's original URL with
1396 # percent-encoded one
1397 # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
1398 # the code of this workaround has been moved here from YoutubeDL.urlopen()
1399 url = req.get_full_url()
1400 url_escaped = escape_url(url)
1401
1402 # Substitute URL if any change after escaping
1403 if url != url_escaped:
1404 req = update_Request(req, url=url_escaped)
1405
1406 for h, v in self._params.get('http_headers', std_headers).items():
1407 # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
1408 # The dict keys are capitalized because of this bug by urllib
1409 if h.capitalize() not in req.headers:
1410 req.add_header(h, v)
1411
1412 if 'Accept-encoding' not in req.headers:
1413 req.add_header('Accept-encoding', ', '.join(SUPPORTED_ENCODINGS))
1414
1415 req.headers = handle_youtubedl_headers(req.headers)
1416
1417 return super().do_request_(req)
1418
1419 def http_response(self, req, resp):
1420 old_resp = resp
1421 # gzip
1422 if resp.headers.get('Content-encoding', '') == 'gzip':
1423 content = resp.read()
1424 gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
1425 try:
1426 uncompressed = io.BytesIO(gz.read())
1427 except OSError as original_ioerror:
1428 # There may be junk add the end of the file
1429 # See http://stackoverflow.com/q/4928560/35070 for details
1430 for i in range(1, 1024):
1431 try:
1432 gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
1433 uncompressed = io.BytesIO(gz.read())
1434 except OSError:
1435 continue
1436 break
1437 else:
1438 raise original_ioerror
1439 resp = urllib.request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
1440 resp.msg = old_resp.msg
1441 # deflate
1442 if resp.headers.get('Content-encoding', '') == 'deflate':
1443 gz = io.BytesIO(self.deflate(resp.read()))
1444 resp = urllib.request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
1445 resp.msg = old_resp.msg
1446 # brotli
1447 if resp.headers.get('Content-encoding', '') == 'br':
1448 resp = urllib.request.addinfourl(
1449 io.BytesIO(self.brotli(resp.read())), old_resp.headers, old_resp.url, old_resp.code)
1450 resp.msg = old_resp.msg
1451 # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
1452 # https://github.com/ytdl-org/youtube-dl/issues/6457).
1453 if 300 <= resp.code < 400:
1454 location = resp.headers.get('Location')
1455 if location:
1456 # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
1457 location = location.encode('iso-8859-1').decode()
1458 location_escaped = escape_url(location)
1459 if location != location_escaped:
1460 del resp.headers['Location']
1461 resp.headers['Location'] = location_escaped
1462 return resp
1463
1464 https_request = http_request
1465 https_response = http_response
1466
1467
1468 def make_socks_conn_class(base_class, socks_proxy):
1469 assert issubclass(base_class, (
1470 http.client.HTTPConnection, http.client.HTTPSConnection))
1471
1472 url_components = urllib.parse.urlparse(socks_proxy)
1473 if url_components.scheme.lower() == 'socks5':
1474 socks_type = ProxyType.SOCKS5
1475 elif url_components.scheme.lower() in ('socks', 'socks4'):
1476 socks_type = ProxyType.SOCKS4
1477 elif url_components.scheme.lower() == 'socks4a':
1478 socks_type = ProxyType.SOCKS4A
1479
1480 def unquote_if_non_empty(s):
1481 if not s:
1482 return s
1483 return urllib.parse.unquote_plus(s)
1484
1485 proxy_args = (
1486 socks_type,
1487 url_components.hostname, url_components.port or 1080,
1488 True, # Remote DNS
1489 unquote_if_non_empty(url_components.username),
1490 unquote_if_non_empty(url_components.password),
1491 )
1492
1493 class SocksConnection(base_class):
1494 def connect(self):
1495 self.sock = sockssocket()
1496 self.sock.setproxy(*proxy_args)
1497 if isinstance(self.timeout, (int, float)):
1498 self.sock.settimeout(self.timeout)
1499 self.sock.connect((self.host, self.port))
1500
1501 if isinstance(self, http.client.HTTPSConnection):
1502 if hasattr(self, '_context'): # Python > 2.6
1503 self.sock = self._context.wrap_socket(
1504 self.sock, server_hostname=self.host)
1505 else:
1506 self.sock = ssl.wrap_socket(self.sock)
1507
1508 return SocksConnection
1509
1510
1511 class YoutubeDLHTTPSHandler(urllib.request.HTTPSHandler):
1512 def __init__(self, params, https_conn_class=None, *args, **kwargs):
1513 urllib.request.HTTPSHandler.__init__(self, *args, **kwargs)
1514 self._https_conn_class = https_conn_class or http.client.HTTPSConnection
1515 self._params = params
1516
1517 def https_open(self, req):
1518 kwargs = {}
1519 conn_class = self._https_conn_class
1520
1521 if hasattr(self, '_context'): # python > 2.6
1522 kwargs['context'] = self._context
1523 if hasattr(self, '_check_hostname'): # python 3.x
1524 kwargs['check_hostname'] = self._check_hostname
1525
1526 socks_proxy = req.headers.get('Ytdl-socks-proxy')
1527 if socks_proxy:
1528 conn_class = make_socks_conn_class(conn_class, socks_proxy)
1529 del req.headers['Ytdl-socks-proxy']
1530
1531 try:
1532 return self.do_open(
1533 functools.partial(_create_http_connection, self, conn_class, True), req, **kwargs)
1534 except urllib.error.URLError as e:
1535 if (isinstance(e.reason, ssl.SSLError)
1536 and getattr(e.reason, 'reason', None) == 'SSLV3_ALERT_HANDSHAKE_FAILURE'):
1537 raise YoutubeDLError('SSLV3_ALERT_HANDSHAKE_FAILURE: Try using --legacy-server-connect')
1538 raise
1539
1540
1541 def is_path_like(f):
1542 return isinstance(f, (str, bytes, os.PathLike))
1543
1544
1545 class YoutubeDLCookieJar(http.cookiejar.MozillaCookieJar):
1546 """
1547 See [1] for cookie file format.
1548
1549 1. https://curl.haxx.se/docs/http-cookies.html
1550 """
1551 _HTTPONLY_PREFIX = '#HttpOnly_'
1552 _ENTRY_LEN = 7
1553 _HEADER = '''# Netscape HTTP Cookie File
1554 # This file is generated by yt-dlp. Do not edit.
1555
1556 '''
1557 _CookieFileEntry = collections.namedtuple(
1558 'CookieFileEntry',
1559 ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))
1560
1561 def __init__(self, filename=None, *args, **kwargs):
1562 super().__init__(None, *args, **kwargs)
1563 if is_path_like(filename):
1564 filename = os.fspath(filename)
1565 self.filename = filename
1566
1567 @staticmethod
1568 def _true_or_false(cndn):
1569 return 'TRUE' if cndn else 'FALSE'
1570
1571 @contextlib.contextmanager
1572 def open(self, file, *, write=False):
1573 if is_path_like(file):
1574 with open(file, 'w' if write else 'r', encoding='utf-8') as f:
1575 yield f
1576 else:
1577 if write:
1578 file.truncate(0)
1579 yield file
1580
1581 def _really_save(self, f, ignore_discard=False, ignore_expires=False):
1582 now = time.time()
1583 for cookie in self:
1584 if (not ignore_discard and cookie.discard
1585 or not ignore_expires and cookie.is_expired(now)):
1586 continue
1587 name, value = cookie.name, cookie.value
1588 if value is None:
1589 # cookies.txt regards 'Set-Cookie: foo' as a cookie
1590 # with no name, whereas http.cookiejar regards it as a
1591 # cookie with no value.
1592 name, value = '', name
1593 f.write('%s\n' % '\t'.join((
1594 cookie.domain,
1595 self._true_or_false(cookie.domain.startswith('.')),
1596 cookie.path,
1597 self._true_or_false(cookie.secure),
1598 str_or_none(cookie.expires, default=''),
1599 name, value
1600 )))
1601
1602 def save(self, filename=None, *args, **kwargs):
1603 """
1604 Save cookies to a file.
1605 Code is taken from CPython 3.6
1606 https://github.com/python/cpython/blob/8d999cbf4adea053be6dbb612b9844635c4dfb8e/Lib/http/cookiejar.py#L2091-L2117 """
1607
1608 if filename is None:
1609 if self.filename is not None:
1610 filename = self.filename
1611 else:
1612 raise ValueError(http.cookiejar.MISSING_FILENAME_TEXT)
1613
1614 # Store session cookies with `expires` set to 0 instead of an empty string
1615 for cookie in self:
1616 if cookie.expires is None:
1617 cookie.expires = 0
1618
1619 with self.open(filename, write=True) as f:
1620 f.write(self._HEADER)
1621 self._really_save(f, *args, **kwargs)
1622
1623 def load(self, filename=None, ignore_discard=False, ignore_expires=False):
1624 """Load cookies from a file."""
1625 if filename is None:
1626 if self.filename is not None:
1627 filename = self.filename
1628 else:
1629 raise ValueError(http.cookiejar.MISSING_FILENAME_TEXT)
1630
1631 def prepare_line(line):
1632 if line.startswith(self._HTTPONLY_PREFIX):
1633 line = line[len(self._HTTPONLY_PREFIX):]
1634 # comments and empty lines are fine
1635 if line.startswith('#') or not line.strip():
1636 return line
1637 cookie_list = line.split('\t')
1638 if len(cookie_list) != self._ENTRY_LEN:
1639 raise http.cookiejar.LoadError('invalid length %d' % len(cookie_list))
1640 cookie = self._CookieFileEntry(*cookie_list)
1641 if cookie.expires_at and not cookie.expires_at.isdigit():
1642 raise http.cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
1643 return line
1644
1645 cf = io.StringIO()
1646 with self.open(filename) as f:
1647 for line in f:
1648 try:
1649 cf.write(prepare_line(line))
1650 except http.cookiejar.LoadError as e:
1651 if f'{line.strip()} '[0] in '[{"':
1652 raise http.cookiejar.LoadError(
1653 'Cookies file must be Netscape formatted, not JSON. See '
1654 'https://github.com/yt-dlp/yt-dlp/wiki/FAQ#how-do-i-pass-cookies-to-yt-dlp')
1655 write_string(f'WARNING: skipping cookie file entry due to {e}: {line!r}\n')
1656 continue
1657 cf.seek(0)
1658 self._really_load(cf, filename, ignore_discard, ignore_expires)
1659 # Session cookies are denoted by either `expires` field set to
1660 # an empty string or 0. MozillaCookieJar only recognizes the former
1661 # (see [1]). So we need force the latter to be recognized as session
1662 # cookies on our own.
1663 # Session cookies may be important for cookies-based authentication,
1664 # e.g. usually, when user does not check 'Remember me' check box while
1665 # logging in on a site, some important cookies are stored as session
1666 # cookies so that not recognizing them will result in failed login.
1667 # 1. https://bugs.python.org/issue17164
1668 for cookie in self:
1669 # Treat `expires=0` cookies as session cookies
1670 if cookie.expires == 0:
1671 cookie.expires = None
1672 cookie.discard = True
1673
1674
1675 class YoutubeDLCookieProcessor(urllib.request.HTTPCookieProcessor):
1676 def __init__(self, cookiejar=None):
1677 urllib.request.HTTPCookieProcessor.__init__(self, cookiejar)
1678
1679 def http_response(self, request, response):
1680 return urllib.request.HTTPCookieProcessor.http_response(self, request, response)
1681
1682 https_request = urllib.request.HTTPCookieProcessor.http_request
1683 https_response = http_response
1684
1685
1686 class YoutubeDLRedirectHandler(urllib.request.HTTPRedirectHandler):
1687 """YoutubeDL redirect handler
1688
1689 The code is based on HTTPRedirectHandler implementation from CPython [1].
1690
1691 This redirect handler solves two issues:
1692 - ensures redirect URL is always unicode under python 2
1693 - introduces support for experimental HTTP response status code
1694 308 Permanent Redirect [2] used by some sites [3]
1695
1696 1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
1697 2. https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308
1698 3. https://github.com/ytdl-org/youtube-dl/issues/28768
1699 """
1700
1701 http_error_301 = http_error_303 = http_error_307 = http_error_308 = urllib.request.HTTPRedirectHandler.http_error_302
1702
1703 def redirect_request(self, req, fp, code, msg, headers, newurl):
1704 """Return a Request or None in response to a redirect.
1705
1706 This is called by the http_error_30x methods when a
1707 redirection response is received. If a redirection should
1708 take place, return a new Request to allow http_error_30x to
1709 perform the redirect. Otherwise, raise HTTPError if no one
1710 else should try to handle this URL. Return None if you can't,
1711 but another handler might.
1712 """
1713 m = req.get_method()
1714 if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
1715 or code in (301, 302, 303) and m == "POST")):
1716 raise urllib.error.HTTPError(req.full_url, code, msg, headers, fp)
1717 # Strictly (according to RFC 2616), 301 or 302 in response to
1718 # a POST MUST NOT cause a redirection without confirmation
1719 # from the user (of urllib.request, in this case). In practice,
1720 # essentially all clients do redirect in this case, so we do
1721 # the same.
1722
1723 # Be lenient with URIs containing a space. This is mainly
1724 # redundant with the more complete encoding done in http_error_302(),
1725 # but it is kept for compatibility with other callers.
1726 newurl = newurl.replace(' ', '%20')
1727
1728 CONTENT_HEADERS = ("content-length", "content-type")
1729 # NB: Strip the Content-* headers, since the redirected request may use a different method or body
1730 newheaders = {k: v for k, v in req.headers.items() if k.lower() not in CONTENT_HEADERS}
1731
1732 # A 303 must either use GET or HEAD for subsequent request
1733 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.4
1734 if code == 303 and m != 'HEAD':
1735 m = 'GET'
1736 # 301 and 302 redirects are commonly turned into a GET from a POST
1737 # for subsequent requests by browsers, so we'll do the same.
1738 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.2
1739 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.3
1740 if code in (301, 302) and m == 'POST':
1741 m = 'GET'
1742
1743 return urllib.request.Request(
1744 newurl, headers=newheaders, origin_req_host=req.origin_req_host,
1745 unverifiable=True, method=m)
1746
1747
1748 def extract_timezone(date_str):
1749 m = re.search(
1750 r'''(?x)
1751 ^.{8,}? # >=8 char non-TZ prefix, if present
1752 (?P<tz>Z| # just the UTC Z, or
1753 (?:(?<=.\b\d{4}|\b\d{2}:\d\d)| # preceded by 4 digits or hh:mm or
1754 (?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d)) # not preceded by a 3-letter word, >=4 letters or 2 digits
1755 [ ]? # optional space
1756 (?P<sign>\+|-) # +/-
1757 (?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2}) # hh[:]mm
1758 $)
1759 ''', date_str)
1760 if not m:
1761 m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
1762 timezone = TIMEZONE_NAMES.get(m and m.group('tz').strip())
1763 if timezone is not None:
1764 date_str = date_str[:-len(m.group('tz'))]
1765 timezone = datetime.timedelta(hours=timezone or 0)
1766 else:
1767 date_str = date_str[:-len(m.group('tz'))]
1768 if not m.group('sign'):
1769 timezone = datetime.timedelta()
1770 else:
1771 sign = 1 if m.group('sign') == '+' else -1
1772 timezone = datetime.timedelta(
1773 hours=sign * int(m.group('hours')),
1774 minutes=sign * int(m.group('minutes')))
1775 return timezone, date_str
1776
1777
1778 def parse_iso8601(date_str, delimiter='T', timezone=None):
1779 """ Return a UNIX timestamp from the given date """
1780
1781 if date_str is None:
1782 return None
1783
1784 date_str = re.sub(r'\.[0-9]+', '', date_str)
1785
1786 if timezone is None:
1787 timezone, date_str = extract_timezone(date_str)
1788
1789 with contextlib.suppress(ValueError):
1790 date_format = f'%Y-%m-%d{delimiter}%H:%M:%S'
1791 dt = datetime.datetime.strptime(date_str, date_format) - timezone
1792 return calendar.timegm(dt.timetuple())
1793
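# Example usage (a minimal sketch; epoch values assume the parsing rules above,
# and all three inputs denote the same instant):
#   parse_iso8601('2014-03-23T23:04:26+0100') == 1395612266
#   parse_iso8601('2014-03-23T22:04:26Z') == 1395612266
#   parse_iso8601('2014-03-23T22:04:26') == 1395612266  # naive times are taken as UTC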
1794
1795 def date_formats(day_first=True):
1796 return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST
1797
1798
1799 def unified_strdate(date_str, day_first=True):
1800 """Return a string with the date in the format YYYYMMDD"""
1801
1802 if date_str is None:
1803 return None
1804 upload_date = None
1805 # Replace commas
1806 date_str = date_str.replace(',', ' ')
1807 # Remove AM/PM + timezone
1808 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
1809 _, date_str = extract_timezone(date_str)
1810
1811 for expression in date_formats(day_first):
1812 with contextlib.suppress(ValueError):
1813 upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
1814 if upload_date is None:
1815 timetuple = email.utils.parsedate_tz(date_str)
1816 if timetuple:
1817 with contextlib.suppress(ValueError):
1818 upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
1819 if upload_date is not None:
1820 return str(upload_date)
1821
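# Example usage (illustrative; exact input coverage depends on the DATE_FORMATS tables):
#   unified_strdate('December 21, 2010') == '20101221'
#   unified_strdate('8/7/2009') == '20090708'                    # day-first by default
#   unified_strdate('8/7/2009', day_first=False) == '20090807'   # month-first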
1822
1823 def unified_timestamp(date_str, day_first=True):
1824 if date_str is None:
1825 return None
1826
1827 date_str = re.sub(r'\s+', ' ', re.sub(
1828 r'(?i)[,|]|(mon|tues?|wed(nes)?|thu(rs)?|fri|sat(ur)?)(day)?', '', date_str))
1829
1830 pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
1831 timezone, date_str = extract_timezone(date_str)
1832
1833 # Remove AM/PM + timezone
1834 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
1835
1836 # Remove unrecognized timezones from ISO 8601 alike timestamps
1837 m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
1838 if m:
1839 date_str = date_str[:-len(m.group('tz'))]
1840
1841 # Python only supports microseconds, so remove nanoseconds
1842 m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
1843 if m:
1844 date_str = m.group(1)
1845
1846 for expression in date_formats(day_first):
1847 with contextlib.suppress(ValueError):
1848 dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
1849 return calendar.timegm(dt.timetuple())
1850
1851 timetuple = email.utils.parsedate_tz(date_str)
1852 if timetuple:
1853 return calendar.timegm(timetuple) + pm_delta * 3600 - timezone.total_seconds()
1854
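# Example usage (illustrative; epoch values are UTC):
#   unified_timestamp('December 15, 2017 at 7:49 am') == 1513324140
#   unified_timestamp('2017-12-15 07:49') == 1513324140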
1855
1856 def determine_ext(url, default_ext='unknown_video'):
1857 if url is None or '.' not in url:
1858 return default_ext
1859 guess = url.partition('?')[0].rpartition('.')[2]
1860 if re.match(r'^[A-Za-z0-9]+$', guess):
1861 return guess
1862 # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
1863 elif guess.rstrip('/') in KNOWN_EXTENSIONS:
1864 return guess.rstrip('/')
1865 else:
1866 return default_ext
1867
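# Example usage (illustrative):
#   determine_ext('http://example.com/foo/bar.mp4/?download') == 'mp4'
#   determine_ext('http://example.com/foo/bar/?download') == 'unknown_video'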
1868
1869 def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
1870 return replace_extension(filename, sub_lang + '.' + sub_format, expected_real_ext)
1871
1872
1873 def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
1874 R"""
1875 Return a datetime object from a string.
1876 Supported format:
1877 (now|today|yesterday|DATE)([+-]\d+(microsecond|second|minute|hour|day|week|month|year)s?)?
1878
1879 @param format strftime format of DATE
1880 @param precision Round the datetime object: auto|microsecond|second|minute|hour|day
1881 auto: round to the unit provided in date_str (if applicable).
1882 """
1883 auto_precision = False
1884 if precision == 'auto':
1885 auto_precision = True
1886 precision = 'microsecond'
1887 today = datetime_round(datetime.datetime.utcnow(), precision)
1888 if date_str in ('now', 'today'):
1889 return today
1890 if date_str == 'yesterday':
1891 return today - datetime.timedelta(days=1)
1892 match = re.match(
1893 r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?',
1894 date_str)
1895 if match is not None:
1896 start_time = datetime_from_str(match.group('start'), precision, format)
1897 time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1)
1898 unit = match.group('unit')
1899 if unit in ('month', 'year'):
1900 new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time)
1901 unit = 'day'
1902 else:
1903 if unit == 'week':
1904 unit = 'day'
1905 time *= 7
1906 delta = datetime.timedelta(**{unit + 's': time})
1907 new_date = start_time + delta
1908 if auto_precision:
1909 return datetime_round(new_date, unit)
1910 return new_date
1911
1912 return datetime_round(datetime.datetime.strptime(date_str, format), precision)
1913
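# Example usage (a minimal sketch; 'now'-relative results depend on the current UTC time):
#   datetime_from_str('now-1week')    # one week ago, auto-rounded to day precision
#   datetime_from_str('20200101+3months', format='%Y%m%d')   # 2020-04-01 00:00:00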
1914
1915 def date_from_str(date_str, format='%Y%m%d', strict=False):
1916 R"""
1917 Return a date object from a string using datetime_from_str
1918
1919 @param strict Restrict allowed patterns to "YYYYMMDD" and
1920 (now|today|yesterday)(-\d+(day|week|month|year)s?)?
1921 """
1922 if strict and not re.fullmatch(r'\d{8}|(now|today|yesterday)(-\d+(day|week|month|year)s?)?', date_str):
1923 raise ValueError(f'Invalid date format "{date_str}"')
1924 return datetime_from_str(date_str, precision='microsecond', format=format).date()
1925
1926
1927 def datetime_add_months(dt, months):
1928 """Increment/Decrement a datetime object by months."""
1929 month = dt.month + months - 1
1930 year = dt.year + month // 12
1931 month = month % 12 + 1
1932 day = min(dt.day, calendar.monthrange(year, month)[1])
1933 return dt.replace(year, month, day)
1934
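# Example usage (note the day is clamped to the target month's length):
#   datetime_add_months(datetime.datetime(2020, 1, 31), 1) == datetime.datetime(2020, 2, 29)
#   datetime_add_months(datetime.datetime(2020, 3, 15), -3) == datetime.datetime(2019, 12, 15)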
1935
1936 def datetime_round(dt, precision='day'):
1937 """
1938 Round a datetime object's time to a specific precision
1939 """
1940 if precision == 'microsecond':
1941 return dt
1942
1943 unit_seconds = {
1944 'day': 86400,
1945 'hour': 3600,
1946 'minute': 60,
1947 'second': 1,
1948 }
1949 roundto = lambda x, n: ((x + n / 2) // n) * n
1950 timestamp = calendar.timegm(dt.timetuple())
1951 return datetime.datetime.utcfromtimestamp(roundto(timestamp, unit_seconds[precision]))
1952
1953
1954 def hyphenate_date(date_str):
1955 """
1956 Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
1957 match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
1958 if match is not None:
1959 return '-'.join(match.groups())
1960 else:
1961 return date_str
1962
1963
1964 class DateRange:
1965 """Represents a time interval between two dates"""
1966
1967 def __init__(self, start=None, end=None):
1968 """start and end must be strings in the format accepted by date"""
1969 if start is not None:
1970 self.start = date_from_str(start, strict=True)
1971 else:
1972 self.start = datetime.datetime.min.date()
1973 if end is not None:
1974 self.end = date_from_str(end, strict=True)
1975 else:
1976 self.end = datetime.datetime.max.date()
1977 if self.start > self.end:
1978 raise ValueError('Date range "%s": the start date must be before the end date' % self)
1979
1980 @classmethod
1981 def day(cls, day):
1982 """Returns a range that only contains the given day"""
1983 return cls(day, day)
1984
1985 def __contains__(self, date):
1986 """Check if the date is in the range"""
1987 if not isinstance(date, datetime.date):
1988 date = date_from_str(date)
1989 return self.start <= date <= self.end
1990
1991 def __str__(self):
1992 return f'{self.start.isoformat()} - {self.end.isoformat()}'
1993
1994 def __eq__(self, other):
1995 return (isinstance(other, DateRange)
1996 and self.start == other.start and self.end == other.end)
1997
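# Example usage (illustrative):
#   '20200615' in DateRange('20200101', '20201231')   # True
#   '20210101' in DateRange.day('20200615')           # False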
1998
1999 def platform_name():
2000 """ Returns the platform name as a str """
2001 deprecation_warning(f'"{__name__}.platform_name" is deprecated, use "platform.platform" instead')
2002 return platform.platform()
2003
2004
2005 @functools.cache
2006 def system_identifier():
2007 python_implementation = platform.python_implementation()
2008 if python_implementation == 'PyPy' and hasattr(sys, 'pypy_version_info'):
2009 python_implementation += ' version %d.%d.%d' % sys.pypy_version_info[:3]
2010 libc_ver = []
2011 with contextlib.suppress(OSError): # We may not have access to the executable
2012 libc_ver = platform.libc_ver()
2013
2014 return 'Python %s (%s %s %s) - %s (%s%s)' % (
2015 platform.python_version(),
2016 python_implementation,
2017 platform.machine(),
2018 platform.architecture()[0],
2019 platform.platform(),
2020 ssl.OPENSSL_VERSION,
2021 format_field(join_nonempty(*libc_ver, delim=' '), None, ', %s'),
2022 )
2023
2024
2025 @functools.cache
2026 def get_windows_version():
2027 ''' Get the Windows version. Returns () if not running on Windows '''
2028 if compat_os_name == 'nt':
2029 return version_tuple(platform.win32_ver()[1])
2030 else:
2031 return ()
2032
2033
2034 def write_string(s, out=None, encoding=None):
2035 assert isinstance(s, str)
2036 out = out or sys.stderr
2037
2038 if compat_os_name == 'nt' and supports_terminal_sequences(out):
2039 s = re.sub(r'([\r\n]+)', r' \1', s)
2040
2041 enc, buffer = None, out
2042 if 'b' in getattr(out, 'mode', ''):
2043 enc = encoding or preferredencoding()
2044 elif hasattr(out, 'buffer'):
2045 buffer = out.buffer
2046 enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
2047
2048 buffer.write(s.encode(enc, 'ignore') if enc else s)
2049 out.flush()
2050
2051
2052 def deprecation_warning(msg, *, printer=None, stacklevel=0, **kwargs):
2053 from . import _IN_CLI
2054 if _IN_CLI:
2055 if msg in deprecation_warning._cache:
2056 return
2057 deprecation_warning._cache.add(msg)
2058 if printer:
2059 return printer(f'{msg}{bug_reports_message()}', **kwargs)
2060 return write_string(f'ERROR: {msg}{bug_reports_message()}\n', **kwargs)
2061 else:
2062 import warnings
2063 warnings.warn(DeprecationWarning(msg), stacklevel=stacklevel + 3)
2064
2065
2066 deprecation_warning._cache = set()
2067
2068
2069 def bytes_to_intlist(bs):
2070 if not bs:
2071 return []
2072 if isinstance(bs[0], int): # Python 3
2073 return list(bs)
2074 else:
2075 return [ord(c) for c in bs]
2076
2077
2078 def intlist_to_bytes(xs):
2079 if not xs:
2080 return b''
2081 return struct.pack('%dB' % len(xs), *xs)
2082
2083
2084 class LockingUnsupportedError(OSError):
2085 msg = 'File locking is not supported'
2086
2087 def __init__(self):
2088 super().__init__(self.msg)
2089
2090
2091 # Cross-platform file locking
2092 if sys.platform == 'win32':
2093 import ctypes
2094 import ctypes.wintypes
2095 import msvcrt
2096
2097 class OVERLAPPED(ctypes.Structure):
2098 _fields_ = [
2099 ('Internal', ctypes.wintypes.LPVOID),
2100 ('InternalHigh', ctypes.wintypes.LPVOID),
2101 ('Offset', ctypes.wintypes.DWORD),
2102 ('OffsetHigh', ctypes.wintypes.DWORD),
2103 ('hEvent', ctypes.wintypes.HANDLE),
2104 ]
2105
2106 kernel32 = ctypes.WinDLL('kernel32')
2107 LockFileEx = kernel32.LockFileEx
2108 LockFileEx.argtypes = [
2109 ctypes.wintypes.HANDLE, # hFile
2110 ctypes.wintypes.DWORD, # dwFlags
2111 ctypes.wintypes.DWORD, # dwReserved
2112 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
2113 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
2114 ctypes.POINTER(OVERLAPPED) # Overlapped
2115 ]
2116 LockFileEx.restype = ctypes.wintypes.BOOL
2117 UnlockFileEx = kernel32.UnlockFileEx
2118 UnlockFileEx.argtypes = [
2119 ctypes.wintypes.HANDLE, # hFile
2120 ctypes.wintypes.DWORD, # dwReserved
2121 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
2122 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
2123 ctypes.POINTER(OVERLAPPED) # Overlapped
2124 ]
2125 UnlockFileEx.restype = ctypes.wintypes.BOOL
2126 whole_low = 0xffffffff
2127 whole_high = 0x7fffffff
2128
2129 def _lock_file(f, exclusive, block):
2130 overlapped = OVERLAPPED()
2131 overlapped.Offset = 0
2132 overlapped.OffsetHigh = 0
2133 overlapped.hEvent = 0
2134 f._lock_file_overlapped_p = ctypes.pointer(overlapped)
2135
2136 if not LockFileEx(msvcrt.get_osfhandle(f.fileno()),
2137 (0x2 if exclusive else 0x0) | (0x0 if block else 0x1),
2138 0, whole_low, whole_high, f._lock_file_overlapped_p):
2139 # NB: the no-argument form of "ctypes.FormatError" does not work on PyPy
2140 raise BlockingIOError(f'Locking file failed: {ctypes.FormatError(ctypes.GetLastError())!r}')
2141
2142 def _unlock_file(f):
2143 assert f._lock_file_overlapped_p
2144 handle = msvcrt.get_osfhandle(f.fileno())
2145 if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p):
2146 raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
2147
2148 else:
2149 try:
2150 import fcntl
2151
2152 def _lock_file(f, exclusive, block):
2153 flags = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
2154 if not block:
2155 flags |= fcntl.LOCK_NB
2156 try:
2157 fcntl.flock(f, flags)
2158 except BlockingIOError:
2159 raise
2160 except OSError: # AOSP does not have flock()
2161 fcntl.lockf(f, flags)
2162
2163 def _unlock_file(f):
2164 try:
2165 fcntl.flock(f, fcntl.LOCK_UN)
2166 except OSError:
2167 fcntl.lockf(f, fcntl.LOCK_UN)
2168
2169 except ImportError:
2170
2171 def _lock_file(f, exclusive, block):
2172 raise LockingUnsupportedError()
2173
2174 def _unlock_file(f):
2175 raise LockingUnsupportedError()
2176
2177
2178 class locked_file:
2179 locked = False
2180
2181 def __init__(self, filename, mode, block=True, encoding=None):
2182 if mode not in {'r', 'rb', 'a', 'ab', 'w', 'wb'}:
2183 raise NotImplementedError(mode)
2184 self.mode, self.block = mode, block
2185
2186 writable = any(f in mode for f in 'wax+')
2187 readable = any(f in mode for f in 'r+')
2188 flags = functools.reduce(operator.ior, (
2189 getattr(os, 'O_CLOEXEC', 0), # UNIX only
2190 getattr(os, 'O_BINARY', 0), # Windows only
2191 getattr(os, 'O_NOINHERIT', 0), # Windows only
2192 os.O_CREAT if writable else 0, # O_TRUNC only after locking
2193 os.O_APPEND if 'a' in mode else 0,
2194 os.O_EXCL if 'x' in mode else 0,
2195 os.O_RDONLY if not writable else os.O_RDWR if readable else os.O_WRONLY,
2196 ))
2197
2198 self.f = os.fdopen(os.open(filename, flags, 0o666), mode, encoding=encoding)
2199
2200 def __enter__(self):
2201 exclusive = 'r' not in self.mode
2202 try:
2203 _lock_file(self.f, exclusive, self.block)
2204 self.locked = True
2205 except OSError:
2206 self.f.close()
2207 raise
2208 if 'w' in self.mode:
2209 try:
2210 self.f.truncate()
2211 except OSError as e:
2212 if e.errno not in (
2213 errno.ESPIPE, # Illegal seek - expected for FIFO
2214 errno.EINVAL, # Invalid argument - expected for /dev/null
2215 ):
2216 raise
2217 return self
2218
2219 def unlock(self):
2220 if not self.locked:
2221 return
2222 try:
2223 _unlock_file(self.f)
2224 finally:
2225 self.locked = False
2226
2227 def __exit__(self, *_):
2228 try:
2229 self.unlock()
2230 finally:
2231 self.f.close()
2232
2233 open = __enter__
2234 close = __exit__
2235
2236 def __getattr__(self, attr):
2237 return getattr(self.f, attr)
2238
2239 def __iter__(self):
2240 return iter(self.f)
2241
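# Example usage (a minimal sketch; 'state.txt' is a hypothetical file, and
# block=True makes the call wait until the lock can be acquired):
#   with locked_file('state.txt', 'w', block=True) as f:
#       f.write('exclusive access')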
2242
2243 @functools.cache
2244 def get_filesystem_encoding():
2245 encoding = sys.getfilesystemencoding()
2246 return encoding if encoding is not None else 'utf-8'
2247
2248
2249 def shell_quote(args):
2250 quoted_args = []
2251 encoding = get_filesystem_encoding()
2252 for a in args:
2253 if isinstance(a, bytes):
2254 # We may get a filename encoded with 'encodeFilename'
2255 a = a.decode(encoding)
2256 quoted_args.append(compat_shlex_quote(a))
2257 return ' '.join(quoted_args)
2258
2259
2260 def smuggle_url(url, data):
2261 """ Pass additional data in a URL for internal use. """
2262
2263 url, idata = unsmuggle_url(url, {})
2264 data.update(idata)
2265 sdata = urllib.parse.urlencode(
2266 {'__youtubedl_smuggle': json.dumps(data)})
2267 return url + '#' + sdata
2268
2269
2270 def unsmuggle_url(smug_url, default=None):
2271 if '#__youtubedl_smuggle' not in smug_url:
2272 return smug_url, default
2273 url, _, sdata = smug_url.rpartition('#')
2274 jsond = urllib.parse.parse_qs(sdata)['__youtubedl_smuggle'][0]
2275 data = json.loads(jsond)
2276 return url, data
2277
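# Example round-trip (the exact fragment encoding is an implementation detail):
#   url = smuggle_url('http://example.com/video', {'key': 'value'})
#   unsmuggle_url(url) == ('http://example.com/video', {'key': 'value'})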
2278
2279 def format_decimal_suffix(num, fmt='%d%s', *, factor=1000):
2280 """ Formats numbers with decimal sufixes like K, M, etc """
2281 num, factor = float_or_none(num), float(factor)
2282 if num is None or num < 0:
2283 return None
2284 POSSIBLE_SUFFIXES = 'kMGTPEZY'
2285 exponent = 0 if num == 0 else min(int(math.log(num, factor)), len(POSSIBLE_SUFFIXES))
2286 suffix = ['', *POSSIBLE_SUFFIXES][exponent]
2287 if factor == 1024:
2288 suffix = {'k': 'Ki', '': ''}.get(suffix, f'{suffix}i')
2289 converted = num / (factor ** exponent)
2290 return fmt % (converted, suffix)
2291
2292
2293 def format_bytes(bytes):
2294 return format_decimal_suffix(bytes, '%.2f%sB', factor=1024) or 'N/A'
2295
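# Example usage (illustrative):
#   format_decimal_suffix(1234000) == '1M'   # default fmt '%d%s' truncates the mantissa
#   format_bytes(1024) == '1.00KiB'
#   format_bytes(None) == 'N/A'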
2296
2297 def lookup_unit_table(unit_table, s, strict=False):
2298 num_re = NUMBER_RE if strict else NUMBER_RE.replace(R'\.', '[,.]')
2299 units_re = '|'.join(re.escape(u) for u in unit_table)
2300 m = (re.fullmatch if strict else re.match)(
2301 rf'(?P<num>{num_re})\s*(?P<unit>{units_re})\b', s)
2302 if not m:
2303 return None
2304
2305 num = float(m.group('num').replace(',', '.'))
2306 mult = unit_table[m.group('unit')]
2307 return round(num * mult)
2308
2309
2310 def parse_bytes(s):
2311 """Parse a string indicating a byte quantity into an integer"""
2312 return lookup_unit_table(
2313 {u: 1024**i for i, u in enumerate(['', *'KMGTPEZY'])},
2314 s.upper(), strict=True)
2315
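# Example usage (single-letter units with binary multipliers):
#   parse_bytes('500K') == 512000
#   parse_bytes('1.5m') == 1572864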
2316
2317 def parse_filesize(s):
2318 if s is None:
2319 return None
2320
2321 # The lower-case forms are of course incorrect and unofficial,
2322 # but we support those too
2323 _UNIT_TABLE = {
2324 'B': 1,
2325 'b': 1,
2326 'bytes': 1,
2327 'KiB': 1024,
2328 'KB': 1000,
2329 'kB': 1024,
2330 'Kb': 1000,
2331 'kb': 1000,
2332 'kilobytes': 1000,
2333 'kibibytes': 1024,
2334 'MiB': 1024 ** 2,
2335 'MB': 1000 ** 2,
2336 'mB': 1024 ** 2,
2337 'Mb': 1000 ** 2,
2338 'mb': 1000 ** 2,
2339 'megabytes': 1000 ** 2,
2340 'mebibytes': 1024 ** 2,
2341 'GiB': 1024 ** 3,
2342 'GB': 1000 ** 3,
2343 'gB': 1024 ** 3,
2344 'Gb': 1000 ** 3,
2345 'gb': 1000 ** 3,
2346 'gigabytes': 1000 ** 3,
2347 'gibibytes': 1024 ** 3,
2348 'TiB': 1024 ** 4,
2349 'TB': 1000 ** 4,
2350 'tB': 1024 ** 4,
2351 'Tb': 1000 ** 4,
2352 'tb': 1000 ** 4,
2353 'terabytes': 1000 ** 4,
2354 'tebibytes': 1024 ** 4,
2355 'PiB': 1024 ** 5,
2356 'PB': 1000 ** 5,
2357 'pB': 1024 ** 5,
2358 'Pb': 1000 ** 5,
2359 'pb': 1000 ** 5,
2360 'petabytes': 1000 ** 5,
2361 'pebibytes': 1024 ** 5,
2362 'EiB': 1024 ** 6,
2363 'EB': 1000 ** 6,
2364 'eB': 1024 ** 6,
2365 'Eb': 1000 ** 6,
2366 'eb': 1000 ** 6,
2367 'exabytes': 1000 ** 6,
2368 'exbibytes': 1024 ** 6,
2369 'ZiB': 1024 ** 7,
2370 'ZB': 1000 ** 7,
2371 'zB': 1024 ** 7,
2372 'Zb': 1000 ** 7,
2373 'zb': 1000 ** 7,
2374 'zettabytes': 1000 ** 7,
2375 'zebibytes': 1024 ** 7,
2376 'YiB': 1024 ** 8,
2377 'YB': 1000 ** 8,
2378 'yB': 1024 ** 8,
2379 'Yb': 1000 ** 8,
2380 'yb': 1000 ** 8,
2381 'yottabytes': 1000 ** 8,
2382 'yobibytes': 1024 ** 8,
2383 }
2384
2385 return lookup_unit_table(_UNIT_TABLE, s)
2386
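# Example usage (decimal vs binary units; a comma is accepted as decimal separator):
#   parse_filesize('5 MiB') == 5242880
#   parse_filesize('9.5MB') == 9500000
#   parse_filesize('1,24 KB') == 1240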
2387
2388 def parse_count(s):
2389 if s is None:
2390 return None
2391
2392 s = re.sub(r'^[^\d]+\s', '', s).strip()
2393
2394 if re.match(r'^[\d,.]+$', s):
2395 return str_to_int(s)
2396
2397 _UNIT_TABLE = {
2398 'k': 1000,
2399 'K': 1000,
2400 'm': 1000 ** 2,
2401 'M': 1000 ** 2,
2402 'kk': 1000 ** 2,
2403 'KK': 1000 ** 2,
2404 'b': 1000 ** 3,
2405 'B': 1000 ** 3,
2406 }
2407
2408 ret = lookup_unit_table(_UNIT_TABLE, s)
2409 if ret is not None:
2410 return ret
2411
2412 mobj = re.match(r'([\d,.]+)(?:$|\s)', s)
2413 if mobj:
2414 return str_to_int(mobj.group(1))
2415
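# Example usage (illustrative):
#   parse_count('1000') == 1000
#   parse_count('18.8k') == 18800
#   parse_count('1.1M views') == 1100000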
2416
2417 def parse_resolution(s, *, lenient=False):
2418 if s is None:
2419 return {}
2420
2421 if lenient:
2422 mobj = re.search(r'(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)', s)
2423 else:
2424 mobj = re.search(r'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s)
2425 if mobj:
2426 return {
2427 'width': int(mobj.group('w')),
2428 'height': int(mobj.group('h')),
2429 }
2430
2431 mobj = re.search(r'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s)
2432 if mobj:
2433 return {'height': int(mobj.group(1))}
2434
2435 mobj = re.search(r'\b([48])[kK]\b', s)
2436 if mobj:
2437 return {'height': int(mobj.group(1)) * 540}
2438
2439 return {}
2440
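# Example usage (illustrative):
#   parse_resolution('1920x1080') == {'width': 1920, 'height': 1080}
#   parse_resolution('720p') == {'height': 720}
#   parse_resolution('4k') == {'height': 2160}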
2441
2442 def parse_bitrate(s):
2443 if not isinstance(s, str):
2444 return
2445 mobj = re.search(r'\b(\d+)\s*kbps', s)
2446 if mobj:
2447 return int(mobj.group(1))
2448
2449
2450 def month_by_name(name, lang='en'):
2451 """ Return the number of a month by (locale-independently) English name """
2452
2453 month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
2454
2455 try:
2456 return month_names.index(name) + 1
2457 except ValueError:
2458 return None
2459
2460
2461 def month_by_abbreviation(abbrev):
2462 """ Return the number of a month by (locale-independently) English
2463 abbreviations """
2464
2465 try:
2466 return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
2467 except ValueError:
2468 return None
2469
2470
2471 def fix_xml_ampersands(xml_str):
2472 """Replace all the '&' by '&amp;' in XML"""
2473 return re.sub(
2474 r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
2475 '&amp;',
2476 xml_str)
2477
2478
2479 def setproctitle(title):
2480 assert isinstance(title, str)
2481
2482 # Workaround for https://github.com/yt-dlp/yt-dlp/issues/4541
2483 try:
2484 import ctypes
2485 except ImportError:
2486 return
2487
2488 try:
2489 libc = ctypes.cdll.LoadLibrary('libc.so.6')
2490 except OSError:
2491 return
2492 except TypeError:
2493 # LoadLibrary in Windows Python 2.7.13 only expects
2494 # a bytestring, but since unicode_literals turns
2495 # every string into a unicode string, it fails.
2496 return
2497 title_bytes = title.encode()
2498 buf = ctypes.create_string_buffer(len(title_bytes))
2499 buf.value = title_bytes
2500 try:
2501 libc.prctl(15, buf, 0, 0, 0)
2502 except AttributeError:
2503 return # Strange libc, just skip this
2504
2505
2506 def remove_start(s, start):
2507 return s[len(start):] if s is not None and s.startswith(start) else s
2508
2509
2510 def remove_end(s, end):
2511 return s[:-len(end)] if s is not None and s.endswith(end) else s
2512
2513
2514 def remove_quotes(s):
2515 if s is None or len(s) < 2:
2516 return s
2517 for quote in ('"', "'", ):
2518 if s[0] == quote and s[-1] == quote:
2519 return s[1:-1]
2520 return s
2521
2522
2523 def get_domain(url):
2524 """
2525 This implementation is inconsistent, but is kept for compatibility.
2526 Use this only for "webpage_url_domain"
2527 """
2528 return remove_start(urllib.parse.urlparse(url).netloc, 'www.') or None
2529
2530
2531 def url_basename(url):
2532 path = urllib.parse.urlparse(url).path
2533 return path.strip('/').split('/')[-1]
2534
2535
2536 def base_url(url):
2537 return re.match(r'https?://[^?#]+/', url).group()
2538
2539
2540 def urljoin(base, path):
2541 if isinstance(path, bytes):
2542 path = path.decode()
2543 if not isinstance(path, str) or not path:
2544 return None
2545 if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
2546 return path
2547 if isinstance(base, bytes):
2548 base = base.decode()
2549 if not isinstance(base, str) or not re.match(
2550 r'^(?:https?:)?//', base):
2551 return None
2552 return urllib.parse.urljoin(base, path)
2553
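# Example usage (illustrative):
#   urljoin('https://example.com/a/', 'b/c') == 'https://example.com/a/b/c'
#   urljoin('https://example.com/a/', '//cdn.example.com/x') == '//cdn.example.com/x'
#   urljoin(None, 'b/c') is None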
2554
2555 class HEADRequest(urllib.request.Request):
2556 def get_method(self):
2557 return 'HEAD'
2558
2559
2560 class PUTRequest(urllib.request.Request):
2561 def get_method(self):
2562 return 'PUT'
2563
2564
2565 def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
2566 if get_attr and v is not None:
2567 v = getattr(v, get_attr, None)
2568 try:
2569 return int(v) * invscale // scale
2570 except (ValueError, TypeError, OverflowError):
2571 return default
2572
2573
2574 def str_or_none(v, default=None):
2575 return default if v is None else str(v)
2576
2577
2578 def str_to_int(int_str):
2579 """ A more relaxed version of int_or_none """
2580 if isinstance(int_str, int):
2581 return int_str
2582 elif isinstance(int_str, str):
2583 int_str = re.sub(r'[,\.\+]', '', int_str)
2584 return int_or_none(int_str)
2585
2586
2587 def float_or_none(v, scale=1, invscale=1, default=None):
2588 if v is None:
2589 return default
2590 try:
2591 return float(v) * invscale / scale
2592 except (ValueError, TypeError):
2593 return default
2594
2595
2596 def bool_or_none(v, default=None):
2597 return v if isinstance(v, bool) else default
2598
2599
2600 def strip_or_none(v, default=None):
2601 return v.strip() if isinstance(v, str) else default
2602
2603
2604 def url_or_none(url):
2605 if not url or not isinstance(url, str):
2606 return None
2607 url = url.strip()
2608 return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None
2609
2610
2611 def request_to_url(req):
2612 if isinstance(req, urllib.request.Request):
2613 return req.get_full_url()
2614 else:
2615 return req
2616
2617
2618 def strftime_or_none(timestamp, date_format, default=None):
2619 datetime_object = None
2620 try:
2621 if isinstance(timestamp, (int, float)): # unix timestamp
2622 # Using naive datetime here can break timestamp() in Windows
2623 # Ref: https://github.com/yt-dlp/yt-dlp/issues/5185, https://github.com/python/cpython/issues/94414
2624 datetime_object = datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc)
2625 elif isinstance(timestamp, str): # assume YYYYMMDD
2626 datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
2627 date_format = re.sub( # Support %s on windows
2628 r'(?<!%)(%%)*%s', rf'\g<1>{int(datetime_object.timestamp())}', date_format)
2629 return datetime_object.strftime(date_format)
2630 except (ValueError, TypeError, AttributeError):
2631 return default
2632
2633
2634 def parse_duration(s):
2635 if not isinstance(s, str):
2636 return None
2637 s = s.strip()
2638 if not s:
2639 return None
2640
2641 days, hours, mins, secs, ms = [None] * 5
2642 m = re.match(r'''(?x)
2643 (?P<before_secs>
2644 (?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?
2645 (?P<secs>(?(before_secs)[0-9]{1,2}|[0-9]+))
2646 (?P<ms>[.:][0-9]+)?Z?$
2647 ''', s)
2648 if m:
2649 days, hours, mins, secs, ms = m.group('days', 'hours', 'mins', 'secs', 'ms')
2650 else:
2651 m = re.match(
2652 r'''(?ix)(?:P?
2653 (?:
2654 [0-9]+\s*y(?:ears?)?,?\s*
2655 )?
2656 (?:
2657 [0-9]+\s*m(?:onths?)?,?\s*
2658 )?
2659 (?:
2660 [0-9]+\s*w(?:eeks?)?,?\s*
2661 )?
2662 (?:
2663 (?P<days>[0-9]+)\s*d(?:ays?)?,?\s*
2664 )?
2665 T)?
2666 (?:
2667 (?P<hours>[0-9]+)\s*h(?:ours?)?,?\s*
2668 )?
2669 (?:
2670 (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?,?\s*
2671 )?
2672 (?:
2673 (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
2674 )?Z?$''', s)
2675 if m:
2676 days, hours, mins, secs, ms = m.groups()
2677 else:
2678 m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
2679 if m:
2680 hours, mins = m.groups()
2681 else:
2682 return None
2683
2684 if ms:
2685 ms = ms.replace(':', '.')
2686 return sum(float(part or 0) * mult for part, mult in (
2687 (days, 86400), (hours, 3600), (mins, 60), (secs, 1), (ms, 1)))
2688
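# Example usage (returns seconds as a float):
#   parse_duration('1:30:45') == 5445
#   parse_duration('PT1H30M') == 5400
#   parse_duration('3 min') == 180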
2689
2690 def prepend_extension(filename, ext, expected_real_ext=None):
2691 name, real_ext = os.path.splitext(filename)
2692 return (
2693 f'{name}.{ext}{real_ext}'
2694 if not expected_real_ext or real_ext[1:] == expected_real_ext
2695 else f'{filename}.{ext}')
2696
2697
2698 def replace_extension(filename, ext, expected_real_ext=None):
2699 name, real_ext = os.path.splitext(filename)
2700 return '{}.{}'.format(
2701 name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
2702 ext)
2703
2704
2705 def check_executable(exe, args=[]):
2706 """ Checks if the given binary is installed somewhere in PATH, and returns its name.
2707 args can be a list of arguments for a short output (like -version) """
2708 try:
2709 Popen.run([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
2710 except OSError:
2711 return False
2712 return exe
2713
2714
2715 def _get_exe_version_output(exe, args):
2716 try:
2717 # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
2718 # SIGTTOU if yt-dlp is run in the background.
2719 # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
2720 stdout, _, ret = Popen.run([encodeArgument(exe)] + args, text=True,
2721 stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
2722 if ret:
2723 return None
2724 except OSError:
2725 return False
2726 return stdout
2727
2728
2729 def detect_exe_version(output, version_re=None, unrecognized='present'):
2730 assert isinstance(output, str)
2731 if version_re is None:
2732 version_re = r'version\s+([-0-9._a-zA-Z]+)'
2733 m = re.search(version_re, output)
2734 if m:
2735 return m.group(1)
2736 else:
2737 return unrecognized
2738
2739
2740 def get_exe_version(exe, args=['--version'],
2741 version_re=None, unrecognized=('present', 'broken')):
2742 """ Returns the version of the specified executable,
2743 or False if the executable is not present """
2744 unrecognized = variadic(unrecognized)
2745 assert len(unrecognized) in (1, 2)
2746 out = _get_exe_version_output(exe, args)
2747 if out is None:
2748 return unrecognized[-1]
2749 return out and detect_exe_version(out, version_re, unrecognized[0])
2750
2751
2752 def frange(start=0, stop=None, step=1):
2753 """Float range"""
2754 if stop is None:
2755 start, stop = 0, start
2756 sign = [-1, 1][step > 0] if step else 0
2757 while sign * start < sign * stop:
2758 yield start
2759 start += step
2760
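# Example usage (the stop value is exclusive, as with range()):
#   list(frange(0, 1, 0.25)) == [0.0, 0.25, 0.5, 0.75]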
2761
2762 class LazyList(collections.abc.Sequence):
2763 """Lazy immutable list from an iterable
2764 Note that slices of a LazyList are plain lists, not LazyLists"""
2765
2766 class IndexError(IndexError):
2767 pass
2768
2769 def __init__(self, iterable, *, reverse=False, _cache=None):
2770 self._iterable = iter(iterable)
2771 self._cache = [] if _cache is None else _cache
2772 self._reversed = reverse
2773
2774 def __iter__(self):
2775 if self._reversed:
2776 # We need to consume the entire iterable to iterate in reverse
2777 yield from self.exhaust()
2778 return
2779 yield from self._cache
2780 for item in self._iterable:
2781 self._cache.append(item)
2782 yield item
2783
2784 def _exhaust(self):
2785 self._cache.extend(self._iterable)
2786 self._iterable = [] # Discard the emptied iterable to make it pickle-able
2787 return self._cache
2788
2789 def exhaust(self):
2790 """Evaluate the entire iterable"""
2791 return self._exhaust()[::-1 if self._reversed else 1]
2792
2793 @staticmethod
2794 def _reverse_index(x):
2795 return None if x is None else ~x
2796
2797 def __getitem__(self, idx):
2798 if isinstance(idx, slice):
2799 if self._reversed:
2800 idx = slice(self._reverse_index(idx.start), self._reverse_index(idx.stop), -(idx.step or 1))
2801 start, stop, step = idx.start, idx.stop, idx.step or 1
2802 elif isinstance(idx, int):
2803 if self._reversed:
2804 idx = self._reverse_index(idx)
2805 start, stop, step = idx, idx, 0
2806 else:
2807 raise TypeError('indices must be integers or slices')
2808 if ((start or 0) < 0 or (stop or 0) < 0
2809 or (start is None and step < 0)
2810 or (stop is None and step > 0)):
2811 # We need to consume the entire iterable to be able to slice from the end
2812 # Obviously, never use this with infinite iterables
2813 self._exhaust()
2814 try:
2815 return self._cache[idx]
2816 except IndexError as e:
2817 raise self.IndexError(e) from e
2818 n = max(start or 0, stop or 0) - len(self._cache) + 1
2819 if n > 0:
2820 self._cache.extend(itertools.islice(self._iterable, n))
2821 try:
2822 return self._cache[idx]
2823 except IndexError as e:
2824 raise self.IndexError(e) from e
2825
2826 def __bool__(self):
2827 try:
2828 self[-1] if self._reversed else self[0]
2829 except self.IndexError:
2830 return False
2831 return True
2832
2833 def __len__(self):
2834 self._exhaust()
2835 return len(self._cache)
2836
2837 def __reversed__(self):
2838 return type(self)(self._iterable, reverse=not self._reversed, _cache=self._cache)
2839
2840 def __copy__(self):
2841 return type(self)(self._iterable, reverse=self._reversed, _cache=self._cache)
2842
2843 def __repr__(self):
2844 # repr and str should mimic a list. So we exhaust the iterable
2845 return repr(self.exhaust())
2846
2847 def __str__(self):
2848 return repr(self.exhaust())
2849
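# Example usage (a minimal sketch; items are pulled from the iterable only on demand):
#   lazy = LazyList(itertools.count())
#   lazy[4] == 4           # consumes only the first five items
#   lazy[:3] == [0, 1, 2]  # slices are plain lists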
2850
2851 class PagedList:
2852
2853 class IndexError(IndexError):
2854 pass
2855
2856 def __len__(self):
2857 # This is only useful for tests
2858 return len(self.getslice())
2859
2860 def __init__(self, pagefunc, pagesize, use_cache=True):
2861 self._pagefunc = pagefunc
2862 self._pagesize = pagesize
2863 self._pagecount = float('inf')
2864 self._use_cache = use_cache
2865 self._cache = {}
2866
2867 def getpage(self, pagenum):
2868 page_results = self._cache.get(pagenum)
2869 if page_results is None:
2870 page_results = [] if pagenum > self._pagecount else list(self._pagefunc(pagenum))
2871 if self._use_cache:
2872 self._cache[pagenum] = page_results
2873 return page_results
2874
2875 def getslice(self, start=0, end=None):
2876 return list(self._getslice(start, end))
2877
2878 def _getslice(self, start, end):
2879 raise NotImplementedError('This method must be implemented by subclasses')
2880
2881 def __getitem__(self, idx):
2882 assert self._use_cache, 'Indexing PagedList requires cache'
2883 if not isinstance(idx, int) or idx < 0:
2884 raise TypeError('indices must be non-negative integers')
2885 entries = self.getslice(idx, idx + 1)
2886 if not entries:
2887 raise self.IndexError()
2888 return entries[0]
2889
2890
2891 class OnDemandPagedList(PagedList):
2892 """Download pages until a page with less than maximum results"""
2893
2894 def _getslice(self, start, end):
2895 for pagenum in itertools.count(start // self._pagesize):
2896 firstid = pagenum * self._pagesize
2897 nextfirstid = pagenum * self._pagesize + self._pagesize
2898 if start >= nextfirstid:
2899 continue
2900
2901 startv = (
2902 start % self._pagesize
2903 if firstid <= start < nextfirstid
2904 else 0)
2905 endv = (
2906 ((end - 1) % self._pagesize) + 1
2907 if (end is not None and firstid <= end <= nextfirstid)
2908 else None)
2909
2910 try:
2911 page_results = self.getpage(pagenum)
2912 except Exception:
2913 self._pagecount = pagenum - 1
2914 raise
2915 if startv != 0 or endv is not None:
2916 page_results = page_results[startv:endv]
2917 yield from page_results
2918
2919 # A little optimization: if the current page is not "full", i.e. does
2920 # not contain page_size videos, then we can assume that this page
2921 # is the last one - there are no more IDs on further pages -
2922 # so there is no need to query again.
2923 if len(page_results) + startv < self._pagesize:
2924 break
2925
2926 # If we got the whole page, but the next page is not interesting,
2927 # break out early as well
2928 if end == nextfirstid:
2929 break
2930
2931
2932 class InAdvancePagedList(PagedList):
2933 """PagedList with total number of pages known in advance"""
2934
2935 def __init__(self, pagefunc, pagecount, pagesize):
2936 PagedList.__init__(self, pagefunc, pagesize, True)
2937 self._pagecount = pagecount
2938
2939 def _getslice(self, start, end):
2940 start_page = start // self._pagesize
2941 end_page = self._pagecount if end is None else min(self._pagecount, end // self._pagesize + 1)
2942 skip_elems = start - start_page * self._pagesize
2943 only_more = None if end is None else end - start
2944 for pagenum in range(start_page, end_page):
2945 page_results = self.getpage(pagenum)
2946 if skip_elems:
2947 page_results = page_results[skip_elems:]
2948 skip_elems = None
2949 if only_more is not None:
2950 if len(page_results) < only_more:
2951 only_more -= len(page_results)
2952 else:
2953 yield from page_results[:only_more]
2954 break
2955 yield from page_results
2956
2957
2958 class PlaylistEntries:
2959 MissingEntry = object()
2960 is_exhausted = False
2961
2962 def __init__(self, ydl, info_dict):
2963 self.ydl = ydl
2964
2965 # _entries must be assigned now since infodict can change during iteration
2966 entries = info_dict.get('entries')
2967 if entries is None:
2968 raise EntryNotInPlaylist('There are no entries')
2969 elif isinstance(entries, list):
2970 self.is_exhausted = True
2971
2972 requested_entries = info_dict.get('requested_entries')
2973 self.is_incomplete = requested_entries is not None
2974 if self.is_incomplete:
2975 assert self.is_exhausted
2976 self._entries = [self.MissingEntry] * max(requested_entries or [0])
2977 for i, entry in zip(requested_entries, entries):
2978 self._entries[i - 1] = entry
2979 elif isinstance(entries, (list, PagedList, LazyList)):
2980 self._entries = entries
2981 else:
2982 self._entries = LazyList(entries)
2983
2984 PLAYLIST_ITEMS_RE = re.compile(r'''(?x)
2985 (?P<start>[+-]?\d+)?
2986 (?P<range>[:-]
2987 (?P<end>[+-]?\d+|inf(?:inite)?)?
2988 (?::(?P<step>[+-]?\d+))?
2989 )?''')
2990
2991 @classmethod
2992 def parse_playlist_items(cls, string):
2993 for segment in string.split(','):
2994 if not segment:
2995 raise ValueError('There are two or more consecutive commas')
2996 mobj = cls.PLAYLIST_ITEMS_RE.fullmatch(segment)
2997 if not mobj:
2998 raise ValueError(f'{segment!r} is not a valid specification')
2999 start, end, step, has_range = mobj.group('start', 'end', 'step', 'range')
3000 if int_or_none(step) == 0:
3001 raise ValueError(f'Step in {segment!r} cannot be zero')
3002 yield slice(int_or_none(start), float_or_none(end), int_or_none(step)) if has_range else int(start)
3003
3004 def get_requested_items(self):
3005 playlist_items = self.ydl.params.get('playlist_items')
3006 playlist_start = self.ydl.params.get('playliststart', 1)
3007 playlist_end = self.ydl.params.get('playlistend')
3008 # For backwards compatibility, interpret -1 as whole list
3009 if playlist_end in (-1, None):
3010 playlist_end = ''
3011 if not playlist_items:
3012 playlist_items = f'{playlist_start}:{playlist_end}'
3013 elif playlist_start != 1 or playlist_end:
3014 self.ydl.report_warning('Ignoring playliststart and playlistend because playlistitems was given', only_once=True)
3015
3016 for index in self.parse_playlist_items(playlist_items):
3017 for i, entry in self[index]:
3018 yield i, entry
3019 if not entry:
3020 continue
3021 try:
3022 # TODO: Add auto-generated fields
3023 self.ydl._match_entry(entry, incomplete=True, silent=True)
3024 except (ExistingVideoReached, RejectedVideoReached):
3025 return
3026
3027 def get_full_count(self):
3028 if self.is_exhausted and not self.is_incomplete:
3029 return len(self)
3030 elif isinstance(self._entries, InAdvancePagedList):
3031 if self._entries._pagesize == 1:
3032 return self._entries._pagecount
3033
3034 @functools.cached_property
3035 def _getter(self):
3036 if isinstance(self._entries, list):
3037 def get_entry(i):
3038 try:
3039 entry = self._entries[i]
3040 except IndexError:
3041 entry = self.MissingEntry
3042 if not self.is_incomplete:
3043 raise self.IndexError()
3044 if entry is self.MissingEntry:
3045 raise EntryNotInPlaylist(f'Entry {i + 1} cannot be found')
3046 return entry
3047 else:
3048 def get_entry(i):
3049 try:
3050 return type(self.ydl)._handle_extraction_exceptions(lambda _, i: self._entries[i])(self.ydl, i)
3051 except (LazyList.IndexError, PagedList.IndexError):
3052 raise self.IndexError()
3053 return get_entry
3054
3055 def __getitem__(self, idx):
3056 if isinstance(idx, int):
3057 idx = slice(idx, idx)
3058
3059 # NB: PlaylistEntries[1:10] => (0, 1, ... 9)
3060 step = 1 if idx.step is None else idx.step
3061 if idx.start is None:
3062 start = 0 if step > 0 else len(self) - 1
3063 else:
3064 start = idx.start - 1 if idx.start >= 0 else len(self) + idx.start
3065
3066 # NB: Do not call len(self) when idx == [:]
3067 if idx.stop is None:
3068 stop = 0 if step < 0 else float('inf')
3069 else:
3070 stop = idx.stop - 1 if idx.stop >= 0 else len(self) + idx.stop
3071 stop += [-1, 1][step > 0]
3072
3073 for i in frange(start, stop, step):
3074 if i < 0:
3075 continue
3076 try:
3077 entry = self._getter(i)
3078 except self.IndexError:
3079 self.is_exhausted = True
3080 if step > 0:
3081 break
3082 continue
3083 yield i + 1, entry
3084
3085 def __len__(self):
3086 return len(tuple(self[:]))
3087
3088 class IndexError(IndexError):
3089 pass
3090
3091
3092 def uppercase_escape(s):
3093 unicode_escape = codecs.getdecoder('unicode_escape')
3094 return re.sub(
3095 r'\\U[0-9a-fA-F]{8}',
3096 lambda m: unicode_escape(m.group(0))[0],
3097 s)
3098
3099
3100 def lowercase_escape(s):
3101 unicode_escape = codecs.getdecoder('unicode_escape')
3102 return re.sub(
3103 r'\\u[0-9a-fA-F]{4}',
3104 lambda m: unicode_escape(m.group(0))[0],
3105 s)
3106
3107
3108 def escape_rfc3986(s):
3109 """Escape non-ASCII characters as suggested by RFC 3986"""
3110 return urllib.parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
3111
3112
3113 def escape_url(url):
3114 """Escape URL as suggested by RFC 3986"""
3115 url_parsed = urllib.parse.urlparse(url)
3116 return url_parsed._replace(
3117 netloc=url_parsed.netloc.encode('idna').decode('ascii'),
3118 path=escape_rfc3986(url_parsed.path),
3119 params=escape_rfc3986(url_parsed.params),
3120 query=escape_rfc3986(url_parsed.query),
3121 fragment=escape_rfc3986(url_parsed.fragment)
3122 ).geturl()
3123
3124
3125 def parse_qs(url, **kwargs):
3126 return urllib.parse.parse_qs(urllib.parse.urlparse(url).query, **kwargs)
3127
3128
3129 def read_batch_urls(batch_fd):
3130 def fixup(url):
3131 if not isinstance(url, str):
3132 url = url.decode('utf-8', 'replace')
3133 BOM_UTF8 = ('\xef\xbb\xbf', '\ufeff')
3134 for bom in BOM_UTF8:
3135 if url.startswith(bom):
3136 url = url[len(bom):]
3137 url = url.lstrip()
3138 if not url or url.startswith(('#', ';', ']')):
3139 return False
3140 # "#" cannot be stripped out since it is part of the URI
3141 # However, it can be safely stripped out if it follows whitespace
3142 return re.split(r'\s#', url, 1)[0].rstrip()
3143
3144 with contextlib.closing(batch_fd) as fd:
3145 return [url for url in map(fixup, fd) if url]
3146
3147
3148 def urlencode_postdata(*args, **kargs):
3149 return urllib.parse.urlencode(*args, **kargs).encode('ascii')
3150
3151
3152 def update_url(url, *, query_update=None, **kwargs):
3153 """Replace URL components specified by kwargs
3154 @param url str or parse url tuple
3155 @param query_update update query
3156 @returns str
3157 """
3158 if isinstance(url, str):
3159 if not kwargs and not query_update:
3160 return url
3161 else:
3162 url = urllib.parse.urlparse(url)
3163 if query_update:
3164 assert 'query' not in kwargs, 'query_update and query cannot be specified at the same time'
3165 kwargs['query'] = urllib.parse.urlencode({
3166 **urllib.parse.parse_qs(url.query),
3167 **query_update
3168 }, True)
3169 return urllib.parse.urlunparse(url._replace(**kwargs))
3170
3171
3172 def update_url_query(url, query):
3173 return update_url(url, query_update=query)
3174
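# Example usage (existing query parameters are preserved):
#   update_url_query('https://example.com/path?a=1', {'b': '2'})
#       == 'https://example.com/path?a=1&b=2'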
3175
3176 def update_Request(req, url=None, data=None, headers=None, query=None):
3177 req_headers = req.headers.copy()
3178 req_headers.update(headers or {})
3179 req_data = data or req.data
3180 req_url = update_url_query(url or req.get_full_url(), query)
3181 req_get_method = req.get_method()
3182 if req_get_method == 'HEAD':
3183 req_type = HEADRequest
3184 elif req_get_method == 'PUT':
3185 req_type = PUTRequest
3186 else:
3187 req_type = urllib.request.Request
3188 new_req = req_type(
3189 req_url, data=req_data, headers=req_headers,
3190 origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
3191 if hasattr(req, 'timeout'):
3192 new_req.timeout = req.timeout
3193 return new_req
3194
3195
3196 def _multipart_encode_impl(data, boundary):
3197 content_type = 'multipart/form-data; boundary=%s' % boundary
3198
3199 out = b''
3200 for k, v in data.items():
3201 out += b'--' + boundary.encode('ascii') + b'\r\n'
3202 if isinstance(k, str):
3203 k = k.encode()
3204 if isinstance(v, str):
3205 v = v.encode()
3206 # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
3207 # suggests sending UTF-8 directly. Firefox sends UTF-8, too
3208 content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
3209 if boundary.encode('ascii') in content:
3210 raise ValueError('Boundary overlaps with data')
3211 out += content
3212
3213 out += b'--' + boundary.encode('ascii') + b'--\r\n'
3214
3215 return out, content_type
3216
3217
3218 def multipart_encode(data, boundary=None):
3219 '''
3220 Encode a dict to RFC 7578-compliant form-data
3221
3222 data:
3223 A dict where keys and values can be either Unicode or bytes-like
3224 objects.
3225 boundary:
3226 If specified, it must be a Unicode object and is used as the boundary.
3227 Otherwise, a random boundary is generated.
3228
3229 Reference: https://tools.ietf.org/html/rfc7578
3230 '''
3231 has_specified_boundary = boundary is not None
3232
3233 while True:
3234 if boundary is None:
3235 boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
3236
3237 try:
3238 out, content_type = _multipart_encode_impl(data, boundary)
3239 break
3240 except ValueError:
3241 if has_specified_boundary:
3242 raise
3243 boundary = None
3244
3245 return out, content_type
3246
3247
3248 def variadic(x, allowed_types=(str, bytes, dict)):
3249 return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,)
3250
3251
3252 def dict_get(d, key_or_keys, default=None, skip_false_values=True):
3253 for val in map(d.get, variadic(key_or_keys)):
3254 if val is not None and (val or not skip_false_values):
3255 return val
3256 return default
3257
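# Example usage (illustrative):
#   d = {'a': '', 'b': 42}
#   dict_get(d, ('a', 'b')) == 42                           # '' is skipped as falsy
#   dict_get(d, ('a', 'b'), skip_false_values=False) == ''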
3258
3259 def try_call(*funcs, expected_type=None, args=[], kwargs={}):
3260 for f in funcs:
3261 try:
3262 val = f(*args, **kwargs)
3263 except (AttributeError, KeyError, TypeError, IndexError, ValueError, ZeroDivisionError):
3264 pass
3265 else:
3266 if expected_type is None or isinstance(val, expected_type):
3267 return val
3268
3269
3270 def try_get(src, getter, expected_type=None):
3271 return try_call(*variadic(getter), args=(src,), expected_type=expected_type)
3272
3273
3274 def filter_dict(dct, cndn=lambda _, v: v is not None):
3275 return {k: v for k, v in dct.items() if cndn(k, v)}
3276
3277
3278 def merge_dicts(*dicts):
3279 merged = {}
3280 for a_dict in dicts:
3281 for k, v in a_dict.items():
3282 if (v is not None and k not in merged
3283 or isinstance(v, str) and merged[k] == ''):
3284 merged[k] = v
3285 return merged
3286
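# Example usage (earlier dicts win, except that empty strings may be overwritten):
#   merge_dicts({'a': 1}, {'a': 2, 'b': 2}) == {'a': 1, 'b': 2}
#   merge_dicts({'a': ''}, {'a': 'x'}) == {'a': 'x'}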
3287
3288 def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
3289 return string if isinstance(string, str) else str(string, encoding, errors)
3290
3291
3292 US_RATINGS = {
3293 'G': 0,
3294 'PG': 10,
3295 'PG-13': 13,
3296 'R': 16,
3297 'NC': 18,
3298 }
3299
3300
3301 TV_PARENTAL_GUIDELINES = {
3302 'TV-Y': 0,
3303 'TV-Y7': 7,
3304 'TV-G': 0,
3305 'TV-PG': 0,
3306 'TV-14': 14,
3307 'TV-MA': 17,
3308 }
3309
3310
3311 def parse_age_limit(s):
3312 # isinstance(False, int) is True. So type() must be used instead
3313 if type(s) is int: # noqa: E721
3314 return s if 0 <= s <= 21 else None
3315 elif not isinstance(s, str):
3316 return None
3317 m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
3318 if m:
3319 return int(m.group('age'))
3320 s = s.upper()
3321 if s in US_RATINGS:
3322 return US_RATINGS[s]
3323 m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
3324 if m:
3325 return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
3326 return None
3327
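# Example usage (illustrative):
#   parse_age_limit('PG-13') == 13
#   parse_age_limit('TV-MA') == 17
#   parse_age_limit(False) is None   # bools are rejected even though they are ints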
3328
3329 def strip_jsonp(code):
3330 return re.sub(
3331 r'''(?sx)^
3332 (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
3333 (?:\s*&&\s*(?P=func_name))?
3334 \s*\(\s*(?P<callback_data>.*)\);?
3335 \s*?(?://[^\n]*)*$''',
3336 r'\g<callback_data>', code)
3337
3338
3339 def js_to_json(code, vars={}, *, strict=False):
3340 # vars is a dict of var, val pairs to substitute
3341 STRING_QUOTES = '\'"'
3342 STRING_RE = '|'.join(rf'{q}(?:\\.|[^\\{q}])*{q}' for q in STRING_QUOTES)
3343 COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
3344 SKIP_RE = fr'\s*(?:{COMMENT_RE})?\s*'
3345 INTEGER_TABLE = (
3346 (fr'(?s)^(0[xX][0-9a-fA-F]+){SKIP_RE}:?$', 16),
3347 (fr'(?s)^(0+[0-7]+){SKIP_RE}:?$', 8),
3348 )
3349
3350 def process_escape(match):
3351 JSON_PASSTHROUGH_ESCAPES = R'"\bfnrtu'
3352 escape = match.group(1) or match.group(2)
3353
3354 return (Rf'\{escape}' if escape in JSON_PASSTHROUGH_ESCAPES
3355 else R'\u00' if escape == 'x'
3356 else '' if escape == '\n'
3357 else escape)
3358
3359 def fix_kv(m):
3360 v = m.group(0)
3361 if v in ('true', 'false', 'null'):
3362 return v
3363 elif v in ('undefined', 'void 0'):
3364 return 'null'
3365 elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
3366 return ''
3367
3368 if v[0] in STRING_QUOTES:
3369 escaped = re.sub(r'(?s)(")|\\(.)', process_escape, v[1:-1])
3370 return f'"{escaped}"'
3371
3372 for regex, base in INTEGER_TABLE:
3373 im = re.match(regex, v)
3374 if im:
3375 i = int(im.group(1), base)
3376 return f'"{i}":' if v.endswith(':') else str(i)
3377
3378 if v in vars:
3379 try:
3380 if not strict:
3381 json.loads(vars[v])
3382 except json.JSONDecodeError:
3383 return json.dumps(vars[v])
3384 else:
3385 return vars[v]
3386
3387 if not strict:
3388 return f'"{v}"'
3389
3390 raise ValueError(f'Unknown value: {v}')
3391
3392 def create_map(mobj):
3393 return json.dumps(dict(json.loads(js_to_json(mobj.group(1) or '[]', vars=vars))))
3394
3395 code = re.sub(r'new Map\((\[.*?\])?\)', create_map, code)
3396 if not strict:
3397 code = re.sub(r'new Date\((".+")\)', r'\g<1>', code)
3398 code = re.sub(r'new \w+\((.*?)\)', lambda m: json.dumps(m.group(0)), code)
3399 code = re.sub(r'parseInt\([^\d]+(\d+)[^\d]+\)', r'\1', code)
3400 code = re.sub(r'\(function\([^)]*\)\s*\{[^}]*\}\s*\)\s*\(\s*(["\'][^)]*["\'])\s*\)', r'\1', code)
3401
3402 return re.sub(rf'''(?sx)
3403 {STRING_RE}|
3404 {COMMENT_RE}|,(?={SKIP_RE}[\]}}])|
3405 void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
3406 \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{SKIP_RE}:)?|
3407 [0-9]+(?={SKIP_RE}:)|
3408 !+
3409 ''', fix_kv, code)
3410
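# Example usage (a minimal sketch; output spacing is an implementation detail,
# so the result is best consumed via json.loads):
#   json.loads(js_to_json("{a: 1, 'b': 'two', c: undefined}"))
#       == {'a': 1, 'b': 'two', 'c': None}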
3411
3412 def qualities(quality_ids):
3413 """ Get a numeric quality value out of a list of possible values """
3414 def q(qid):
3415 try:
3416 return quality_ids.index(qid)
3417 except ValueError:
3418 return -1
3419 return q
3420
3421
3422 POSTPROCESS_WHEN = ('pre_process', 'after_filter', 'video', 'before_dl', 'post_process', 'after_move', 'after_video', 'playlist')
3423
3424
3425 DEFAULT_OUTTMPL = {
3426 'default': '%(title)s [%(id)s].%(ext)s',
3427 'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
3428 }
3429 OUTTMPL_TYPES = {
3430 'chapter': None,
3431 'subtitle': None,
3432 'thumbnail': None,
3433 'description': 'description',
3434 'annotation': 'annotations.xml',
3435 'infojson': 'info.json',
3436 'link': None,
3437 'pl_video': None,
3438 'pl_thumbnail': None,
3439 'pl_description': 'description',
3440 'pl_infojson': 'info.json',
3441 }
3442
3443 # As of [1] format syntax is:
3444 # %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
3445 # 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
3446 STR_FORMAT_RE_TMPL = r'''(?x)
3447 (?<!%)(?P<prefix>(?:%%)*)
3448 %
3449 (?P<has_key>\((?P<key>{0})\))?
3450 (?P<format>
3451 (?P<conversion>[#0\-+ ]+)?
3452 (?P<min_width>\d+)?
3453 (?P<precision>\.\d+)?
3454 (?P<len_mod>[hlL])? # unused in python
3455 {1} # conversion type
3456 )
3457 '''
3458
3459
3460 STR_FORMAT_TYPES = 'diouxXeEfFgGcrs'
3461
3462
3463 def limit_length(s, length):
3464 """ Add ellipses to overly long strings """
3465 if s is None:
3466 return None
3467 ELLIPSES = '...'
3468 if len(s) > length:
3469 return s[:length - len(ELLIPSES)] + ELLIPSES
3470 return s
3471
3472
3473 def version_tuple(v):
3474 return tuple(int(e) for e in re.split(r'[-.]', v))
3475
3476
3477 def is_outdated_version(version, limit, assume_new=True):
3478 if not version:
3479 return not assume_new
3480 try:
3481 return version_tuple(version) < version_tuple(limit)
3482 except ValueError:
3483 return not assume_new
3484
3485
3486 def ytdl_is_updateable():
3487 """ Returns if yt-dlp can be updated with -U """
3488
3489 from .update import is_non_updateable
3490
3491 return not is_non_updateable()
3492
3493
3494 def args_to_str(args):
3495 # Get a short string representation for a subprocess command
3496 return ' '.join(compat_shlex_quote(a) for a in args)
3497
3498
3499 def error_to_compat_str(err):
3500 return str(err)
3501
3502
3503 def error_to_str(err):
3504 return f'{type(err).__name__}: {err}'
3505
3506
3507 def mimetype2ext(mt, default=NO_DEFAULT):
3508 if not isinstance(mt, str):
3509 if default is not NO_DEFAULT:
3510 return default
3511 return None
3512
3513 MAP = {
3514 # video
3515 '3gpp': '3gp',
3516 'mp2t': 'ts',
3517 'mp4': 'mp4',
3518 'mpeg': 'mpeg',
3519 'mpegurl': 'm3u8',
3520 'quicktime': 'mov',
3521 'webm': 'webm',
3522 'vp9': 'vp9',
3523 'x-flv': 'flv',
3524 'x-m4v': 'm4v',
3525 'x-matroska': 'mkv',
3526 'x-mng': 'mng',
3527 'x-mp4-fragmented': 'mp4',
3528 'x-ms-asf': 'asf',
3529 'x-ms-wmv': 'wmv',
3530 'x-msvideo': 'avi',
3531
3532 # application (streaming playlists)
3533 'dash+xml': 'mpd',
3534 'f4m+xml': 'f4m',
3535 'hds+xml': 'f4m',
3536 'vnd.apple.mpegurl': 'm3u8',
3537 'vnd.ms-sstr+xml': 'ism',
3538 'x-mpegurl': 'm3u8',
3539
3540 # audio
3541 'audio/mp4': 'm4a',
3542 # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3.
3543 # Using .mp3 as it's the most popular one
3544 'audio/mpeg': 'mp3',
3545 'audio/webm': 'webm',
3546 'audio/x-matroska': 'mka',
3547 'audio/x-mpegurl': 'm3u',
3548 'midi': 'mid',
3549 'ogg': 'ogg',
3550 'wav': 'wav',
3551 'wave': 'wav',
3552 'x-aac': 'aac',
3553 'x-flac': 'flac',
3554 'x-m4a': 'm4a',
3555 'x-realaudio': 'ra',
3556 'x-wav': 'wav',
3557
3558 # image
3559 'avif': 'avif',
3560 'bmp': 'bmp',
3561 'gif': 'gif',
3562 'jpeg': 'jpg',
3563 'png': 'png',
3564 'svg+xml': 'svg',
3565 'tiff': 'tif',
3566 'vnd.wap.wbmp': 'wbmp',
3567 'webp': 'webp',
3568 'x-icon': 'ico',
3569 'x-jng': 'jng',
3570 'x-ms-bmp': 'bmp',
3571
3572 # caption
3573 'filmstrip+json': 'fs',
3574 'smptett+xml': 'tt',
3575 'ttaf+xml': 'dfxp',
3576 'ttml+xml': 'ttml',
3577 'x-ms-sami': 'sami',
3578
3579 # misc
3580 'gzip': 'gz',
3581 'json': 'json',
3582 'xml': 'xml',
3583 'zip': 'zip',
3584 }
3585
3586 mimetype = mt.partition(';')[0].strip().lower()
3587 _, _, subtype = mimetype.rpartition('/')
3588
3589 ext = traverse_obj(MAP, mimetype, subtype, subtype.rsplit('+')[-1])
3590 if ext:
3591 return ext
3592 elif default is not NO_DEFAULT:
3593 return default
3594 return subtype.replace('+', '.')
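
# Illustrative usage of mimetype2ext(): parameters after ';' are ignored and
# lookups fall back from the full mime type to the (suffix-stripped) subtype:
#     >>> mimetype2ext('video/mp4; codecs="avc1.42E01E"')
#     'mp4'
#     >>> mimetype2ext('application/vnd.apple.mpegurl')
#     'm3u8'
#     >>> mimetype2ext(None, default='unknown_video')
#     'unknown_video'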
3595
3596
3597 def ext2mimetype(ext_or_url):
3598 if not ext_or_url:
3599 return None
3600 if '.' not in ext_or_url:
3601 ext_or_url = f'file.{ext_or_url}'
3602 return mimetypes.guess_type(ext_or_url)[0]
3603
3604
3605 def parse_codecs(codecs_str):
3606 # http://tools.ietf.org/html/rfc6381
3607 if not codecs_str:
3608 return {}
3609 split_codecs = list(filter(None, map(
3610 str.strip, codecs_str.strip().strip(',').split(','))))
3611 vcodec, acodec, scodec, hdr = None, None, None, None
3612 for full_codec in split_codecs:
3613 parts = re.sub(r'0+(?=\d)', '', full_codec).split('.')
3614 if parts[0] in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
3615 'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
3616 if vcodec:
3617 continue
3618 vcodec = full_codec
3619 if parts[0] in ('dvh1', 'dvhe'):
3620 hdr = 'DV'
3621 elif parts[0] == 'av1' and traverse_obj(parts, 3) == '10':
3622 hdr = 'HDR10'
3623 elif parts[:2] == ['vp9', '2']:
3624 hdr = 'HDR10'
3625 elif parts[0] in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-4',
3626 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
3627 acodec = acodec or full_codec
3628 elif parts[0] in ('stpp', 'wvtt'):
3629 scodec = scodec or full_codec
3630 else:
3631 write_string(f'WARNING: Unknown codec {full_codec}\n')
3632 if vcodec or acodec or scodec:
3633 return {
3634 'vcodec': vcodec or 'none',
3635 'acodec': acodec or 'none',
3636 'dynamic_range': hdr,
3637 **({'scodec': scodec} if scodec is not None else {}),
3638 }
3639 elif len(split_codecs) == 2:
3640 return {
3641 'vcodec': split_codecs[0],
3642 'acodec': split_codecs[1],
3643 }
3644 return {}
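
# Illustrative usage of parse_codecs() on RFC 6381 codecs strings:
#     >>> parse_codecs('avc1.64001f, mp4a.40.2')
#     {'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2', 'dynamic_range': None}
#     >>> parse_codecs('dvh1.05.06')
#     {'vcodec': 'dvh1.05.06', 'acodec': 'none', 'dynamic_range': 'DV'}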
3645
3646
3647 def get_compatible_ext(*, vcodecs, acodecs, vexts, aexts, preferences=None):
3648 assert len(vcodecs) == len(vexts) and len(acodecs) == len(aexts)
3649
3650 allow_mkv = not preferences or 'mkv' in preferences
3651
3652 if allow_mkv and max(len(acodecs), len(vcodecs)) > 1:
3653 return 'mkv' # TODO: does any other format allow this?
3654
3655 # TODO: Not all codecs supported by parse_codecs are handled here
3656 COMPATIBLE_CODECS = {
3657 'mp4': {
3658 'av1', 'hevc', 'avc1', 'mp4a', 'ac-4', # fourcc (m3u8, mpd)
3659 'h264', 'aacl', 'ec-3', # Set in ISM
3660 },
3661 'webm': {
3662 'av1', 'vp9', 'vp8', 'opus', 'vrbs',
3663 'vp9x', 'vp8x', # in the webm spec
3664 },
3665 }
3666
3667 sanitize_codec = functools.partial(
3668 try_get, getter=lambda x: x[0].split('.')[0].replace('0', '').lower())
3669 vcodec, acodec = sanitize_codec(vcodecs), sanitize_codec(acodecs)
3670
3671 for ext in preferences or COMPATIBLE_CODECS.keys():
3672 codec_set = COMPATIBLE_CODECS.get(ext, set())
3673 if ext == 'mkv' or codec_set.issuperset((vcodec, acodec)):
3674 return ext
3675
3676 COMPATIBLE_EXTS = (
3677 {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma', 'mov'},
3678 {'webm', 'weba'},
3679 )
3680 for ext in preferences or vexts:
3681 current_exts = {ext, *vexts, *aexts}
3682 if ext == 'mkv' or current_exts == {ext} or any(
3683 ext_sets.issuperset(current_exts) for ext_sets in COMPATIBLE_EXTS):
3684 return ext
3685 return 'mkv' if allow_mkv else preferences[-1]
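
# Illustrative usage of get_compatible_ext():
#     >>> get_compatible_ext(vcodecs=['avc1'], acodecs=['mp4a'], vexts=['mp4'], aexts=['m4a'])
#     'mp4'
#     >>> get_compatible_ext(vcodecs=['vp9'], acodecs=['opus'], vexts=['webm'], aexts=['webm'])
#     'webm'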
3686
3687
3688 def urlhandle_detect_ext(url_handle, default=NO_DEFAULT):
3689 getheader = url_handle.headers.get
3690
3691 cd = getheader('Content-Disposition')
3692 if cd:
3693 m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
3694 if m:
3695 e = determine_ext(m.group('filename'), default_ext=None)
3696 if e:
3697 return e
3698
3699 meta_ext = getheader('x-amz-meta-name')
3700 if meta_ext:
3701 e = meta_ext.rpartition('.')[2]
3702 if e:
3703 return e
3704
3705 return mimetype2ext(getheader('Content-Type'), default=default)
3706
3707
3708 def encode_data_uri(data, mime_type):
3709 return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
3710
3711
3712 def age_restricted(content_limit, age_limit):
3713 """ Returns True iff the content should be blocked """
3714
3715 if age_limit is None: # No limit set
3716 return False
3717 if content_limit is None:
3718 return False # Content available for everyone
3719 return age_limit < content_limit
3720
3721
3722 # List of known byte-order-marks (BOM)
3723 BOMS = [
3724 (b'\xef\xbb\xbf', 'utf-8'),
3725 (b'\x00\x00\xfe\xff', 'utf-32-be'),
3726 (b'\xff\xfe\x00\x00', 'utf-32-le'),
3727 (b'\xff\xfe', 'utf-16-le'),
3728 (b'\xfe\xff', 'utf-16-be'),
3729 ]
3730
3731
3732 def is_html(first_bytes):
3733 """ Detect whether a file contains HTML by examining its first bytes. """
3734
3735 encoding = 'utf-8'
3736 for bom, enc in BOMS:
3737 while first_bytes.startswith(bom):
3738 encoding, first_bytes = enc, first_bytes[len(bom):]
3739
3740 return re.match(r'^\s*<', first_bytes.decode(encoding, 'replace'))
3741
3742
3743 def determine_protocol(info_dict):
3744 protocol = info_dict.get('protocol')
3745 if protocol is not None:
3746 return protocol
3747
3748 url = sanitize_url(info_dict['url'])
3749 if url.startswith('rtmp'):
3750 return 'rtmp'
3751 elif url.startswith('mms'):
3752 return 'mms'
3753 elif url.startswith('rtsp'):
3754 return 'rtsp'
3755
3756 ext = determine_ext(url)
3757 if ext == 'm3u8':
3758 return 'm3u8' if info_dict.get('is_live') else 'm3u8_native'
3759 elif ext == 'f4m':
3760 return 'f4m'
3761
3762 return urllib.parse.urlparse(url).scheme
3763
3764
3765 def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False):
3766 """ Render a list of rows, each as a list of values.
3767 Text after a \\t will be right aligned """
3768 def width(string):
3769 return len(remove_terminal_sequences(string).replace('\t', ''))
3770
3771 def get_max_lens(table):
3772 return [max(width(str(v)) for v in col) for col in zip(*table)]
3773
3774 def filter_using_list(row, filterArray):
3775 return [col for take, col in itertools.zip_longest(filterArray, row, fillvalue=True) if take]
3776
3777 max_lens = get_max_lens(data) if hide_empty else []
3778 header_row = filter_using_list(header_row, max_lens)
3779 data = [filter_using_list(row, max_lens) for row in data]
3780
3781 table = [header_row] + data
3782 max_lens = get_max_lens(table)
3783 extra_gap += 1
3784 if delim:
3785 table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data
3786 table[1][-1] = table[1][-1][:-extra_gap * len(delim)] # Remove extra_gap from end of delimiter
3787 for row in table:
3788 for pos, text in enumerate(map(str, row)):
3789 if '\t' in text:
3790 row[pos] = text.replace('\t', ' ' * (max_lens[pos] - width(text))) + ' ' * extra_gap
3791 else:
3792 row[pos] = text + ' ' * (max_lens[pos] - width(text) + extra_gap)
3793 ret = '\n'.join(''.join(row).rstrip() for row in table)
3794 return ret
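
# Illustrative usage of render_table() (columns are padded to the widest cell):
#     >>> render_table(['ID', 'NAME'], [['1', 'foo'], ['22', 'bar']])
#     'ID NAME\n1  foo\n22 bar'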
3795
3796
3797 def _match_one(filter_part, dct, incomplete):
3798 # TODO: Generalize code with YoutubeDL._build_format_filter
3799 STRING_OPERATORS = {
3800 '*=': operator.contains,
3801 '^=': lambda attr, value: attr.startswith(value),
3802 '$=': lambda attr, value: attr.endswith(value),
3803 '~=': lambda attr, value: re.search(value, attr),
3804 }
3805 COMPARISON_OPERATORS = {
3806 **STRING_OPERATORS,
3807 '<=': operator.le, # "<=" must be defined above "<"
3808 '<': operator.lt,
3809 '>=': operator.ge,
3810 '>': operator.gt,
3811 '=': operator.eq,
3812 }
3813
3814 if isinstance(incomplete, bool):
3815 is_incomplete = lambda _: incomplete
3816 else:
3817 is_incomplete = lambda k: k in incomplete
3818
3819 operator_rex = re.compile(r'''(?x)
3820 (?P<key>[a-z_]+)
3821 \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
3822 (?:
3823 (?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
3824 (?P<strval>.+?)
3825 )
3826 ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
3827 m = operator_rex.fullmatch(filter_part.strip())
3828 if m:
3829 m = m.groupdict()
3830 unnegated_op = COMPARISON_OPERATORS[m['op']]
3831 if m['negation']:
3832 op = lambda attr, value: not unnegated_op(attr, value)
3833 else:
3834 op = unnegated_op
3835 comparison_value = m['quotedstrval'] or m['strval']  # the regex guarantees one of these groups matched
3836 if m['quote']:
3837 comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote'])
3838 actual_value = dct.get(m['key'])
3839 numeric_comparison = None
3840 if isinstance(actual_value, (int, float)):
3841 # If the original field is a string and the matching comparison value is
3842 # a number, we should respect the origin of the original field
3843 # and process the comparison value as a string (see
3844 # https://github.com/ytdl-org/youtube-dl/issues/11082)
3845 try:
3846 numeric_comparison = int(comparison_value)
3847 except ValueError:
3848 numeric_comparison = parse_filesize(comparison_value)
3849 if numeric_comparison is None:
3850 numeric_comparison = parse_filesize(f'{comparison_value}B')
3851 if numeric_comparison is None:
3852 numeric_comparison = parse_duration(comparison_value)
3853 if numeric_comparison is not None and m['op'] in STRING_OPERATORS:
3854 raise ValueError('Operator %s only supports string values!' % m['op'])
3855 if actual_value is None:
3856 return is_incomplete(m['key']) or m['none_inclusive']
3857 return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)
3858
3859 UNARY_OPERATORS = {
3860 '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
3861 '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
3862 }
3863 operator_rex = re.compile(r'''(?x)
3864 (?P<op>%s)\s*(?P<key>[a-z_]+)
3865 ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
3866 m = operator_rex.fullmatch(filter_part.strip())
3867 if m:
3868 op = UNARY_OPERATORS[m.group('op')]
3869 actual_value = dct.get(m.group('key'))
3870 if is_incomplete(m.group('key')) and actual_value is None:
3871 return True
3872 return op(actual_value)
3873
3874 raise ValueError('Invalid filter part %r' % filter_part)
3875
3876
3877 def match_str(filter_str, dct, incomplete=False):
3878 """ Filter a dictionary with a simple string syntax.
3879 @returns Whether the filter passes
3880 @param incomplete Set of keys that is expected to be missing from dct.
3881 Can be True/False to indicate all/none of the keys may be missing.
3882 All conditions on incomplete keys pass if the key is missing
3883 """
3884 return all(
3885 _match_one(filter_part.replace(r'\&', '&'), dct, incomplete)
3886 for filter_part in re.split(r'(?<!\\)&', filter_str))
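
# Illustrative usage of match_str() with the '&'-separated filter syntax:
#     >>> match_str('like_count > 100 & description', {'like_count': 190, 'description': 'text'})
#     True
#     >>> match_str('!is_live', {'is_live': True})
#     False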
3887
3888
3889 def match_filter_func(filters):
3890 if not filters:
3891 return None
3892 filters = set(variadic(filters))
3893
3894 interactive = '-' in filters
3895 if interactive:
3896 filters.remove('-')
3897
3898 def _match_func(info_dict, incomplete=False):
3899 if not filters or any(match_str(f, info_dict, incomplete) for f in filters):
3900 return NO_DEFAULT if interactive and not incomplete else None
3901 else:
3902 video_title = info_dict.get('title') or info_dict.get('id') or 'entry'
3903 filter_str = ') | ('.join(map(str.strip, filters))
3904 return f'{video_title} does not pass filter ({filter_str}), skipping ...'
3905 return _match_func
3906
3907
3908 class download_range_func:
3909 def __init__(self, chapters, ranges):
3910 self.chapters, self.ranges = chapters, ranges
3911
3912 def __call__(self, info_dict, ydl):
3913 if not self.ranges and not self.chapters:
3914 yield {}
3915
3916 warning = ('There are no chapters matching the regex' if info_dict.get('chapters')
3917 else 'Cannot match chapters since chapter information is unavailable')
3918 for regex in self.chapters or []:
3919 for i, chapter in enumerate(info_dict.get('chapters') or []):
3920 if re.search(regex, chapter['title']):
3921 warning = None
3922 yield {**chapter, 'index': i}
3923 if self.chapters and warning:
3924 ydl.to_screen(f'[info] {info_dict["id"]}: {warning}')
3925
3926 yield from ({'start_time': start, 'end_time': end} for start, end in self.ranges or [])
3927
3928 def __eq__(self, other):
3929 return (isinstance(other, download_range_func)
3930 and self.chapters == other.chapters and self.ranges == other.ranges)
3931
3932 def __repr__(self):
3933 return f'{__name__}.{type(self).__name__}({self.chapters}, {self.ranges})'
3934
3935
3936 def parse_dfxp_time_expr(time_expr):
3937 if not time_expr:
3938 return
3939
3940 mobj = re.match(rf'^(?P<time_offset>{NUMBER_RE})s?$', time_expr)
3941 if mobj:
3942 return float(mobj.group('time_offset'))
3943
3944 mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
3945 if mobj:
3946 return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
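
# Illustrative usage of parse_dfxp_time_expr() (returns seconds as a float;
# assumes NUMBER_RE, defined earlier in this module, matches a decimal number):
#     >>> parse_dfxp_time_expr('5.2s')
#     5.2
#     >>> parse_dfxp_time_expr('00:01:02.5')
#     62.5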
3947
3948
3949 def srt_subtitles_timecode(seconds):
3950 return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)
3951
3952
3953 def ass_subtitles_timecode(seconds):
3954 time = timetuple_from_msec(seconds * 1000)
3955 return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)
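
# Illustrative usage of the timecode helpers (given timetuple_from_msec()
# defined earlier in this module):
#     >>> srt_subtitles_timecode(3661.5)
#     '01:01:01,500'
#     >>> ass_subtitles_timecode(3661.5)
#     '1:01:01.50'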
3956
3957
3958 def dfxp2srt(dfxp_data):
3959 '''
3960 @param dfxp_data A bytes-like object containing DFXP data
3961 @returns A unicode object containing converted SRT data
3962 '''
3963 LEGACY_NAMESPACES = (
3964 (b'http://www.w3.org/ns/ttml', [
3965 b'http://www.w3.org/2004/11/ttaf1',
3966 b'http://www.w3.org/2006/04/ttaf1',
3967 b'http://www.w3.org/2006/10/ttaf1',
3968 ]),
3969 (b'http://www.w3.org/ns/ttml#styling', [
3970 b'http://www.w3.org/ns/ttml#style',
3971 ]),
3972 )
3973
3974 SUPPORTED_STYLING = [
3975 'color',
3976 'fontFamily',
3977 'fontSize',
3978 'fontStyle',
3979 'fontWeight',
3980 'textDecoration'
3981 ]
3982
3983 _x = functools.partial(xpath_with_ns, ns_map={
3984 'xml': 'http://www.w3.org/XML/1998/namespace',
3985 'ttml': 'http://www.w3.org/ns/ttml',
3986 'tts': 'http://www.w3.org/ns/ttml#styling',
3987 })
3988
3989 styles = {}
3990 default_style = {}
3991
3992 class TTMLPElementParser:
3993 def __init__(self):
3994 # Keep state per-instance; class-level lists would be shared across parses
3995 self._out, self._unclosed_elements, self._applied_styles = '', [], []
3996
3997 def start(self, tag, attrib):
3998 if tag in (_x('ttml:br'), 'br'):
3999 self._out += '\n'
4000 else:
4001 unclosed_elements = []
4002 style = {}
4003 element_style_id = attrib.get('style')
4004 if default_style:
4005 style.update(default_style)
4006 if element_style_id:
4007 style.update(styles.get(element_style_id, {}))
4008 for prop in SUPPORTED_STYLING:
4009 prop_val = attrib.get(_x('tts:' + prop))
4010 if prop_val:
4011 style[prop] = prop_val
4012 if style:
4013 font = ''
4014 for k, v in sorted(style.items()):
4015 if self._applied_styles and self._applied_styles[-1].get(k) == v:
4016 continue
4017 if k == 'color':
4018 font += ' color="%s"' % v
4019 elif k == 'fontSize':
4020 font += ' size="%s"' % v
4021 elif k == 'fontFamily':
4022 font += ' face="%s"' % v
4023 elif k == 'fontWeight' and v == 'bold':
4024 self._out += '<b>'
4025 unclosed_elements.append('b')
4026 elif k == 'fontStyle' and v == 'italic':
4027 self._out += '<i>'
4028 unclosed_elements.append('i')
4029 elif k == 'textDecoration' and v == 'underline':
4030 self._out += '<u>'
4031 unclosed_elements.append('u')
4032 if font:
4033 self._out += '<font' + font + '>'
4034 unclosed_elements.append('font')
4035 applied_style = {}
4036 if self._applied_styles:
4037 applied_style.update(self._applied_styles[-1])
4038 applied_style.update(style)
4039 self._applied_styles.append(applied_style)
4040 self._unclosed_elements.append(unclosed_elements)
4041
4042 def end(self, tag):
4043 if tag not in (_x('ttml:br'), 'br'):
4044 unclosed_elements = self._unclosed_elements.pop()
4045 for element in reversed(unclosed_elements):
4046 self._out += '</%s>' % element
4047 if unclosed_elements and self._applied_styles:
4048 self._applied_styles.pop()
4049
4050 def data(self, data):
4051 self._out += data
4052
4053 def close(self):
4054 return self._out.strip()
4055
4056 def parse_node(node):
4057 target = TTMLPElementParser()
4058 parser = xml.etree.ElementTree.XMLParser(target=target)
4059 parser.feed(xml.etree.ElementTree.tostring(node))
4060 return parser.close()
4061
4062 for k, v in LEGACY_NAMESPACES:
4063 for ns in v:
4064 dfxp_data = dfxp_data.replace(ns, k)
4065
4066 dfxp = compat_etree_fromstring(dfxp_data)
4067 out = []
4068 paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
4069
4070 if not paras:
4071 raise ValueError('Invalid dfxp/TTML subtitle')
4072
4073 repeat = False
4074 while True:
4075 for style in dfxp.findall(_x('.//ttml:style')):
4076 style_id = style.get('id') or style.get(_x('xml:id'))
4077 if not style_id:
4078 continue
4079 parent_style_id = style.get('style')
4080 if parent_style_id:
4081 if parent_style_id not in styles:
4082 repeat = True
4083 continue
4084 styles[style_id] = styles[parent_style_id].copy()
4085 for prop in SUPPORTED_STYLING:
4086 prop_val = style.get(_x('tts:' + prop))
4087 if prop_val:
4088 styles.setdefault(style_id, {})[prop] = prop_val
4089 if repeat:
4090 repeat = False
4091 else:
4092 break
4093
4094 for p in ('body', 'div'):
4095 ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
4096 if ele is None:
4097 continue
4098 style = styles.get(ele.get('style'))
4099 if not style:
4100 continue
4101 default_style.update(style)
4102
4103 for para, index in zip(paras, itertools.count(1)):
4104 begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
4105 end_time = parse_dfxp_time_expr(para.attrib.get('end'))
4106 dur = parse_dfxp_time_expr(para.attrib.get('dur'))
4107 if begin_time is None:
4108 continue
4109 if not end_time:
4110 if not dur:
4111 continue
4112 end_time = begin_time + dur
4113 out.append('%d\n%s --> %s\n%s\n\n' % (
4114 index,
4115 srt_subtitles_timecode(begin_time),
4116 srt_subtitles_timecode(end_time),
4117 parse_node(para)))
4118
4119 return ''.join(out)
4120
4121
4122 def cli_option(params, command_option, param, separator=None):
4123 param = params.get(param)
4124 return ([] if param is None
4125 else [command_option, str(param)] if separator is None
4126 else [f'{command_option}{separator}{param}'])
4127
4128
4129 def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
4130 param = params.get(param)
4131 assert param in (True, False, None)
4132 return cli_option({True: true_value, False: false_value}, command_option, param, separator)
4133
4134
4135 def cli_valueless_option(params, command_option, param, expected_value=True):
4136 return [command_option] if params.get(param) == expected_value else []
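
# Illustrative usage of the cli_*_option helpers for building external command lines:
#     >>> cli_option({'proxy': 'socks5://127.0.0.1:1080'}, '--proxy', 'proxy')
#     ['--proxy', 'socks5://127.0.0.1:1080']
#     >>> cli_bool_option({'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate')
#     ['--no-check-certificate', 'true']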
4137
4138
4139 def cli_configuration_args(argdict, keys, default=[], use_compat=True):
4140 if isinstance(argdict, (list, tuple)): # for backward compatibility
4141 if use_compat:
4142 return argdict
4143 else:
4144 argdict = None
4145 if argdict is None:
4146 return default
4147 assert isinstance(argdict, dict)
4148
4149 assert isinstance(keys, (list, tuple))
4150 for key_list in keys:
4151 arg_list = list(filter(
4152 lambda x: x is not None,
4153 [argdict.get(key.lower()) for key in variadic(key_list)]))
4154 if arg_list:
4155 return [arg for args in arg_list for arg in args]
4156 return default
4157
4158
4159 def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compat=True):
4160 main_key, exe = main_key.lower(), exe.lower()
4161 root_key = exe if main_key == exe else f'{main_key}+{exe}'
4162 keys = [f'{root_key}{k}' for k in (keys or [''])]
4163 if root_key in keys:
4164 if main_key != exe:
4165 keys.append((main_key, exe))
4166 keys.append('default')
4167 else:
4168 use_compat = False
4169 return cli_configuration_args(argdict, keys, default, use_compat)
4170
4171
4172 class ISO639Utils:
4173 # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
4174 _lang_map = {
4175 'aa': 'aar',
4176 'ab': 'abk',
4177 'ae': 'ave',
4178 'af': 'afr',
4179 'ak': 'aka',
4180 'am': 'amh',
4181 'an': 'arg',
4182 'ar': 'ara',
4183 'as': 'asm',
4184 'av': 'ava',
4185 'ay': 'aym',
4186 'az': 'aze',
4187 'ba': 'bak',
4188 'be': 'bel',
4189 'bg': 'bul',
4190 'bh': 'bih',
4191 'bi': 'bis',
4192 'bm': 'bam',
4193 'bn': 'ben',
4194 'bo': 'bod',
4195 'br': 'bre',
4196 'bs': 'bos',
4197 'ca': 'cat',
4198 'ce': 'che',
4199 'ch': 'cha',
4200 'co': 'cos',
4201 'cr': 'cre',
4202 'cs': 'ces',
4203 'cu': 'chu',
4204 'cv': 'chv',
4205 'cy': 'cym',
4206 'da': 'dan',
4207 'de': 'deu',
4208 'dv': 'div',
4209 'dz': 'dzo',
4210 'ee': 'ewe',
4211 'el': 'ell',
4212 'en': 'eng',
4213 'eo': 'epo',
4214 'es': 'spa',
4215 'et': 'est',
4216 'eu': 'eus',
4217 'fa': 'fas',
4218 'ff': 'ful',
4219 'fi': 'fin',
4220 'fj': 'fij',
4221 'fo': 'fao',
4222 'fr': 'fra',
4223 'fy': 'fry',
4224 'ga': 'gle',
4225 'gd': 'gla',
4226 'gl': 'glg',
4227 'gn': 'grn',
4228 'gu': 'guj',
4229 'gv': 'glv',
4230 'ha': 'hau',
4231 'he': 'heb',
4232 'iw': 'heb', # Replaced by he in 1989 revision
4233 'hi': 'hin',
4234 'ho': 'hmo',
4235 'hr': 'hrv',
4236 'ht': 'hat',
4237 'hu': 'hun',
4238 'hy': 'hye',
4239 'hz': 'her',
4240 'ia': 'ina',
4241 'id': 'ind',
4242 'in': 'ind', # Replaced by id in 1989 revision
4243 'ie': 'ile',
4244 'ig': 'ibo',
4245 'ii': 'iii',
4246 'ik': 'ipk',
4247 'io': 'ido',
4248 'is': 'isl',
4249 'it': 'ita',
4250 'iu': 'iku',
4251 'ja': 'jpn',
4252 'jv': 'jav',
4253 'ka': 'kat',
4254 'kg': 'kon',
4255 'ki': 'kik',
4256 'kj': 'kua',
4257 'kk': 'kaz',
4258 'kl': 'kal',
4259 'km': 'khm',
4260 'kn': 'kan',
4261 'ko': 'kor',
4262 'kr': 'kau',
4263 'ks': 'kas',
4264 'ku': 'kur',
4265 'kv': 'kom',
4266 'kw': 'cor',
4267 'ky': 'kir',
4268 'la': 'lat',
4269 'lb': 'ltz',
4270 'lg': 'lug',
4271 'li': 'lim',
4272 'ln': 'lin',
4273 'lo': 'lao',
4274 'lt': 'lit',
4275 'lu': 'lub',
4276 'lv': 'lav',
4277 'mg': 'mlg',
4278 'mh': 'mah',
4279 'mi': 'mri',
4280 'mk': 'mkd',
4281 'ml': 'mal',
4282 'mn': 'mon',
4283 'mr': 'mar',
4284 'ms': 'msa',
4285 'mt': 'mlt',
4286 'my': 'mya',
4287 'na': 'nau',
4288 'nb': 'nob',
4289 'nd': 'nde',
4290 'ne': 'nep',
4291 'ng': 'ndo',
4292 'nl': 'nld',
4293 'nn': 'nno',
4294 'no': 'nor',
4295 'nr': 'nbl',
4296 'nv': 'nav',
4297 'ny': 'nya',
4298 'oc': 'oci',
4299 'oj': 'oji',
4300 'om': 'orm',
4301 'or': 'ori',
4302 'os': 'oss',
4303 'pa': 'pan',
4304 'pi': 'pli',
4305 'pl': 'pol',
4306 'ps': 'pus',
4307 'pt': 'por',
4308 'qu': 'que',
4309 'rm': 'roh',
4310 'rn': 'run',
4311 'ro': 'ron',
4312 'ru': 'rus',
4313 'rw': 'kin',
4314 'sa': 'san',
4315 'sc': 'srd',
4316 'sd': 'snd',
4317 'se': 'sme',
4318 'sg': 'sag',
4319 'si': 'sin',
4320 'sk': 'slk',
4321 'sl': 'slv',
4322 'sm': 'smo',
4323 'sn': 'sna',
4324 'so': 'som',
4325 'sq': 'sqi',
4326 'sr': 'srp',
4327 'ss': 'ssw',
4328 'st': 'sot',
4329 'su': 'sun',
4330 'sv': 'swe',
4331 'sw': 'swa',
4332 'ta': 'tam',
4333 'te': 'tel',
4334 'tg': 'tgk',
4335 'th': 'tha',
4336 'ti': 'tir',
4337 'tk': 'tuk',
4338 'tl': 'tgl',
4339 'tn': 'tsn',
4340 'to': 'ton',
4341 'tr': 'tur',
4342 'ts': 'tso',
4343 'tt': 'tat',
4344 'tw': 'twi',
4345 'ty': 'tah',
4346 'ug': 'uig',
4347 'uk': 'ukr',
4348 'ur': 'urd',
4349 'uz': 'uzb',
4350 've': 'ven',
4351 'vi': 'vie',
4352 'vo': 'vol',
4353 'wa': 'wln',
4354 'wo': 'wol',
4355 'xh': 'xho',
4356 'yi': 'yid',
4357 'ji': 'yid', # Replaced by yi in 1989 revision
4358 'yo': 'yor',
4359 'za': 'zha',
4360 'zh': 'zho',
4361 'zu': 'zul',
4362 }
4363
4364 @classmethod
4365 def short2long(cls, code):
4366 """Convert language code from ISO 639-1 to ISO 639-2/T"""
4367 return cls._lang_map.get(code[:2])
4368
4369 @classmethod
4370 def long2short(cls, code):
4371 """Convert language code from ISO 639-2/T to ISO 639-1"""
4372 for short_name, long_name in cls._lang_map.items():
4373 if long_name == code:
4374 return short_name
4375
4376
4377 class ISO3166Utils:
4378 # From http://data.okfn.org/data/core/country-list
4379 _country_map = {
4380 'AF': 'Afghanistan',
4381 'AX': 'Åland Islands',
4382 'AL': 'Albania',
4383 'DZ': 'Algeria',
4384 'AS': 'American Samoa',
4385 'AD': 'Andorra',
4386 'AO': 'Angola',
4387 'AI': 'Anguilla',
4388 'AQ': 'Antarctica',
4389 'AG': 'Antigua and Barbuda',
4390 'AR': 'Argentina',
4391 'AM': 'Armenia',
4392 'AW': 'Aruba',
4393 'AU': 'Australia',
4394 'AT': 'Austria',
4395 'AZ': 'Azerbaijan',
4396 'BS': 'Bahamas',
4397 'BH': 'Bahrain',
4398 'BD': 'Bangladesh',
4399 'BB': 'Barbados',
4400 'BY': 'Belarus',
4401 'BE': 'Belgium',
4402 'BZ': 'Belize',
4403 'BJ': 'Benin',
4404 'BM': 'Bermuda',
4405 'BT': 'Bhutan',
4406 'BO': 'Bolivia, Plurinational State of',
4407 'BQ': 'Bonaire, Sint Eustatius and Saba',
4408 'BA': 'Bosnia and Herzegovina',
4409 'BW': 'Botswana',
4410 'BV': 'Bouvet Island',
4411 'BR': 'Brazil',
4412 'IO': 'British Indian Ocean Territory',
4413 'BN': 'Brunei Darussalam',
4414 'BG': 'Bulgaria',
4415 'BF': 'Burkina Faso',
4416 'BI': 'Burundi',
4417 'KH': 'Cambodia',
4418 'CM': 'Cameroon',
4419 'CA': 'Canada',
4420 'CV': 'Cape Verde',
4421 'KY': 'Cayman Islands',
4422 'CF': 'Central African Republic',
4423 'TD': 'Chad',
4424 'CL': 'Chile',
4425 'CN': 'China',
4426 'CX': 'Christmas Island',
4427 'CC': 'Cocos (Keeling) Islands',
4428 'CO': 'Colombia',
4429 'KM': 'Comoros',
4430 'CG': 'Congo',
4431 'CD': 'Congo, the Democratic Republic of the',
4432 'CK': 'Cook Islands',
4433 'CR': 'Costa Rica',
4434 'CI': 'Côte d\'Ivoire',
4435 'HR': 'Croatia',
4436 'CU': 'Cuba',
4437 'CW': 'Curaçao',
4438 'CY': 'Cyprus',
4439 'CZ': 'Czech Republic',
4440 'DK': 'Denmark',
4441 'DJ': 'Djibouti',
4442 'DM': 'Dominica',
4443 'DO': 'Dominican Republic',
4444 'EC': 'Ecuador',
4445 'EG': 'Egypt',
4446 'SV': 'El Salvador',
4447 'GQ': 'Equatorial Guinea',
4448 'ER': 'Eritrea',
4449 'EE': 'Estonia',
4450 'ET': 'Ethiopia',
4451 'FK': 'Falkland Islands (Malvinas)',
4452 'FO': 'Faroe Islands',
4453 'FJ': 'Fiji',
4454 'FI': 'Finland',
4455 'FR': 'France',
4456 'GF': 'French Guiana',
4457 'PF': 'French Polynesia',
4458 'TF': 'French Southern Territories',
4459 'GA': 'Gabon',
4460 'GM': 'Gambia',
4461 'GE': 'Georgia',
4462 'DE': 'Germany',
4463 'GH': 'Ghana',
4464 'GI': 'Gibraltar',
4465 'GR': 'Greece',
4466 'GL': 'Greenland',
4467 'GD': 'Grenada',
4468 'GP': 'Guadeloupe',
4469 'GU': 'Guam',
4470 'GT': 'Guatemala',
4471 'GG': 'Guernsey',
4472 'GN': 'Guinea',
4473 'GW': 'Guinea-Bissau',
4474 'GY': 'Guyana',
4475 'HT': 'Haiti',
4476 'HM': 'Heard Island and McDonald Islands',
4477 'VA': 'Holy See (Vatican City State)',
4478 'HN': 'Honduras',
4479 'HK': 'Hong Kong',
4480 'HU': 'Hungary',
4481 'IS': 'Iceland',
4482 'IN': 'India',
4483 'ID': 'Indonesia',
4484 'IR': 'Iran, Islamic Republic of',
4485 'IQ': 'Iraq',
4486 'IE': 'Ireland',
4487 'IM': 'Isle of Man',
4488 'IL': 'Israel',
4489 'IT': 'Italy',
4490 'JM': 'Jamaica',
4491 'JP': 'Japan',
4492 'JE': 'Jersey',
4493 'JO': 'Jordan',
4494 'KZ': 'Kazakhstan',
4495 'KE': 'Kenya',
4496 'KI': 'Kiribati',
4497 'KP': 'Korea, Democratic People\'s Republic of',
4498 'KR': 'Korea, Republic of',
4499 'KW': 'Kuwait',
4500 'KG': 'Kyrgyzstan',
4501 'LA': 'Lao People\'s Democratic Republic',
4502 'LV': 'Latvia',
4503 'LB': 'Lebanon',
4504 'LS': 'Lesotho',
4505 'LR': 'Liberia',
4506 'LY': 'Libya',
4507 'LI': 'Liechtenstein',
4508 'LT': 'Lithuania',
4509 'LU': 'Luxembourg',
4510 'MO': 'Macao',
4511 'MK': 'Macedonia, the Former Yugoslav Republic of',
4512 'MG': 'Madagascar',
4513 'MW': 'Malawi',
4514 'MY': 'Malaysia',
4515 'MV': 'Maldives',
4516 'ML': 'Mali',
4517 'MT': 'Malta',
4518 'MH': 'Marshall Islands',
4519 'MQ': 'Martinique',
4520 'MR': 'Mauritania',
4521 'MU': 'Mauritius',
4522 'YT': 'Mayotte',
4523 'MX': 'Mexico',
4524 'FM': 'Micronesia, Federated States of',
4525 'MD': 'Moldova, Republic of',
4526 'MC': 'Monaco',
4527 'MN': 'Mongolia',
4528 'ME': 'Montenegro',
4529 'MS': 'Montserrat',
4530 'MA': 'Morocco',
4531 'MZ': 'Mozambique',
4532 'MM': 'Myanmar',
4533 'NA': 'Namibia',
4534 'NR': 'Nauru',
4535 'NP': 'Nepal',
4536 'NL': 'Netherlands',
4537 'NC': 'New Caledonia',
4538 'NZ': 'New Zealand',
4539 'NI': 'Nicaragua',
4540 'NE': 'Niger',
4541 'NG': 'Nigeria',
4542 'NU': 'Niue',
4543 'NF': 'Norfolk Island',
4544 'MP': 'Northern Mariana Islands',
4545 'NO': 'Norway',
4546 'OM': 'Oman',
4547 'PK': 'Pakistan',
4548 'PW': 'Palau',
4549 'PS': 'Palestine, State of',
4550 'PA': 'Panama',
4551 'PG': 'Papua New Guinea',
4552 'PY': 'Paraguay',
4553 'PE': 'Peru',
4554 'PH': 'Philippines',
4555 'PN': 'Pitcairn',
4556 'PL': 'Poland',
4557 'PT': 'Portugal',
4558 'PR': 'Puerto Rico',
4559 'QA': 'Qatar',
4560 'RE': 'Réunion',
4561 'RO': 'Romania',
4562 'RU': 'Russian Federation',
4563 'RW': 'Rwanda',
4564 'BL': 'Saint Barthélemy',
4565 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
4566 'KN': 'Saint Kitts and Nevis',
4567 'LC': 'Saint Lucia',
4568 'MF': 'Saint Martin (French part)',
4569 'PM': 'Saint Pierre and Miquelon',
4570 'VC': 'Saint Vincent and the Grenadines',
4571 'WS': 'Samoa',
4572 'SM': 'San Marino',
4573 'ST': 'Sao Tome and Principe',
4574 'SA': 'Saudi Arabia',
4575 'SN': 'Senegal',
4576 'RS': 'Serbia',
4577 'SC': 'Seychelles',
4578 'SL': 'Sierra Leone',
4579 'SG': 'Singapore',
4580 'SX': 'Sint Maarten (Dutch part)',
4581 'SK': 'Slovakia',
4582 'SI': 'Slovenia',
4583 'SB': 'Solomon Islands',
4584 'SO': 'Somalia',
4585 'ZA': 'South Africa',
4586 'GS': 'South Georgia and the South Sandwich Islands',
4587 'SS': 'South Sudan',
4588 'ES': 'Spain',
4589 'LK': 'Sri Lanka',
4590 'SD': 'Sudan',
4591 'SR': 'Suriname',
4592 'SJ': 'Svalbard and Jan Mayen',
4593 'SZ': 'Swaziland',
4594 'SE': 'Sweden',
4595 'CH': 'Switzerland',
4596 'SY': 'Syrian Arab Republic',
4597 'TW': 'Taiwan, Province of China',
4598 'TJ': 'Tajikistan',
4599 'TZ': 'Tanzania, United Republic of',
4600 'TH': 'Thailand',
4601 'TL': 'Timor-Leste',
4602 'TG': 'Togo',
4603 'TK': 'Tokelau',
4604 'TO': 'Tonga',
4605 'TT': 'Trinidad and Tobago',
4606 'TN': 'Tunisia',
4607 'TR': 'Turkey',
4608 'TM': 'Turkmenistan',
4609 'TC': 'Turks and Caicos Islands',
4610 'TV': 'Tuvalu',
4611 'UG': 'Uganda',
4612 'UA': 'Ukraine',
4613 'AE': 'United Arab Emirates',
4614 'GB': 'United Kingdom',
4615 'US': 'United States',
4616 'UM': 'United States Minor Outlying Islands',
4617 'UY': 'Uruguay',
4618 'UZ': 'Uzbekistan',
4619 'VU': 'Vanuatu',
4620 'VE': 'Venezuela, Bolivarian Republic of',
4621 'VN': 'Viet Nam',
4622 'VG': 'Virgin Islands, British',
4623 'VI': 'Virgin Islands, U.S.',
4624 'WF': 'Wallis and Futuna',
4625 'EH': 'Western Sahara',
4626 'YE': 'Yemen',
4627 'ZM': 'Zambia',
4628 'ZW': 'Zimbabwe',
4629 # Not ISO 3166 codes, but used for IP blocks
4630 'AP': 'Asia/Pacific Region',
4631 'EU': 'Europe',
4632 }
4633
4634 @classmethod
4635 def short2full(cls, code):
4636 """Convert an ISO 3166-2 country code to the corresponding full name"""
4637 return cls._country_map.get(code.upper())
4638
4639
4640 class GeoUtils:
4641 # Major IPv4 address blocks per country
4642 _country_ip_map = {
4643 'AD': '46.172.224.0/19',
4644 'AE': '94.200.0.0/13',
4645 'AF': '149.54.0.0/17',
4646 'AG': '209.59.64.0/18',
4647 'AI': '204.14.248.0/21',
4648 'AL': '46.99.0.0/16',
4649 'AM': '46.70.0.0/15',
4650 'AO': '105.168.0.0/13',
4651 'AP': '182.50.184.0/21',
4652 'AQ': '23.154.160.0/24',
4653 'AR': '181.0.0.0/12',
4654 'AS': '202.70.112.0/20',
4655 'AT': '77.116.0.0/14',
4656 'AU': '1.128.0.0/11',
4657 'AW': '181.41.0.0/18',
4658 'AX': '185.217.4.0/22',
4659 'AZ': '5.197.0.0/16',
4660 'BA': '31.176.128.0/17',
4661 'BB': '65.48.128.0/17',
4662 'BD': '114.130.0.0/16',
4663 'BE': '57.0.0.0/8',
4664 'BF': '102.178.0.0/15',
4665 'BG': '95.42.0.0/15',
4666 'BH': '37.131.0.0/17',
4667 'BI': '154.117.192.0/18',
4668 'BJ': '137.255.0.0/16',
4669 'BL': '185.212.72.0/23',
4670 'BM': '196.12.64.0/18',
4671 'BN': '156.31.0.0/16',
4672 'BO': '161.56.0.0/16',
4673 'BQ': '161.0.80.0/20',
4674 'BR': '191.128.0.0/12',
4675 'BS': '24.51.64.0/18',
4676 'BT': '119.2.96.0/19',
4677 'BW': '168.167.0.0/16',
4678 'BY': '178.120.0.0/13',
4679 'BZ': '179.42.192.0/18',
4680 'CA': '99.224.0.0/11',
4681 'CD': '41.243.0.0/16',
4682 'CF': '197.242.176.0/21',
4683 'CG': '160.113.0.0/16',
4684 'CH': '85.0.0.0/13',
4685 'CI': '102.136.0.0/14',
4686 'CK': '202.65.32.0/19',
4687 'CL': '152.172.0.0/14',
4688 'CM': '102.244.0.0/14',
4689 'CN': '36.128.0.0/10',
4690 'CO': '181.240.0.0/12',
4691 'CR': '201.192.0.0/12',
4692 'CU': '152.206.0.0/15',
4693 'CV': '165.90.96.0/19',
4694 'CW': '190.88.128.0/17',
4695 'CY': '31.153.0.0/16',
4696 'CZ': '88.100.0.0/14',
4697 'DE': '53.0.0.0/8',
4698 'DJ': '197.241.0.0/17',
4699 'DK': '87.48.0.0/12',
4700 'DM': '192.243.48.0/20',
4701 'DO': '152.166.0.0/15',
4702 'DZ': '41.96.0.0/12',
4703 'EC': '186.68.0.0/15',
4704 'EE': '90.190.0.0/15',
4705 'EG': '156.160.0.0/11',
4706 'ER': '196.200.96.0/20',
4707 'ES': '88.0.0.0/11',
4708 'ET': '196.188.0.0/14',
4709 'EU': '2.16.0.0/13',
4710 'FI': '91.152.0.0/13',
4711 'FJ': '144.120.0.0/16',
4712 'FK': '80.73.208.0/21',
4713 'FM': '119.252.112.0/20',
4714 'FO': '88.85.32.0/19',
4715 'FR': '90.0.0.0/9',
4716 'GA': '41.158.0.0/15',
4717 'GB': '25.0.0.0/8',
4718 'GD': '74.122.88.0/21',
4719 'GE': '31.146.0.0/16',
4720 'GF': '161.22.64.0/18',
4721 'GG': '62.68.160.0/19',
4722 'GH': '154.160.0.0/12',
4723 'GI': '95.164.0.0/16',
4724 'GL': '88.83.0.0/19',
4725 'GM': '160.182.0.0/15',
4726 'GN': '197.149.192.0/18',
4727 'GP': '104.250.0.0/19',
4728 'GQ': '105.235.224.0/20',
4729 'GR': '94.64.0.0/13',
4730 'GT': '168.234.0.0/16',
4731 'GU': '168.123.0.0/16',
4732 'GW': '197.214.80.0/20',
4733 'GY': '181.41.64.0/18',
4734 'HK': '113.252.0.0/14',
4735 'HN': '181.210.0.0/16',
4736 'HR': '93.136.0.0/13',
4737 'HT': '148.102.128.0/17',
4738 'HU': '84.0.0.0/14',
4739 'ID': '39.192.0.0/10',
4740 'IE': '87.32.0.0/12',
4741 'IL': '79.176.0.0/13',
4742 'IM': '5.62.80.0/20',
4743 'IN': '117.192.0.0/10',
4744 'IO': '203.83.48.0/21',
4745 'IQ': '37.236.0.0/14',
4746 'IR': '2.176.0.0/12',
4747 'IS': '82.221.0.0/16',
4748 'IT': '79.0.0.0/10',
4749 'JE': '87.244.64.0/18',
4750 'JM': '72.27.0.0/17',
4751 'JO': '176.29.0.0/16',
4752 'JP': '133.0.0.0/8',
4753 'KE': '105.48.0.0/12',
4754 'KG': '158.181.128.0/17',
4755 'KH': '36.37.128.0/17',
4756 'KI': '103.25.140.0/22',
4757 'KM': '197.255.224.0/20',
4758 'KN': '198.167.192.0/19',
4759 'KP': '175.45.176.0/22',
4760 'KR': '175.192.0.0/10',
4761 'KW': '37.36.0.0/14',
4762 'KY': '64.96.0.0/15',
4763 'KZ': '2.72.0.0/13',
4764 'LA': '115.84.64.0/18',
4765 'LB': '178.135.0.0/16',
4766 'LC': '24.92.144.0/20',
4767 'LI': '82.117.0.0/19',
4768 'LK': '112.134.0.0/15',
4769 'LR': '102.183.0.0/16',
4770 'LS': '129.232.0.0/17',
4771 'LT': '78.56.0.0/13',
4772 'LU': '188.42.0.0/16',
4773 'LV': '46.109.0.0/16',
4774 'LY': '41.252.0.0/14',
4775 'MA': '105.128.0.0/11',
4776 'MC': '88.209.64.0/18',
4777 'MD': '37.246.0.0/16',
4778 'ME': '178.175.0.0/17',
4779 'MF': '74.112.232.0/21',
4780 'MG': '154.126.0.0/17',
4781 'MH': '117.103.88.0/21',
4782 'MK': '77.28.0.0/15',
4783 'ML': '154.118.128.0/18',
4784 'MM': '37.111.0.0/17',
4785 'MN': '49.0.128.0/17',
4786 'MO': '60.246.0.0/16',
4787 'MP': '202.88.64.0/20',
4788 'MQ': '109.203.224.0/19',
4789 'MR': '41.188.64.0/18',
4790 'MS': '208.90.112.0/22',
4791 'MT': '46.11.0.0/16',
4792 'MU': '105.16.0.0/12',
4793 'MV': '27.114.128.0/18',
4794 'MW': '102.70.0.0/15',
4795 'MX': '187.192.0.0/11',
4796 'MY': '175.136.0.0/13',
4797 'MZ': '197.218.0.0/15',
4798 'NA': '41.182.0.0/16',
4799 'NC': '101.101.0.0/18',
4800 'NE': '197.214.0.0/18',
4801 'NF': '203.17.240.0/22',
4802 'NG': '105.112.0.0/12',
4803 'NI': '186.76.0.0/15',
4804 'NL': '145.96.0.0/11',
4805 'NO': '84.208.0.0/13',
4806 'NP': '36.252.0.0/15',
4807 'NR': '203.98.224.0/19',
4808 'NU': '49.156.48.0/22',
4809 'NZ': '49.224.0.0/14',
4810 'OM': '5.36.0.0/15',
4811 'PA': '186.72.0.0/15',
4812 'PE': '186.160.0.0/14',
4813 'PF': '123.50.64.0/18',
4814 'PG': '124.240.192.0/19',
4815 'PH': '49.144.0.0/13',
4816 'PK': '39.32.0.0/11',
4817 'PL': '83.0.0.0/11',
4818 'PM': '70.36.0.0/20',
4819 'PR': '66.50.0.0/16',
4820 'PS': '188.161.0.0/16',
4821 'PT': '85.240.0.0/13',
4822 'PW': '202.124.224.0/20',
4823 'PY': '181.120.0.0/14',
4824 'QA': '37.210.0.0/15',
4825 'RE': '102.35.0.0/16',
4826 'RO': '79.112.0.0/13',
4827 'RS': '93.86.0.0/15',
4828 'RU': '5.136.0.0/13',
4829 'RW': '41.186.0.0/16',
4830 'SA': '188.48.0.0/13',
4831 'SB': '202.1.160.0/19',
4832 'SC': '154.192.0.0/11',
4833 'SD': '102.120.0.0/13',
4834 'SE': '78.64.0.0/12',
4835 'SG': '8.128.0.0/10',
4836 'SI': '188.196.0.0/14',
4837 'SK': '78.98.0.0/15',
4838 'SL': '102.143.0.0/17',
4839 'SM': '89.186.32.0/19',
4840 'SN': '41.82.0.0/15',
4841 'SO': '154.115.192.0/18',
4842 'SR': '186.179.128.0/17',
4843 'SS': '105.235.208.0/21',
4844 'ST': '197.159.160.0/19',
4845 'SV': '168.243.0.0/16',
4846 'SX': '190.102.0.0/20',
4847 'SY': '5.0.0.0/16',
4848 'SZ': '41.84.224.0/19',
4849 'TC': '65.255.48.0/20',
4850 'TD': '154.68.128.0/19',
4851 'TG': '196.168.0.0/14',
4852 'TH': '171.96.0.0/13',
4853 'TJ': '85.9.128.0/18',
4854 'TK': '27.96.24.0/21',
4855 'TL': '180.189.160.0/20',
4856 'TM': '95.85.96.0/19',
4857 'TN': '197.0.0.0/11',
4858 'TO': '175.176.144.0/21',
4859 'TR': '78.160.0.0/11',
4860 'TT': '186.44.0.0/15',
4861 'TV': '202.2.96.0/19',
4862 'TW': '120.96.0.0/11',
4863 'TZ': '156.156.0.0/14',
4864 'UA': '37.52.0.0/14',
4865 'UG': '102.80.0.0/13',
4866 'US': '6.0.0.0/8',
4867 'UY': '167.56.0.0/13',
4868 'UZ': '84.54.64.0/18',
4869 'VA': '212.77.0.0/19',
4870 'VC': '207.191.240.0/21',
4871 'VE': '186.88.0.0/13',
4872 'VG': '66.81.192.0/20',
4873 'VI': '146.226.0.0/16',
4874 'VN': '14.160.0.0/11',
4875 'VU': '202.80.32.0/20',
4876 'WF': '117.20.32.0/21',
4877 'WS': '202.4.32.0/19',
4878 'YE': '134.35.0.0/16',
4879 'YT': '41.242.116.0/22',
4880 'ZA': '41.0.0.0/11',
4881 'ZM': '102.144.0.0/13',
4882 'ZW': '102.177.192.0/18',
4883 }
4884
4885 @classmethod
4886 def random_ipv4(cls, code_or_block):
4887 if len(code_or_block) == 2:
4888 block = cls._country_ip_map.get(code_or_block.upper())
4889 if not block:
4890 return None
4891 else:
4892 block = code_or_block
4893 addr, preflen = block.split('/')
4894 addr_min = struct.unpack('!L', socket.inet_aton(addr))[0]
4895 addr_max = addr_min | (0xffffffff >> int(preflen))
4896 return str(socket.inet_ntoa(
4897 struct.pack('!L', random.randint(addr_min, addr_max))))
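
# Illustrative usage of GeoUtils.random_ipv4(): the host bits
# (0xffffffff >> prefix length) are randomized, so any address inside the
# block may be returned; 2-letter country codes use the table above:
#     >>> ip = GeoUtils.random_ipv4('192.168.0.0/30')
#     >>> ip.startswith('192.168.0.')
#     True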
4898
4899
4900 class PerRequestProxyHandler(urllib.request.ProxyHandler):
4901 def __init__(self, proxies=None):
4902 # Set default handlers
4903 for type in ('http', 'https'):
4904 setattr(self, '%s_open' % type,
4905 lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
4906 meth(r, proxy, type))
4907 urllib.request.ProxyHandler.__init__(self, proxies)
4908
4909 def proxy_open(self, req, proxy, type):
4910 req_proxy = req.headers.get('Ytdl-request-proxy')
4911 if req_proxy is not None:
4912 proxy = req_proxy
4913 del req.headers['Ytdl-request-proxy']
4914
4915 if proxy == '__noproxy__':
4916 return None # No Proxy
4917 if urllib.parse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
4918 req.add_header('Ytdl-socks-proxy', proxy)
4919 # yt-dlp's http/https handlers take care of wrapping the socket with SOCKS
4920 return None
4921 return urllib.request.ProxyHandler.proxy_open(
4922 self, req, proxy, type)
4923
4924
4925 # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
4926 # released into Public Domain
4927 # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
4928
4929 def long_to_bytes(n, blocksize=0):
4930 """long_to_bytes(n:long, blocksize:int) : string
4931 Convert a long integer to a byte string.
4932
4933 If optional blocksize is given and greater than zero, pad the front of the
4934 byte string with binary zeros so that the length is a multiple of
4935 blocksize.
4936 """
4937 # after much testing, this algorithm was deemed to be the fastest
4938 s = b''
4939 n = int(n)
4940 while n > 0:
4941 s = struct.pack('>I', n & 0xffffffff) + s
4942 n = n >> 32
4943 # strip off leading zeros
4944 for i in range(len(s)):
4945 if s[i] != b'\000'[0]:
4946 break
4947 else:
4948 # only happens when n == 0
4949 s = b'\000'
4950 i = 0
4951 s = s[i:]
4952 # add back some pad bytes. this could be done more efficiently w.r.t. the
4953 # de-padding being done above, but sigh...
4954 if blocksize > 0 and len(s) % blocksize:
4955 s = (blocksize - len(s) % blocksize) * b'\000' + s
4956 return s
4957
4958
4959 def bytes_to_long(s):
4960 """bytes_to_long(string) : long
4961 Convert a byte string to a long integer.
4962
4963 This is (essentially) the inverse of long_to_bytes().
4964 """
4965 acc = 0
4966 length = len(s)
4967 if length % 4:
4968 extra = (4 - length % 4)
4969 s = b'\000' * extra + s
4970 length = length + extra
4971 for i in range(0, length, 4):
4972 acc = (acc << 32) + struct.unpack('>I', s[i:i + 4])[0]
4973 return acc
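
# Illustrative round trip between the two helpers:
#     >>> long_to_bytes(256)
#     b'\x01\x00'
#     >>> bytes_to_long(b'\x01\x00')
#     256
#     >>> long_to_bytes(1, blocksize=4)
#     b'\x00\x00\x00\x01'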
4974
4975
4976 def ohdave_rsa_encrypt(data, exponent, modulus):
4977 '''
4978 Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
4979
4980 Input:
4981 data: data to encrypt, bytes-like object
4982 exponent, modulus: parameter e and N of RSA algorithm, both integer
4983 Output: hex string of encrypted data
4984
4985 Limitation: supports one block encryption only
4986 '''
4987
4988 payload = int(binascii.hexlify(data[::-1]), 16)
4989 encrypted = pow(payload, exponent, modulus)
4990 return '%x' % encrypted
4991
4992
4993 def pkcs1pad(data, length):
4994 """
4995 Padding input data with PKCS#1 scheme
4996
4997 @param {int[]} data input data
4998 @param {int} length target length
4999 @returns {int[]} padded data
5000 """
5001 if len(data) > length - 11:
5002 raise ValueError('Input data too long for PKCS#1 padding')
5003
5004 pseudo_random = [random.randint(1, 255) for _ in range(length - len(data) - 3)]  # PKCS#1 v1.5 requires nonzero padding bytes
5005 return [0, 2] + pseudo_random + [0] + data
5006
5007
5008 def _base_n_table(n, table):
5009 if not table and not n:
5010 raise ValueError('Either table or n must be specified')
5011 table = (table or '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')[:n]
5012
5013 if n and n != len(table):
5014 raise ValueError(f'base {n} exceeds table length {len(table)}')
5015 return table
5016
5017
5018 def encode_base_n(num, n=None, table=None):
5019 """Convert given int to a base-n string"""
5020 table = _base_n_table(n, table)
5021 if not num:
5022 return table[0]
5023
5024 result, base = '', len(table)
5025 while num:
5026 result = table[num % base] + result
5027 num = num // base
5028 return result
5029
5030
5031 def decode_base_n(string, n=None, table=None):
5032 """Convert given base-n string to int"""
5033 table = {char: index for index, char in enumerate(_base_n_table(n, table))}
5034 result, base = 0, len(table)
5035 for char in string:
5036 result = result * base + table[char]
5037 return result
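
# Illustrative usage of the base-n helpers (a custom digit table may replace n):
#     >>> encode_base_n(255, 16)
#     'ff'
#     >>> decode_base_n('ff', 16)
#     255
#     >>> encode_base_n(5, table='01')
#     '101'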
5038
5039
5040 def decode_base(value, digits):
5041 deprecation_warning(f'{__name__}.decode_base is deprecated and may be removed '
5042 f'in a future version. Use {__name__}.decode_base_n instead')
5043 return decode_base_n(value, table=digits)
5044
5045
5046 def decode_packed_codes(code):
5047 mobj = re.search(PACKED_CODES_RE, code)
5048 obfuscated_code, base, count, symbols = mobj.groups()
5049 base = int(base)
5050 count = int(count)
5051 symbols = symbols.split('|')
5052 symbol_table = {}
5053
5054 while count:
5055 count -= 1
5056 base_n_count = encode_base_n(count, base)
5057 symbol_table[base_n_count] = symbols[count] or base_n_count
5058
5059 return re.sub(
5060 r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
5061 obfuscated_code)
5062
5063
5064 def caesar(s, alphabet, shift):
5065 if shift == 0:
5066 return s
5067 l = len(alphabet)
5068 return ''.join(
5069 alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
5070 for c in s)
5071
5072
5073 def rot47(s):
5074 return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)
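
# Illustrative usage: rot47 shifts within the 94 printable ASCII characters
# and is therefore its own inverse:
#     >>> rot47('Hello')
#     'w6==@'
#     >>> rot47(rot47('Hello'))
#     'Hello'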
5075
5076
5077 def parse_m3u8_attributes(attrib):
5078 info = {}
5079 for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
5080 if val.startswith('"'):
5081 val = val[1:-1]
5082 info[key] = val
5083 return info
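
# Illustrative usage of parse_m3u8_attributes() on an EXT-X-STREAM-INF attribute list:
#     >>> parse_m3u8_attributes('BANDWIDTH=1280000,CODECS="avc1.4d401f,mp4a.40.2"')
#     {'BANDWIDTH': '1280000', 'CODECS': 'avc1.4d401f,mp4a.40.2'}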
5084
5085
5086 def urshift(val, n):
5087 return val >> n if val >= 0 else (val + 0x100000000) >> n
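
# Illustrative usage: urshift() emulates JavaScript's unsigned right shift (>>>)
# for 32-bit values, where Python's >> would keep the sign:
#     >>> urshift(-1, 1)
#     2147483647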
5088
5089
5090 # Based on png2str() written by @gdkchan and improved by @yokrysty
5091 # Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
5092 def decode_png(png_data):
5093 # Reference: https://www.w3.org/TR/PNG/
5094 header = png_data[8:]
5095
5096 if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
5097 raise OSError('Not a valid PNG file.')
5098
5099 int_map = {1: '>B', 2: '>H', 4: '>I'}
5100 unpack_integer = lambda x: struct.unpack(int_map[len(x)], x)[0]
5101
5102 chunks = []
5103
5104 while header:
5105 length = unpack_integer(header[:4])
5106 header = header[4:]
5107
5108 chunk_type = header[:4]
5109 header = header[4:]
5110
5111 chunk_data = header[:length]
5112 header = header[length:]
5113
5114 header = header[4:] # Skip CRC
5115
5116 chunks.append({
5117 'type': chunk_type,
5118 'length': length,
5119 'data': chunk_data
5120 })
5121
5122 ihdr = chunks[0]['data']
5123
5124 width = unpack_integer(ihdr[:4])
5125 height = unpack_integer(ihdr[4:8])
5126
5127 idat = b''
5128
5129 for chunk in chunks:
5130 if chunk['type'] == b'IDAT':
5131 idat += chunk['data']
5132
5133 if not idat:
5134 raise OSError('Unable to read PNG data.')
5135
5136 decompressed_data = bytearray(zlib.decompress(idat))
5137
5138 stride = width * 3
5139 pixels = []
5140
5141 def _get_pixel(idx):
5142 x = idx % stride
5143 y = idx // stride
5144 return pixels[y][x]
5145
5146 for y in range(height):
5147 basePos = y * (1 + stride)
5148 filter_type = decompressed_data[basePos]
5149
5150 current_row = []
5151
5152 pixels.append(current_row)
5153
5154 for x in range(stride):
5155 color = decompressed_data[1 + basePos + x]
5156 basex = y * stride + x
5157 left = 0
5158 up = 0
5159
5160 if x > 2:
5161 left = _get_pixel(basex - 3)
5162 if y > 0:
5163 up = _get_pixel(basex - stride)
5164
5165 if filter_type == 1: # Sub
5166 color = (color + left) & 0xff
5167 elif filter_type == 2: # Up
5168 color = (color + up) & 0xff
5169 elif filter_type == 3: # Average
5170 color = (color + ((left + up) >> 1)) & 0xff
5171 elif filter_type == 4: # Paeth
5172 a = left
5173 b = up
5174 c = 0
5175
5176 if x > 2 and y > 0:
5177 c = _get_pixel(basex - stride - 3)
5178
5179 p = a + b - c
5180
5181 pa = abs(p - a)
5182 pb = abs(p - b)
5183 pc = abs(p - c)
5184
5185 if pa <= pb and pa <= pc:
5186 color = (color + a) & 0xff
5187 elif pb <= pc:
5188 color = (color + b) & 0xff
5189 else:
5190 color = (color + c) & 0xff
5191
5192 current_row.append(color)
5193
5194 return width, height, pixels
5195
5196
5197 def write_xattr(path, key, value):
5198 # Windows: Write xattrs to NTFS Alternate Data Streams:
5199 # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
5200 if compat_os_name == 'nt':
5201 assert ':' not in key
5202 assert os.path.exists(path)
5203
5204 try:
5205 with open(f'{path}:{key}', 'wb') as f:
5206 f.write(value)
5207 except OSError as e:
5208 raise XAttrMetadataError(e.errno, e.strerror)
5209 return
5210
5211 # UNIX Method 1. Use the xattr/pyxattr modules
5212
5213 setxattr = None
5214 if getattr(xattr, '_yt_dlp__identifier', None) == 'pyxattr':
5215 # Unicode arguments are not supported in pyxattr until version 0.5.0
5216 # See https://github.com/ytdl-org/youtube-dl/issues/5498
5217 if version_tuple(xattr.__version__) >= (0, 5, 0):
5218 setxattr = xattr.set
5219 elif xattr:
5220 setxattr = xattr.setxattr
5221
5222 if setxattr:
5223 try:
5224 setxattr(path, key, value)
5225 except OSError as e:
5226 raise XAttrMetadataError(e.errno, e.strerror)
5227 return
5228
5229 # UNIX Method 2. Use setfattr/xattr executables
5230 exe = ('setfattr' if check_executable('setfattr', ['--version'])
5231 else 'xattr' if check_executable('xattr', ['-h']) else None)
5232 if not exe:
5233 raise XAttrUnavailableError(
5234 'Couldn\'t find a tool to set the xattrs. Install either the python "xattr" or "pyxattr" modules or the '
5235 + ('"xattr" binary' if sys.platform != 'linux' else 'GNU "attr" package (which contains the "setfattr" tool)'))
5236
5237 value = value.decode()
5238 try:
5239 _, stderr, returncode = Popen.run(
5240 [exe, '-w', key, value, path] if exe == 'xattr' else [exe, '-n', key, '-v', value, path],
5241 text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
5242 except OSError as e:
5243 raise XAttrMetadataError(e.errno, e.strerror)
5244 if returncode:
5245 raise XAttrMetadataError(returncode, stderr)
5246
5247
5248 def random_birthday(year_field, month_field, day_field):
5249 start_date = datetime.date(1950, 1, 1)
5250 end_date = datetime.date(1995, 12, 31)
5251 offset = random.randint(0, (end_date - start_date).days)
5252 random_date = start_date + datetime.timedelta(offset)
5253 return {
5254 year_field: str(random_date.year),
5255 month_field: str(random_date.month),
5256 day_field: str(random_date.day),
5257 }
5258
5259
5260 def find_available_port(interface=''):
5261 try:
5262 with socket.socket() as sock:
5263 sock.bind((interface, 0))
5264 return sock.getsockname()[1]
5265 except OSError:
5266 return None
5267
5268
5269 # Templates for internet shortcut files, which are plain text files.
5270 DOT_URL_LINK_TEMPLATE = '''\
5271 [InternetShortcut]
5272 URL=%(url)s
5273 '''
5274
5275 DOT_WEBLOC_LINK_TEMPLATE = '''\
5276 <?xml version="1.0" encoding="UTF-8"?>
5277 <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
5278 <plist version="1.0">
5279 <dict>
5280 \t<key>URL</key>
5281 \t<string>%(url)s</string>
5282 </dict>
5283 </plist>
5284 '''
5285
5286 DOT_DESKTOP_LINK_TEMPLATE = '''\
5287 [Desktop Entry]
5288 Encoding=UTF-8
5289 Name=%(filename)s
5290 Type=Link
5291 URL=%(url)s
5292 Icon=text-html
5293 '''
5294
5295 LINK_TEMPLATES = {
5296 'url': DOT_URL_LINK_TEMPLATE,
5297 'desktop': DOT_DESKTOP_LINK_TEMPLATE,
5298 'webloc': DOT_WEBLOC_LINK_TEMPLATE,
5299 }
5300
5301
5302 def iri_to_uri(iri):
5303 """
5304 Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).
5305
5306 The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact.
5307 """
5308
5309 iri_parts = urllib.parse.urlparse(iri)
5310
5311 if '[' in iri_parts.netloc:
5312 raise ValueError('IPv6 URIs are not yet supported.')
5313 # Querying `.netloc`, when there's only one bracket, also raises a ValueError.
5314
5315 # The `safe` argument values used by the following code contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.
5316
5317 net_location = ''
5318 if iri_parts.username:
5319 net_location += urllib.parse.quote(iri_parts.username, safe=r"!$%&'()*+,~")
5320 if iri_parts.password is not None:
5321 net_location += ':' + urllib.parse.quote(iri_parts.password, safe=r"!$%&'()*+,~")
5322 net_location += '@'
5323
5324 net_location += iri_parts.hostname.encode('idna').decode() # Punycode for Unicode hostnames.
5325 # The 'idna' encoding produces ASCII text.
5326 if iri_parts.port is not None and iri_parts.port != 80:
5327 net_location += ':' + str(iri_parts.port)
5328
5329 return urllib.parse.urlunparse(
5330 (iri_parts.scheme,
5331 net_location,
5332
5333 urllib.parse.quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),
5334
5335 # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
5336 urllib.parse.quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),
5337
5338 # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
5339 urllib.parse.quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),
5340
5341 urllib.parse.quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))
5342
5343 # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
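
# Illustrative usage of iri_to_uri() (non-ASCII parts are percent-encoded as UTF-8):
#     >>> iri_to_uri('http://example.com/ä?q=ü')
#     'http://example.com/%C3%A4?q=%C3%BC'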
5344
5345
5346 def to_high_limit_path(path):
5347 if sys.platform in ['win32', 'cygwin']:
5348 # Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited.
5349 return '\\\\?\\' + os.path.abspath(path)
5350
5351 return path
5352
5353
5354 def format_field(obj, field=None, template='%s', ignore=NO_DEFAULT, default='', func=IDENTITY):
5355 val = traverse_obj(obj, *variadic(field))
5356 if (not val and val != 0) if ignore is NO_DEFAULT else val in variadic(ignore):
5357 return default
5358 return template % func(val)
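
# Illustrative usage of format_field():
#     >>> format_field({'like_count': 100}, 'like_count', '%d likes')
#     '100 likes'
#     >>> format_field({}, 'like_count', '%d likes', default='no likes')
#     'no likes'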
5359
5360
5361 def clean_podcast_url(url):
5362 return re.sub(r'''(?x)
5363 (?:
5364 (?:
5365 chtbl\.com/track|
5366 media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
5367 play\.podtrac\.com
5368 )/[^/]+|
5369 (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
5370 flex\.acast\.com|
5371 pd(?:
5372 cn\.co| # https://podcorn.com/analytics-prefix/
5373 st\.fm # https://podsights.com/docs/
5374 )/e
5375 )/''', '', url)
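
# Illustrative usage of clean_podcast_url() on a hypothetical tracker-wrapped URL:
#     >>> clean_podcast_url('https://www.podtrac.com/pts/redirect.mp3/example.com/episode.mp3')
#     'https://example.com/episode.mp3'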
5376
5377
5378 _HEX_TABLE = '0123456789abcdef'
5379
5380
5381 def random_uuidv4():
5382 return re.sub(r'[xy]', lambda m: _HEX_TABLE[random.randint(0, 15) if m.group(0) == 'x' else random.randint(8, 11)], 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')  # 'y' must carry the RFC 4122 variant bits (8, 9, a or b)
5383
5384
5385 def make_dir(path, to_screen=None):
5386 try:
5387 dn = os.path.dirname(path)
5388 if dn:
5389 os.makedirs(dn, exist_ok=True)
5390 return True
5391 except OSError as err:
5392 if callable(to_screen):  # callable() returns a bool; comparing it to None was always true
5393 to_screen('unable to create directory ' + error_to_compat_str(err))
5394 return False
5395
5396
5397 def get_executable_path():
5398 from .update import _get_variant_and_executable_path
5399
5400 return os.path.dirname(os.path.abspath(_get_variant_and_executable_path()[1]))
5401
5402
5403 def get_user_config_dirs(package_name):
5404 # .config (e.g. ~/.config/package_name)
5405 xdg_config_home = os.getenv('XDG_CONFIG_HOME') or compat_expanduser('~/.config')
5406 yield os.path.join(xdg_config_home, package_name)
5407
5408 # appdata (%APPDATA%/package_name)
5409 appdata_dir = os.getenv('appdata')
5410 if appdata_dir:
5411 yield os.path.join(appdata_dir, package_name)
5412
5413 # home (~/.package_name)
5414 yield os.path.join(compat_expanduser('~'), f'.{package_name}')
5415
5416
5417 def get_system_config_dirs(package_name):
5418 # /etc/package_name
5419 yield os.path.join('/etc', package_name)
5420
5421
5422 def traverse_obj(
5423 obj, *paths, default=NO_DEFAULT, expected_type=None, get_all=True,
5424 casesense=True, is_user_input=False, traverse_string=False):
5425 """
5426 Safely traverse nested `dict`s and `Sequence`s
5427
5428 >>> obj = [{}, {"key": "value"}]
5429 >>> traverse_obj(obj, (1, "key"))
5430 "value"
5431
5432 Each of the provided `paths` is tested and the first producing a valid result will be returned.
5433 The next path will also be tested if the path branched but no results could be found.
5434 Supported values for traversal are `Mapping`, `Sequence` and `re.Match`.
5435 Unhelpful values (`{}`, `None`) are treated as the absence of a value and discarded.
5436
5437 The paths will be wrapped in `variadic`, so that `'key'` is conveniently the same as `('key', )`.
5438
5439 The keys in the path can be one of:
5440 - `None`: Return the current object.
5441 - `set`: Requires the only item in the set to be a type or function,
5442 like `{type}`/`{func}`. If a `type`, returns only values
5443 of this type. If a function, returns `func(obj)`.
5444 - `str`/`int`: Return `obj[key]`. For `re.Match`, return `obj.group(key)`.
5445 - `slice`: Branch out and return all values in `obj[key]`.
5446 - `Ellipsis`: Branch out and return a list of all values.
5447 - `tuple`/`list`: Branch out and return a list of all matching values.
5448 Read as: `[traverse_obj(obj, branch) for branch in branches]`.
5449 - `function`: Branch out and return values filtered by the function.
5450 Read as: `[value for key, value in obj if function(key, value)]`.
5451 For `Sequence`s, `key` is the index of the value.
5452 For `re.Match`es, `key` is the group number (0 = full match)
5453 as well as additionally any group names, if given.
5454 - `dict`: Transform the current object and return a matching dict.
5455 Read as: `{key: traverse_obj(obj, path) for key, path in dct.items()}`.
5456
5457 `tuple`, `list`, and `dict` all support nested paths and branches.
5458
5459 @params paths Paths which to traverse by.
5460 @param default Value to return if the paths do not match.
5461 If the last key in the path is a `dict`, it will apply to each value inside
5462 the dict instead, depth first. Try to avoid if using nested `dict` keys.
5463 @param expected_type If a `type`, only accept final values of this type.
5464 If any other callable, try to call the function on each result.
5465 If the last key in the path is a `dict`, it will apply to each value inside
5466 the dict instead, recursively. This does respect branching paths.
5467 @param get_all If `False`, return the first matching result, otherwise all matching ones.
5468 @param casesense If `False`, consider string dictionary keys as case insensitive.
5469
5470 The following are only meant to be used by YoutubeDL.prepare_outtmpl and are not part of the API
5471
5472 @param is_user_input Whether the keys are generated from user input.
5473 If `True` strings get converted to `int`/`slice` if needed.
5474 @param traverse_string Whether to traverse into objects as strings.
5475 If `True`, any non-compatible object will first be
5476 converted into a string and then traversed into.
5477 The return value of that path will be a string instead,
5478 not respecting any further branching.
5479
5480
5481 @returns The result of the object traversal.
5482 If successful, `get_all=True`, and the path branches at least once,
5483 then a list of results is returned instead.
5484 If no `default` is given and the last path branches, a `list` of results
5485 is always returned. If a path ends on a `dict` that result will always be a `dict`.
5486 """
5487 is_sequence = lambda x: isinstance(x, collections.abc.Sequence) and not isinstance(x, (str, bytes))
5488 casefold = lambda k: k.casefold() if isinstance(k, str) else k
5489
5490 if isinstance(expected_type, type):
5491 type_test = lambda val: val if isinstance(val, expected_type) else None
5492 else:
5493 type_test = lambda val: try_call(expected_type or IDENTITY, args=(val,))
5494
5495 def apply_key(key, obj, is_last):
5496 branching = False
5497 result = None
5498
5499 if obj is None and traverse_string:
5500 pass
5501
5502 elif key is None:
5503 result = obj
5504
5505 elif isinstance(key, set):
5506 assert len(key) == 1, 'Set should only be used to wrap a single item'
5507 item = next(iter(key))
5508 if isinstance(item, type):
5509 if isinstance(obj, item):
5510 result = obj
5511 else:
5512 result = try_call(item, args=(obj,))
5513
5514 elif isinstance(key, (list, tuple)):
5515 branching = True
5516 result = itertools.chain.from_iterable(
5517 apply_path(obj, branch, is_last)[0] for branch in key)
5518
5519 elif key is ...:
5520 branching = True
5521 if isinstance(obj, collections.abc.Mapping):
5522 result = obj.values()
5523 elif is_sequence(obj):
5524 result = obj
5525 elif isinstance(obj, re.Match):
5526 result = obj.groups()
5527 elif traverse_string:
5528 branching = False
5529 result = str(obj)
5530 else:
5531 result = ()
5532
5533 elif callable(key):
5534 branching = True
5535 if isinstance(obj, collections.abc.Mapping):
5536 iter_obj = obj.items()
5537 elif is_sequence(obj):
5538 iter_obj = enumerate(obj)
5539 elif isinstance(obj, re.Match):
5540 iter_obj = itertools.chain(
5541 enumerate((obj.group(), *obj.groups())),
5542 obj.groupdict().items())
5543 elif traverse_string:
5544 branching = False
5545 iter_obj = enumerate(str(obj))
5546 else:
5547 iter_obj = ()
5548
5549 result = (v for k, v in iter_obj if try_call(key, args=(k, v)))
5550 if not branching: # string traversal
5551 result = ''.join(result)
5552
5553 elif isinstance(key, dict):
5554 iter_obj = ((k, _traverse_obj(obj, v, False, is_last)) for k, v in key.items())
5555 result = {
5556 k: v if v is not None else default for k, v in iter_obj
5557 if v is not None or default is not NO_DEFAULT
5558 } or None
5559
5560 elif isinstance(obj, collections.abc.Mapping):
5561 result = (obj.get(key) if casesense or (key in obj) else
5562 next((v for k, v in obj.items() if casefold(k) == key), None))
5563
5564 elif isinstance(obj, re.Match):
5565 if isinstance(key, int) or casesense:
5566 with contextlib.suppress(IndexError):
5567 result = obj.group(key)
5568
5569 elif isinstance(key, str):
5570 result = next((v for k, v in obj.groupdict().items() if casefold(k) == key), None)
5571
5572 elif isinstance(key, (int, slice)):
5573 if is_sequence(obj):
5574 branching = isinstance(key, slice)
5575 with contextlib.suppress(IndexError):
5576 result = obj[key]
5577 elif traverse_string:
5578 with contextlib.suppress(IndexError):
5579 result = str(obj)[key]
5580
5581 return branching, result if branching else (result,)
5582
5583 def lazy_last(iterable):
5584 iterator = iter(iterable)
5585 prev = next(iterator, NO_DEFAULT)
5586 if prev is NO_DEFAULT:
5587 return
5588
5589 for item in iterator:
5590 yield False, prev
5591 prev = item
5592
5593 yield True, prev
5594
5595 def apply_path(start_obj, path, test_type):
5596 objs = (start_obj,)
5597 has_branched = False
5598
5599 key = None
5600 for last, key in lazy_last(variadic(path, (str, bytes, dict, set))):
5601 if is_user_input and isinstance(key, str):
5602 if key == ':':
5603 key = ...
5604 elif ':' in key:
5605 key = slice(*map(int_or_none, key.split(':')))
5606 elif int_or_none(key) is not None:
5607 key = int(key)
5608
5609 if not casesense and isinstance(key, str):
5610 key = key.casefold()
5611
5612 if __debug__ and callable(key):
5613 # Verify function signature
5614 inspect.signature(key).bind(None, None)
5615
5616 new_objs = []
5617 for obj in objs:
5618 branching, results = apply_key(key, obj, last)
5619 has_branched |= branching
5620 new_objs.append(results)
5621
5622 objs = itertools.chain.from_iterable(new_objs)
5623
5624 if test_type and not isinstance(key, (dict, list, tuple)):
5625 objs = map(type_test, objs)
5626
5627 return objs, has_branched, isinstance(key, dict)
5628
5629 def _traverse_obj(obj, path, allow_empty, test_type):
5630 results, has_branched, is_dict = apply_path(obj, path, test_type)
5631 results = LazyList(item for item in results if item not in (None, {}))
5632 if get_all and has_branched:
5633 if results:
5634 return results.exhaust()
5635 if allow_empty:
5636 return [] if default is NO_DEFAULT else default
5637 return None
5638
5639 return results[0] if results else {} if allow_empty and is_dict else None
5640
5641 for index, path in enumerate(paths, 1):
5642 result = _traverse_obj(obj, path, index == len(paths), True)
5643 if result is not None:
5644 return result
5645
5646 return None if default is NO_DEFAULT else default
5647
5648
5649 def traverse_dict(dictn, keys, casesense=True):
5650 deprecation_warning(f'"{__name__}.traverse_dict" is deprecated and may be removed '
5651 f'in a future version. Use "{__name__}.traverse_obj" instead')
5652 return traverse_obj(dictn, keys, casesense=casesense, is_user_input=True, traverse_string=True)
5653
5654
5655 def get_first(obj, keys, **kwargs):
5656 return traverse_obj(obj, (..., *variadic(keys)), **kwargs, get_all=False)
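
# Example (illustrative):
# >>> get_first([{'id': None}, {'id': 'abc'}], 'id')
# 'abc'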
5657
5658
5659 def time_seconds(**kwargs):
5660 """
5661 Returns the current time shifted by the given timezone offset, in seconds since the epoch (1970-01-01T00:00:00Z)
5662 """
5663 return time.time() + datetime.timedelta(**kwargs).total_seconds()
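
# e.g. time_seconds(hours=9) is the current Unix time shifted by +09:00 (the JST wall clock
# expressed as epoch-style seconds); time_seconds() with no arguments is simply time.time()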
5664
5665
5666 # create a JSON Web Signature (JWS) with the HS256 algorithm
5667 # the resulting format is in JWS Compact Serialization
5668 # implemented following JWT https://www.rfc-editor.org/rfc/rfc7519.html
5669 # implemented following JWS https://www.rfc-editor.org/rfc/rfc7515.html
5670 def jwt_encode_hs256(payload_data, key, headers={}):
5671 header_data = {
5672 'alg': 'HS256',
5673 'typ': 'JWT',
5674 }
5675 if headers:
5676 header_data.update(headers)
5677 header_b64 = base64.b64encode(json.dumps(header_data).encode())
5678 payload_b64 = base64.b64encode(json.dumps(payload_data).encode())
5679 h = hmac.new(key.encode(), header_b64 + b'.' + payload_b64, hashlib.sha256)
5680 signature_b64 = base64.b64encode(h.digest())
5681 token = header_b64 + b'.' + payload_b64 + b'.' + signature_b64
5682 return token
5683
5684
5685 # can be extended in the future to verify the signature and parse the header, returning the algorithm used if it's not HS256
5686 def jwt_decode_hs256(jwt):
5687 header_b64, payload_b64, signature_b64 = jwt.split('.')
5688 # add trailing ='s that may have been stripped, superfluous ='s are ignored
5689 payload_data = json.loads(base64.urlsafe_b64decode(f'{payload_b64}==='))
5690 return payload_data
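
# Round-trip sketch (illustrative; key and payload are hypothetical):
# >>> token = jwt_encode_hs256({'user': 'test'}, 'secret')
# >>> jwt_decode_hs256(token.decode())
# {'user': 'test'}
# NB: RFC 7515 specifies unpadded base64url, while jwt_encode_hs256 emits standard base64;
# the decode above still works because urlsafe_b64decode tolerates the standard alphabet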
5691
5692
5693 WINDOWS_VT_MODE = False if compat_os_name == 'nt' else None
5694
5695
5696 @functools.cache
5697 def supports_terminal_sequences(stream):
5698 if compat_os_name == 'nt':
5699 if not WINDOWS_VT_MODE:
5700 return False
5701 elif not os.getenv('TERM'):
5702 return False
5703 try:
5704 return stream.isatty()
5705 except BaseException:
5706 return False
5707
5708
5709 def windows_enable_vt_mode():
5710 """Ref: https://bugs.python.org/issue30075 """
5711 if get_windows_version() < (10, 0, 10586):
5712 return
5713
5714 import ctypes
5715 import ctypes.wintypes
5716 import msvcrt
5717
5718 ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
5719
5720 dll = ctypes.WinDLL('kernel32', use_last_error=False)
5721 handle = os.open('CONOUT$', os.O_RDWR)
5722 try:
5723 h_out = ctypes.wintypes.HANDLE(msvcrt.get_osfhandle(handle))
5724 dw_original_mode = ctypes.wintypes.DWORD()
5725 success = dll.GetConsoleMode(h_out, ctypes.byref(dw_original_mode))
5726 if not success:
5727 raise Exception('GetConsoleMode failed')
5728
5729 success = dll.SetConsoleMode(h_out, ctypes.wintypes.DWORD(
5730 dw_original_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING))
5731 if not success:
5732 raise Exception('SetConsoleMode failed')
5733 finally:
5734 os.close(handle)
5735
5736 global WINDOWS_VT_MODE
5737 WINDOWS_VT_MODE = True
5738 supports_terminal_sequences.cache_clear()
5739
5740
5741 _terminal_sequences_re = re.compile('\033\\[[^m]+m')
5742
5743
5744 def remove_terminal_sequences(string):
5745 return _terminal_sequences_re.sub('', string)
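
# e.g. remove_terminal_sequences('\033[32mOK\033[0m') == 'OK'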
5746
5747
5748 def number_of_digits(number):
5749 return len('%d' % number)
5750
5751
5752 def join_nonempty(*values, delim='-', from_dict=None):
5753 if from_dict is not None:
5754 values = (traverse_obj(from_dict, variadic(v)) for v in values)
5755 return delim.join(map(str, filter(None, values)))
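
# Examples (illustrative):
# >>> join_nonempty('a', None, '', 'b')
# 'a-b'
# >>> join_nonempty('width', 'height', from_dict={'width': 1920, 'height': 1080}, delim='x')
# '1920x1080'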
5756
5757
5758 def scale_thumbnails_to_max_format_width(formats, thumbnails, url_width_re):
5759 """
5760 Find the largest format dimensions in terms of video width and, for each thumbnail:
5761 * Modify the URL: Match the width with the provided regex and replace it with the largest format width
5762 * Update the thumbnail dimensions to match
5763
5764 This function is useful with video services that scale the provided thumbnails on demand
5765 """
5766 _keys = ('width', 'height')
5767 max_dimensions = max(
5768 (tuple(format.get(k) or 0 for k in _keys) for format in formats),
5769 default=(0, 0))
5770 if not max_dimensions[0]:
5771 return thumbnails
5772 return [
5773 merge_dicts(
5774 {'url': re.sub(url_width_re, str(max_dimensions[0]), thumbnail['url'])},
5775 dict(zip(_keys, max_dimensions)), thumbnail)
5776 for thumbnail in thumbnails
5777 ]
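
# Example (illustrative; the width-in-URL scheme is hypothetical):
# >>> scale_thumbnails_to_max_format_width(
# ...     [{'width': 1920, 'height': 1080}],
# ...     [{'url': 'https://example.com/w320/thumb.jpg'}], r'(?<=/w)\d+')
# [{'url': 'https://example.com/w1920/thumb.jpg', 'width': 1920, 'height': 1080}]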
5778
5779
5780 def parse_http_range(range):
5781 """ Parse value of "Range" or "Content-Range" HTTP header into tuple. """
5782 if not range:
5783 return None, None, None
5784 crg = re.search(r'bytes[ =](\d+)-(\d+)?(?:/(\d+))?', range)
5785 if not crg:
5786 return None, None, None
5787 return int(crg.group(1)), int_or_none(crg.group(2)), int_or_none(crg.group(3))
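
# Examples (illustrative):
# >>> parse_http_range('bytes 0-499/1234')
# (0, 499, 1234)
# >>> parse_http_range('bytes=500-')
# (500, None, None)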
5788
5789
5790 def read_stdin(what):
5791 eof = 'Ctrl+Z' if compat_os_name == 'nt' else 'Ctrl+D'
5792 write_string(f'Reading {what} from STDIN - EOF ({eof}) to end:\n')
5793 return sys.stdin
5794
5795
5796 def determine_file_encoding(data):
5797 """
5798 Detect the text encoding used
5799 @returns (encoding, bytes to skip)
5800 """
5801
5802 # BOMs are given priority over coding declarations
5803 for bom, enc in BOMS:
5804 if data.startswith(bom):
5805 return enc, len(bom)
5806
5807 # Strip off all null bytes to match even when UTF-16 or UTF-32 is used.
5808 # We ignore the endianness to get a good enough match
5809 data = data.replace(b'\0', b'')
5810 mobj = re.match(rb'(?m)^#\s*coding\s*:\s*(\S+)\s*$', data)
5811 return mobj.group(1).decode() if mobj else None, 0
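
# Examples (illustrative; assumes the BOMS table defined earlier maps the UTF-8 BOM to 'utf-8'):
# >>> determine_file_encoding(b'\xef\xbb\xbf# config')
# ('utf-8', 3)
# >>> determine_file_encoding(b'# coding: utf-16-le\n--no-part')
# ('utf-16-le', 0)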
5812
5813
5814 class Config:
5815 own_args = None
5816 parsed_args = None
5817 filename = None
5818 __initialized = False
5819
5820 def __init__(self, parser, label=None):
5821 self.parser, self.label = parser, label
5822 self._loaded_paths, self.configs = set(), []
5823
5824 def init(self, args=None, filename=None):
5825 assert not self.__initialized
5826 self.own_args, self.filename = args, filename
5827 return self.load_configs()
5828
5829 def load_configs(self):
5830 directory = ''
5831 if self.filename:
5832 location = os.path.realpath(self.filename)
5833 directory = os.path.dirname(location)
5834 if location in self._loaded_paths:
5835 return False
5836 self._loaded_paths.add(location)
5837
5838 self.__initialized = True
5839 opts, _ = self.parser.parse_known_args(self.own_args)
5840 self.parsed_args = self.own_args
5841 for location in opts.config_locations or []:
5842 if location == '-':
5843 if location in self._loaded_paths:
5844 continue
5845 self._loaded_paths.add(location)
5846 self.append_config(shlex.split(read_stdin('options'), comments=True), label='stdin')
5847 continue
5848 location = os.path.join(directory, expand_path(location))
5849 if os.path.isdir(location):
5850 location = os.path.join(location, 'yt-dlp.conf')
5851 if not os.path.exists(location):
5852 self.parser.error(f'config location {location} does not exist')
5853 self.append_config(self.read_file(location), location)
5854 return True
5855
5856 def __str__(self):
5857 label = join_nonempty(
5858 self.label, 'config', f'"{self.filename}"' if self.filename else '',
5859 delim=' ')
5860 return join_nonempty(
5861 self.own_args is not None and f'{label[0].upper()}{label[1:]}: {self.hide_login_info(self.own_args)}',
5862 *(f'\n{c}'.replace('\n', '\n| ')[1:] for c in self.configs),
5863 delim='\n')
5864
5865 @staticmethod
5866 def read_file(filename, default=[]):
5867 try:
5868 optionf = open(filename, 'rb')
5869 except OSError:
5870 return default # silently skip if file is not present
5871 try:
5872 enc, skip = determine_file_encoding(optionf.read(512))
5873 optionf.seek(skip, io.SEEK_SET)
5874 except OSError:
5875 enc = None # silently skip read errors
5876 try:
5877 # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
5878 contents = optionf.read().decode(enc or preferredencoding())
5879 res = shlex.split(contents, comments=True)
5880 except Exception as err:
5881 raise ValueError(f'Unable to parse "{filename}": {err}')
5882 finally:
5883 optionf.close()
5884 return res
5885
5886 @staticmethod
5887 def hide_login_info(opts):
5888 PRIVATE_OPTS = {'-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'}
5889 eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')
5890
5891 def _scrub_eq(o):
5892 m = eqre.match(o)
5893 if m:
5894 return m.group('key') + '=PRIVATE'
5895 else:
5896 return o
5897
5898 opts = list(map(_scrub_eq, opts))
5899 for idx, opt in enumerate(opts):
5900 if opt in PRIVATE_OPTS and idx + 1 < len(opts):
5901 opts[idx + 1] = 'PRIVATE'
5902 return opts
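
# Example (illustrative; credentials are hypothetical):
# >>> Config.hide_login_info(['-u', 'foo', '--password=bar', '-v'])
# ['-u', 'PRIVATE', '--password=PRIVATE', '-v']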
5903
5904 def append_config(self, *args, label=None):
5905 config = type(self)(self.parser, label)
5906 config._loaded_paths = self._loaded_paths
5907 if config.init(*args):
5908 self.configs.append(config)
5909
5910 @property
5911 def all_args(self):
5912 for config in reversed(self.configs):
5913 yield from config.all_args
5914 yield from self.parsed_args or []
5915
5916 def parse_known_args(self, **kwargs):
5917 return self.parser.parse_known_args(self.all_args, **kwargs)
5918
5919 def parse_args(self):
5920 return self.parser.parse_args(self.all_args)
5921
5922
5923 class WebSocketsWrapper:
5924 """Wraps websockets module to use in non-async scopes"""
5925 pool = None
5926
5927 def __init__(self, url, headers=None, connect=True):
5928 self.loop = asyncio.new_event_loop()
5929 # XXX: "loop" is deprecated
5930 self.conn = websockets.connect(
5931 url, extra_headers=headers, ping_interval=None,
5932 close_timeout=float('inf'), loop=self.loop, ping_timeout=float('inf'))
5933 if connect:
5934 self.__enter__()
5935 atexit.register(self.__exit__, None, None, None)
5936
5937 def __enter__(self):
5938 if not self.pool:
5939 self.pool = self.run_with_loop(self.conn.__aenter__(), self.loop)
5940 return self
5941
5942 def send(self, *args):
5943 self.run_with_loop(self.pool.send(*args), self.loop)
5944
5945 def recv(self, *args):
5946 return self.run_with_loop(self.pool.recv(*args), self.loop)
5947
5948 def __exit__(self, type, value, traceback):
5949 try:
5950 return self.run_with_loop(self.conn.__aexit__(type, value, traceback), self.loop)
5951 finally:
5952 self._cancel_all_tasks(self.loop)  # cancel pending tasks before closing the loop
5953 self.loop.close()
5954
5955 # taken from https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py with modifications
5956 # for contributors: if any new library that uses asyncio needs to run in non-async code, move these functions out of this class
5957 @staticmethod
5958 def run_with_loop(main, loop):
5959 if not asyncio.iscoroutine(main):
5960 raise ValueError(f'a coroutine was expected, got {main!r}')
5961
5962 try:
5963 return loop.run_until_complete(main)
5964 finally:
5965 loop.run_until_complete(loop.shutdown_asyncgens())
5966 if hasattr(loop, 'shutdown_default_executor'):
5967 loop.run_until_complete(loop.shutdown_default_executor())
5968
5969 @staticmethod
5970 def _cancel_all_tasks(loop):
5971 to_cancel = asyncio.all_tasks(loop)
5972
5973 if not to_cancel:
5974 return
5975
5976 for task in to_cancel:
5977 task.cancel()
5978
5979 # XXX: "loop" is removed in python 3.10+
5980 loop.run_until_complete(
5981 asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))
5982
5983 for task in to_cancel:
5984 if task.cancelled():
5985 continue
5986 if task.exception() is not None:
5987 loop.call_exception_handler({
5988 'message': 'unhandled exception during asyncio.run() shutdown',
5989 'exception': task.exception(),
5990 'task': task,
5991 })
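
# Usage sketch (illustrative; URL and payload are hypothetical):
# ws = WebSocketsWrapper('wss://example.com/socket', headers={'Origin': 'https://example.com'})
# ws.send('{"op": "subscribe"}')
# reply = ws.recv()
# ws.__exit__(None, None, None)  # explicit close (cleanup is also registered via atexit)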
5992
5993
5994 def merge_headers(*dicts):
5995 """Merge dicts of HTTP headers case-insensitively, prioritizing the later ones"""
5996 return {k.title(): v for k, v in itertools.chain.from_iterable(map(dict.items, dicts))}
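
# e.g. merge_headers({'X-Custom': 'a', 'accept': '*/*'}, {'x-custom': 'b'})
#      == {'X-Custom': 'b', 'Accept': '*/*'}  (illustrative)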
5997
5998
5999 def cached_method(f):
6000 """Cache a method"""
6001 signature = inspect.signature(f)
6002
6003 @functools.wraps(f)
6004 def wrapper(self, *args, **kwargs):
6005 bound_args = signature.bind(self, *args, **kwargs)
6006 bound_args.apply_defaults()
6007 key = tuple(bound_args.arguments.values())[1:]
6008
6009 cache = vars(self).setdefault('_cached_method__cache', {}).setdefault(f.__name__, {})
6010 if key not in cache:
6011 cache[key] = f(self, *args, **kwargs)
6012 return cache[key]
6013 return wrapper
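
# Usage sketch (illustrative):
# class Example:
#     @cached_method
#     def calculate(self, x):
#         return x ** 2  # computed once per instance and argument tuple
# Repeated calls with the same arguments on the same instance return the cached value.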
6014
6015
6016 class classproperty:
6017 """property access for class methods with optional caching"""
6018 def __new__(cls, func=None, *args, **kwargs):
6019 if not func:
6020 return functools.partial(cls, *args, **kwargs)
6021 return super().__new__(cls)
6022
6023 def __init__(self, func, *, cache=False):
6024 functools.update_wrapper(self, func)
6025 self.func = func
6026 self._cache = {} if cache else None
6027
6028 def __get__(self, _, cls):
6029 if self._cache is None:
6030 return self.func(cls)
6031 elif cls not in self._cache:
6032 self._cache[cls] = self.func(cls)
6033 return self._cache[cls]
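
# Usage sketch (illustrative):
# class Example:
#     @classproperty(cache=True)
#     def expensive(cls):
#         ...  # evaluated on first access, then cached per class
# Plain @classproperty (without cache) re-evaluates on every access.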
6034
6035
6036 class function_with_repr:
6037 def __init__(self, func):
6038 functools.update_wrapper(self, func)
6039 self.func = func
6040
6041 def __call__(self, *args, **kwargs):
6042 return self.func(*args, **kwargs)
6043
6044 def __repr__(self):
6045 return f'{self.func.__module__}.{self.func.__qualname__}'
6046
6047
6048 class Namespace(types.SimpleNamespace):
6049 """Immutable namespace"""
6050
6051 def __iter__(self):
6052 return iter(self.__dict__.values())
6053
6054 @property
6055 def items_(self):
6056 return self.__dict__.items()
6057
6058
6059 MEDIA_EXTENSIONS = Namespace(
6060 common_video=('avi', 'flv', 'mkv', 'mov', 'mp4', 'webm'),
6061 video=('3g2', '3gp', 'f4v', 'mk3d', 'divx', 'mpg', 'ogv', 'm4v', 'wmv'),
6062 common_audio=('aiff', 'alac', 'flac', 'm4a', 'mka', 'mp3', 'ogg', 'opus', 'wav'),
6063 audio=('aac', 'ape', 'asf', 'f4a', 'f4b', 'm4b', 'm4p', 'm4r', 'oga', 'ogx', 'spx', 'vorbis', 'wma', 'weba'),
6064 thumbnails=('jpg', 'png', 'webp'),
6065 storyboards=('mhtml', ),
6066 subtitles=('srt', 'vtt', 'ass', 'lrc'),
6067 manifests=('f4f', 'f4m', 'm3u8', 'smil', 'mpd'),
6068 )
6069 MEDIA_EXTENSIONS.video += MEDIA_EXTENSIONS.common_video
6070 MEDIA_EXTENSIONS.audio += MEDIA_EXTENSIONS.common_audio
6071
6072 KNOWN_EXTENSIONS = (*MEDIA_EXTENSIONS.video, *MEDIA_EXTENSIONS.audio, *MEDIA_EXTENSIONS.manifests)
6073
6074
6075 class RetryManager:
6076 """Usage:
6077 for retry in RetryManager(...):
6078 try:
6079 ...
6080 except SomeException as err:
6081 retry.error = err
6082 continue
6083 """
6084 attempt, _error = 0, None
6085
6086 def __init__(self, _retries, _error_callback, **kwargs):
6087 self.retries = _retries or 0
6088 self.error_callback = functools.partial(_error_callback, **kwargs)
6089
6090 def _should_retry(self):
6091 return self._error is not NO_DEFAULT and self.attempt <= self.retries
6092
6093 @property
6094 def error(self):
6095 if self._error is NO_DEFAULT:
6096 return None
6097 return self._error
6098
6099 @error.setter
6100 def error(self, value):
6101 self._error = value
6102
6103 def __iter__(self):
6104 while self._should_retry():
6105 self.error = NO_DEFAULT
6106 self.attempt += 1
6107 yield self
6108 if self.error:
6109 self.error_callback(self.error, self.attempt, self.retries)
6110
6111 @staticmethod
6112 def report_retry(e, count, retries, *, sleep_func, info, warn, error=None, suffix=None):
6113 """Utility function for reporting retries"""
6114 if count > retries:
6115 if error:
6116 return error(f'{e}. Giving up after {count - 1} retries') if count > 1 else error(str(e))
6117 raise e
6118
6119 if not count:
6120 return warn(e)
6121 elif isinstance(e, ExtractorError):
6122 e = remove_end(str_or_none(e.cause) or e.orig_msg, '.')
6123 warn(f'{e}. Retrying{format_field(suffix, None, " %s")} ({count}/{retries})...')
6124
6125 delay = float_or_none(sleep_func(n=count - 1)) if callable(sleep_func) else sleep_func
6126 if delay:
6127 info(f'Sleeping {delay:.2f} seconds ...')
6128 time.sleep(delay)
6129
6130
6131 def make_archive_id(ie, video_id):
6132 ie_key = ie if isinstance(ie, str) else ie.ie_key()
6133 return f'{ie_key.lower()} {video_id}'
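
# e.g. make_archive_id('Youtube', 'abc123') == 'youtube abc123'  (illustrative video ID)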
6134
6135
6136 def truncate_string(s, left, right=0):
6137 assert left > 3 and right >= 0
6138 if s is None or len(s) <= left + right:
6139 return s
6140 return f'{s[:left-3]}...{s[-right:] if right else ""}'
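
# Examples (illustrative):
# >>> truncate_string('abcdefghij', 7)
# 'abcd...'
# >>> truncate_string('abcdefghij', 6, 2)
# 'abc...ij'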
6141
6142
6143 def orderedSet_from_options(options, alias_dict, *, use_regex=False, start=None):
6144 assert 'all' in alias_dict, '"all" alias is required'
6145 requested = list(start or [])
6146 for val in options:
6147 discard = val.startswith('-')
6148 if discard:
6149 val = val[1:]
6150
6151 if val in alias_dict:
6152 val = alias_dict[val] if not discard else [
6153 i[1:] if i.startswith('-') else f'-{i}' for i in alias_dict[val]]
6154 # NB: Do not allow regex in aliases for performance
6155 requested = orderedSet_from_options(val, alias_dict, start=requested)
6156 continue
6157
6158 current = (filter(re.compile(val, re.I).fullmatch, alias_dict['all']) if use_regex
6159 else [val] if val in alias_dict['all'] else None)
6160 if current is None:
6161 raise ValueError(val)
6162
6163 if discard:
6164 for item in current:
6165 while item in requested:
6166 requested.remove(item)
6167 else:
6168 requested.extend(current)
6169
6170 return orderedSet(requested)
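
# Example (illustrative; the alias table is hypothetical):
# >>> orderedSet_from_options(['all', '-mtime'], {'all': ['chapters', 'mtime', 'thumbnail']})
# ['chapters', 'thumbnail']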
6171
6172
6173 class FormatSorter:
6174 regex = r' *((?P<reverse>\+)?(?P<field>[a-zA-Z0-9_]+)((?P<separator>[~:])(?P<limit>.*?))?)? *$'
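# e.g. 'res:1080' parses as field 'res' with limit '1080'; '+size' as a reversed 'size';
# 'br~2000' as 'br' sorted by closeness to 2000 (illustrative parses of the regex above)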
6175
6176 default = ('hidden', 'aud_or_vid', 'hasvid', 'ie_pref', 'lang', 'quality',
6177 'res', 'fps', 'hdr:12', 'vcodec:vp9.2', 'channels', 'acodec',
6178 'size', 'br', 'asr', 'proto', 'ext', 'hasaud', 'source', 'id') # These must not be aliases
6179 ytdl_default = ('hasaud', 'lang', 'quality', 'tbr', 'filesize', 'vbr',
6180 'height', 'width', 'proto', 'vext', 'abr', 'aext',
6181 'fps', 'fs_approx', 'source', 'id')
6182
6183 settings = {
6184 'vcodec': {'type': 'ordered', 'regex': True,
6185 'order': ['av0?1', 'vp0?9.2', 'vp0?9', '[hx]265|he?vc?', '[hx]264|avc', 'vp0?8', 'mp4v|h263', 'theora', '', None, 'none']},
6186 'acodec': {'type': 'ordered', 'regex': True,
6187 'order': ['[af]lac', 'wav|aiff', 'opus', 'vorbis|ogg', 'aac', 'mp?4a?', 'mp3', 'ac-?4', 'e-?a?c-?3', 'ac-?3', 'dts', '', None, 'none']},
6188 'hdr': {'type': 'ordered', 'regex': True, 'field': 'dynamic_range',
6189 'order': ['dv', '(hdr)?12', r'(hdr)?10\+', '(hdr)?10', 'hlg', '', 'sdr', None]},
6190 'proto': {'type': 'ordered', 'regex': True, 'field': 'protocol',
6191 'order': ['(ht|f)tps', '(ht|f)tp$', 'm3u8.*', '.*dash', 'websocket_frag', 'rtmpe?', '', 'mms|rtsp', 'ws|websocket', 'f4']},
6192 'vext': {'type': 'ordered', 'field': 'video_ext',
6193 'order': ('mp4', 'mov', 'webm', 'flv', '', 'none'),
6194 'order_free': ('webm', 'mp4', 'mov', 'flv', '', 'none')},
6195 'aext': {'type': 'ordered', 'regex': True, 'field': 'audio_ext',
6196 'order': ('m4a', 'aac', 'mp3', 'ogg', 'opus', 'web[am]', '', 'none'),
6197 'order_free': ('ogg', 'opus', 'web[am]', 'mp3', 'm4a', 'aac', '', 'none')},
6198 'hidden': {'visible': False, 'forced': True, 'type': 'extractor', 'max': -1000},
6199 'aud_or_vid': {'visible': False, 'forced': True, 'type': 'multiple',
6200 'field': ('vcodec', 'acodec'),
6201 'function': lambda it: int(any(v != 'none' for v in it))},
6202 'ie_pref': {'priority': True, 'type': 'extractor'},
6203 'hasvid': {'priority': True, 'field': 'vcodec', 'type': 'boolean', 'not_in_list': ('none',)},
6204 'hasaud': {'field': 'acodec', 'type': 'boolean', 'not_in_list': ('none',)},
6205 'lang': {'convert': 'float', 'field': 'language_preference', 'default': -1},
6206 'quality': {'convert': 'float', 'default': -1},
6207 'filesize': {'convert': 'bytes'},
6208 'fs_approx': {'convert': 'bytes', 'field': 'filesize_approx'},
6209 'id': {'convert': 'string', 'field': 'format_id'},
6210 'height': {'convert': 'float_none'},
6211 'width': {'convert': 'float_none'},
6212 'fps': {'convert': 'float_none'},
6213 'channels': {'convert': 'float_none', 'field': 'audio_channels'},
6214 'tbr': {'convert': 'float_none'},
6215 'vbr': {'convert': 'float_none'},
6216 'abr': {'convert': 'float_none'},
6217 'asr': {'convert': 'float_none'},
6218 'source': {'convert': 'float', 'field': 'source_preference', 'default': -1},
6219
6220 'codec': {'type': 'combined', 'field': ('vcodec', 'acodec')},
6221 'br': {'type': 'combined', 'field': ('tbr', 'vbr', 'abr'), 'same_limit': True},
6222 'size': {'type': 'combined', 'same_limit': True, 'field': ('filesize', 'fs_approx')},
6223 'ext': {'type': 'combined', 'field': ('vext', 'aext')},
6224 'res': {'type': 'multiple', 'field': ('height', 'width'),
6225 'function': lambda it: (lambda l: min(l) if l else 0)(tuple(filter(None, it)))},
6226
6227 # Actual field names
6228 'format_id': {'type': 'alias', 'field': 'id'},
6229 'preference': {'type': 'alias', 'field': 'ie_pref'},
6230 'language_preference': {'type': 'alias', 'field': 'lang'},
6231 'source_preference': {'type': 'alias', 'field': 'source'},
6232 'protocol': {'type': 'alias', 'field': 'proto'},
6233 'filesize_approx': {'type': 'alias', 'field': 'fs_approx'},
6234 'audio_channels': {'type': 'alias', 'field': 'channels'},
6235
6236 # Deprecated
6237 'dimension': {'type': 'alias', 'field': 'res', 'deprecated': True},
6238 'resolution': {'type': 'alias', 'field': 'res', 'deprecated': True},
6239 'extension': {'type': 'alias', 'field': 'ext', 'deprecated': True},
6240 'bitrate': {'type': 'alias', 'field': 'br', 'deprecated': True},
6241 'total_bitrate': {'type': 'alias', 'field': 'tbr', 'deprecated': True},
6242 'video_bitrate': {'type': 'alias', 'field': 'vbr', 'deprecated': True},
6243 'audio_bitrate': {'type': 'alias', 'field': 'abr', 'deprecated': True},
6244 'framerate': {'type': 'alias', 'field': 'fps', 'deprecated': True},
6245 'filesize_estimate': {'type': 'alias', 'field': 'size', 'deprecated': True},
6246 'samplerate': {'type': 'alias', 'field': 'asr', 'deprecated': True},
6247 'video_ext': {'type': 'alias', 'field': 'vext', 'deprecated': True},
6248 'audio_ext': {'type': 'alias', 'field': 'aext', 'deprecated': True},
6249 'video_codec': {'type': 'alias', 'field': 'vcodec', 'deprecated': True},
6250 'audio_codec': {'type': 'alias', 'field': 'acodec', 'deprecated': True},
6251 'video': {'type': 'alias', 'field': 'hasvid', 'deprecated': True},
6252 'has_video': {'type': 'alias', 'field': 'hasvid', 'deprecated': True},
6253 'audio': {'type': 'alias', 'field': 'hasaud', 'deprecated': True},
6254 'has_audio': {'type': 'alias', 'field': 'hasaud', 'deprecated': True},
6255 'extractor': {'type': 'alias', 'field': 'ie_pref', 'deprecated': True},
6256 'extractor_preference': {'type': 'alias', 'field': 'ie_pref', 'deprecated': True},
6257 }
6258
6259 def __init__(self, ydl, field_preference):
6260 self.ydl = ydl
6261 self._order = []
6262 self.evaluate_params(self.ydl.params, field_preference)
6263 if ydl.params.get('verbose'):
6264 self.print_verbose_info(self.ydl.write_debug)
6265
6266 def _get_field_setting(self, field, key):
6267 if field not in self.settings:
6268 if key in ('forced', 'priority'):
6269 return False
6270 self.ydl.deprecated_feature(f'Using arbitrary fields ({field}) for format sorting is '
6271 'deprecated and may be removed in a future version')
6272 self.settings[field] = {}
6273 propObj = self.settings[field]
6274 if key not in propObj:
6275 type = propObj.get('type')
6276 if key == 'field':
6277 default = 'preference' if type == 'extractor' else (field,) if type in ('combined', 'multiple') else field
6278 elif key == 'convert':
6279 default = 'order' if type == 'ordered' else 'float_string' if field else 'ignore'
6280 else:
6281 default = {'type': 'field', 'visible': True, 'order': [], 'not_in_list': (None,)}.get(key, None)
6282 propObj[key] = default
6283 return propObj[key]
6284
6285 def _resolve_field_value(self, field, value, convertNone=False):
6286 if value is None:
6287 if not convertNone:
6288 return None
6289 else:
6290 value = value.lower()
6291 conversion = self._get_field_setting(field, 'convert')
6292 if conversion == 'ignore':
6293 return None
6294 if conversion == 'string':
6295 return value
6296 elif conversion == 'float_none':
6297 return float_or_none(value)
6298 elif conversion == 'bytes':
6299 return parse_bytes(value)
6300 elif conversion == 'order':
6301 order_list = (self._use_free_order and self._get_field_setting(field, 'order_free')) or self._get_field_setting(field, 'order')
6302 use_regex = self._get_field_setting(field, 'regex')
6303 list_length = len(order_list)
6304 empty_pos = order_list.index('') if '' in order_list else list_length + 1
6305 if use_regex and value is not None:
6306 for i, regex in enumerate(order_list):
6307 if regex and re.match(regex, value):
6308 return list_length - i
6309 return list_length - empty_pos # not in list
6310 else:  # not regex, or value is None
6311 return list_length - (order_list.index(value) if value in order_list else empty_pos)
6312 else:
6313 if value.isnumeric():
6314 return float(value)
6315 else:
6316 self.settings[field]['convert'] = 'string'
6317 return value
6318
6319 def evaluate_params(self, params, sort_extractor):
6320 self._use_free_order = params.get('prefer_free_formats', False)
6321 self._sort_user = params.get('format_sort', [])
6322 self._sort_extractor = sort_extractor
6323
6324 def add_item(field, reverse, closest, limit_text):
6325 field = field.lower()
6326 if field in self._order:
6327 return
6328 self._order.append(field)
6329 limit = self._resolve_field_value(field, limit_text)
6330 data = {
6331 'reverse': reverse,
6332 'closest': False if limit is None else closest,
6333 'limit_text': limit_text,
6334 'limit': limit}
6335 if field in self.settings:
6336 self.settings[field].update(data)
6337 else:
6338 self.settings[field] = data
6339
6340 sort_list = (
6341 tuple(field for field in self.default if self._get_field_setting(field, 'forced'))
6342 + (tuple() if params.get('format_sort_force', False)
6343 else tuple(field for field in self.default if self._get_field_setting(field, 'priority')))
6344 + tuple(self._sort_user) + tuple(sort_extractor) + self.default)
6345
6346 for item in sort_list:
6347 match = re.match(self.regex, item)
6348 if match is None:
6349 raise ExtractorError('Invalid format sort string "%s" given by extractor' % item)
6350 field = match.group('field')
6351 if field is None:
6352 continue
6353 if self._get_field_setting(field, 'type') == 'alias':
6354 alias, field = field, self._get_field_setting(field, 'field')
6355 if self._get_field_setting(alias, 'deprecated'):
6356 self.ydl.deprecated_feature(f'Format sorting alias {alias} is deprecated and may '
6357 f'be removed in a future version. Please use {field} instead')
6358 reverse = match.group('reverse') is not None
6359 closest = match.group('separator') == '~'
6360 limit_text = match.group('limit')
6361
6362 has_limit = limit_text is not None
6363 has_multiple_fields = self._get_field_setting(field, 'type') == 'combined'
6364 has_multiple_limits = has_limit and has_multiple_fields and not self._get_field_setting(field, 'same_limit')
6365
6366 fields = self._get_field_setting(field, 'field') if has_multiple_fields else (field,)
6367 limits = limit_text.split(':') if has_multiple_limits else (limit_text,) if has_limit else tuple()
6368 limit_count = len(limits)
6369 for (i, f) in enumerate(fields):
6370 add_item(f, reverse, closest,
6371 limits[i] if i < limit_count
6372 else limits[0] if has_limit and not has_multiple_limits
6373 else None)
6374
6375 def print_verbose_info(self, write_debug):
6376 if self._sort_user:
6377 write_debug('Sort order given by user: %s' % ', '.join(self._sort_user))
6378 if self._sort_extractor:
6379 write_debug('Sort order given by extractor: %s' % ', '.join(self._sort_extractor))
6380 write_debug('Formats sorted by: %s' % ', '.join(['%s%s%s' % (
6381 '+' if self._get_field_setting(field, 'reverse') else '', field,
6382 '%s%s(%s)' % ('~' if self._get_field_setting(field, 'closest') else ':',
6383 self._get_field_setting(field, 'limit_text'),
6384 self._get_field_setting(field, 'limit'))
6385 if self._get_field_setting(field, 'limit_text') is not None else '')
6386 for field in self._order if self._get_field_setting(field, 'visible')]))
6387
6388 def _calculate_field_preference_from_value(self, format, field, type, value):
6389 reverse = self._get_field_setting(field, 'reverse')
6390 closest = self._get_field_setting(field, 'closest')
6391 limit = self._get_field_setting(field, 'limit')
6392
6393 if type == 'extractor':
6394 maximum = self._get_field_setting(field, 'max')
6395 if value is None or (maximum is not None and value >= maximum):
6396 value = -1
6397 elif type == 'boolean':
6398 in_list = self._get_field_setting(field, 'in_list')
6399 not_in_list = self._get_field_setting(field, 'not_in_list')
6400 value = 0 if ((in_list is None or value in in_list) and (not_in_list is None or value not in not_in_list)) else -1
6401 elif type == 'ordered':
6402 value = self._resolve_field_value(field, value, True)
6403
6404 # try to convert to number
6405 val_num = float_or_none(value, default=self._get_field_setting(field, 'default'))
6406 is_num = self._get_field_setting(field, 'convert') != 'string' and val_num is not None
6407 if is_num:
6408 value = val_num
6409
6410 return ((-10, 0) if value is None
6411 else (1, value, 0) if not is_num # if a field has mixed strings and numbers, strings are sorted higher
6412 else (0, -abs(value - limit), value - limit if reverse else limit - value) if closest
6413 else (0, value, 0) if not reverse and (limit is None or value <= limit)
6414 else (0, -value, 0) if limit is None or (reverse and value == limit) or value > limit
6415 else (-1, value, 0))
6416
6417 def _calculate_field_preference(self, format, field):
6418 type = self._get_field_setting(field, 'type') # extractor, boolean, ordered, field, multiple
6419 get_value = lambda f: format.get(self._get_field_setting(f, 'field'))
6420 if type == 'multiple':
6421 type = 'field' # Only 'field' is allowed in multiple for now
6422 actual_fields = self._get_field_setting(field, 'field')
6423
6424 value = self._get_field_setting(field, 'function')(get_value(f) for f in actual_fields)
6425 else:
6426 value = get_value(field)
6427 return self._calculate_field_preference_from_value(format, field, type, value)
6428
6429 def calculate_preference(self, format):
6430 # Determine missing protocol
6431 if not format.get('protocol'):
6432 format['protocol'] = determine_protocol(format)
6433
6434 # Determine missing ext
6435 if not format.get('ext') and 'url' in format:
6436 format['ext'] = determine_ext(format['url'])
6437 if format.get('vcodec') == 'none':
6438 format['audio_ext'] = format['ext'] if format.get('acodec') != 'none' else 'none'
6439 format['video_ext'] = 'none'
6440 else:
6441 format['video_ext'] = format['ext']
6442 format['audio_ext'] = 'none'
6443 # if format.get('preference') is None and format.get('ext') in ('f4f', 'f4m'): # Not supported?
6444 # format['preference'] = -1000
6445
6446 if format.get('preference') is None and format.get('ext') == 'flv' and re.match('[hx]265|he?vc?', format.get('vcodec') or ''):
6447 # HEVC-over-FLV is not part of the original FLV spec
6448 # ref. https://trac.ffmpeg.org/ticket/6389
6449 # ref. https://github.com/yt-dlp/yt-dlp/pull/5821
6450 format['preference'] = -100
6451
6452 # Determine missing bitrates
6453 if format.get('tbr') is None:
6454 if format.get('vbr') is not None and format.get('abr') is not None:
6455 format['tbr'] = format.get('vbr', 0) + format.get('abr', 0)
6456 else:
6457 if format.get('vcodec') != 'none' and format.get('vbr') is None:
6458 format['vbr'] = format.get('tbr') - format.get('abr', 0)
6459 if format.get('acodec') != 'none' and format.get('abr') is None:
6460 format['abr'] = format.get('tbr') - format.get('vbr', 0)
6461
6462 return tuple(self._calculate_field_preference(format, field) for field in self._order)
6463
6464
6465 # Deprecated
6466 has_certifi = bool(certifi)
6467 has_websockets = bool(websockets)
6468
6469
6470 def load_plugins(name, suffix, namespace):
6471 from .plugins import load_plugins
6472 ret = load_plugins(name, suffix)
6473 namespace.update(ret)
6474 return ret