47 import xml
.etree
.ElementTree
50 from . import traversal
52 from ..compat
import functools
# isort: split
53 from ..compat
import (
54 compat_etree_fromstring
,
56 compat_HTMLParseError
,
60 from ..dependencies
import brotli
, certifi
, websockets
, xattr
61 from ..socks
import ProxyType
, sockssocket
63 __name__
= __name__
.rsplit('.', 1)[0] # Pretend to be the parent module
65 # This is not clearly defined otherwise
66 compiled_regex_type
= type(re
.compile(''))
69 def random_user_agent():
70 _USER_AGENT_TPL
= 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
111 return _USER_AGENT_TPL
% random
.choice(_CHROME_VERSIONS
)
114 SUPPORTED_ENCODINGS
= [
118 SUPPORTED_ENCODINGS
.append('br')
121 'User-Agent': random_user_agent(),
122 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
123 'Accept-Language': 'en-us,en;q=0.5',
124 'Sec-Fetch-Mode': 'navigate',
129 'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
141 ENGLISH_MONTH_NAMES
= [
142 'January', 'February', 'March', 'April', 'May', 'June',
143 'July', 'August', 'September', 'October', 'November', 'December']
146 'en': ENGLISH_MONTH_NAMES
,
148 'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
149 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
150 # these follow the genitive grammatical case (dopełniacz)
151 # some websites might be using nominative, which will require another month list
152 # https://en.wikibooks.org/wiki/Polish/Noun_cases
153 'pl': ['stycznia', 'lutego', 'marca', 'kwietnia', 'maja', 'czerwca',
154 'lipca', 'sierpnia', 'września', 'października', 'listopada', 'grudnia'],
157 # From https://github.com/python/cpython/blob/3.11/Lib/email/_parseaddr.py#L36-L42
159 'UT': 0, 'UTC': 0, 'GMT': 0, 'Z': 0,
160 'AST': -4, 'ADT': -3, # Atlantic (used in Canada)
161 'EST': -5, 'EDT': -4, # Eastern
162 'CST': -6, 'CDT': -5, # Central
163 'MST': -7, 'MDT': -6, # Mountain
164 'PST': -8, 'PDT': -7 # Pacific
167 # needed for sanitizing filenames in restricted mode
168 ACCENT_CHARS
= dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
169 itertools
.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
170 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))
200 '%Y-%m-%d %H:%M:%S.%f',
201 '%Y-%m-%d %H:%M:%S:%f',
204 '%Y-%m-%dT%H:%M:%SZ',
205 '%Y-%m-%dT%H:%M:%S.%fZ',
206 '%Y-%m-%dT%H:%M:%S.%f0Z',
208 '%Y-%m-%dT%H:%M:%S.%f',
211 '%b %d %Y at %H:%M:%S',
213 '%B %d %Y at %H:%M:%S',
217 DATE_FORMATS_DAY_FIRST
= list(DATE_FORMATS
)
218 DATE_FORMATS_DAY_FIRST
.extend([
228 DATE_FORMATS_MONTH_FIRST
= list(DATE_FORMATS
)
229 DATE_FORMATS_MONTH_FIRST
.extend([
237 PACKED_CODES_RE
= r
"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
238 JSON_LD_RE
= r
'(?is)<script[^>]+type=(["\']?
)application
/ld\
+json\
1[^
>]*>\s
*(?P
<json_ld
>{.+?}|\
[.+?\
])\s
*</script
>'
240 NUMBER_RE = r'\d
+(?
:\
.\d
+)?
'
244 def preferredencoding():
245 """Get preferred encoding.
247 Returns the best encoding scheme for the system, based on
248 locale.getpreferredencoding() and some further tweaks.
251 pref = locale.getpreferredencoding()
259 def write_json_file(obj, fn):
260 """ Encode obj as JSON and write it to fn, atomically if possible """
262 tf = tempfile.NamedTemporaryFile(
263 prefix=f'{os.path.basename(fn)}
.', dir=os.path.dirname(fn),
264 suffix='.tmp
', delete=False, mode='w
', encoding='utf
-8')
268 json.dump(obj, tf, ensure_ascii=False)
269 if sys.platform == 'win32
':
270 # Need to remove existing file on Windows, else os.rename raises
271 # WindowsError or FileExistsError.
272 with contextlib.suppress(OSError):
274 with contextlib.suppress(OSError):
277 os.chmod(tf.name, 0o666 & ~mask)
278 os.rename(tf.name, fn)
280 with contextlib.suppress(OSError):
285 def find_xpath_attr(node, xpath, key, val=None):
286 """ Find the xpath xpath[@key=val] """
287 assert re.match(r'^
[a
-zA
-Z_
-]+$
', key)
288 expr = xpath + ('[@%s]' % key if val is None else f"[@{key}='{val}
']")
289 return node.find(expr)
291 # On python2.6 the xml.etree.ElementTree.Element methods don't support
292 # the namespace parameter
295 def xpath_with_ns(path
, ns_map
):
296 components
= [c
.split(':') for c
in path
.split('/')]
300 replaced
.append(c
[0])
303 replaced
.append('{%s}%s' % (ns_map
[ns
], tag
))
304 return '/'.join(replaced
)
307 def xpath_element(node
, xpath
, name
=None, fatal
=False, default
=NO_DEFAULT
):
308 def _find_xpath(xpath
):
309 return node
.find(xpath
)
311 if isinstance(xpath
, str):
312 n
= _find_xpath(xpath
)
320 if default
is not NO_DEFAULT
:
323 name
= xpath
if name
is None else name
324 raise ExtractorError('Could not find XML element %s' % name
)
330 def xpath_text(node
, xpath
, name
=None, fatal
=False, default
=NO_DEFAULT
):
331 n
= xpath_element(node
, xpath
, name
, fatal
=fatal
, default
=default
)
332 if n
is None or n
== default
:
335 if default
is not NO_DEFAULT
:
338 name
= xpath
if name
is None else name
339 raise ExtractorError('Could not find XML element\'s text %s' % name
)
345 def xpath_attr(node
, xpath
, key
, name
=None, fatal
=False, default
=NO_DEFAULT
):
346 n
= find_xpath_attr(node
, xpath
, key
)
348 if default
is not NO_DEFAULT
:
351 name
= f
'{xpath}[@{key}]' if name
is None else name
352 raise ExtractorError('Could not find XML attribute %s' % name
)
358 def get_element_by_id(id, html
, **kwargs
):
359 """Return the content of the tag with the specified ID in the passed HTML document"""
360 return get_element_by_attribute('id', id, html
, **kwargs
)
363 def get_element_html_by_id(id, html
, **kwargs
):
364 """Return the html of the tag with the specified ID in the passed HTML document"""
365 return get_element_html_by_attribute('id', id, html
, **kwargs
)
368 def get_element_by_class(class_name
, html
):
369 """Return the content of the first tag with the specified class in the passed HTML document"""
370 retval
= get_elements_by_class(class_name
, html
)
371 return retval
[0] if retval
else None
374 def get_element_html_by_class(class_name
, html
):
375 """Return the html of the first tag with the specified class in the passed HTML document"""
376 retval
= get_elements_html_by_class(class_name
, html
)
377 return retval
[0] if retval
else None
380 def get_element_by_attribute(attribute
, value
, html
, **kwargs
):
381 retval
= get_elements_by_attribute(attribute
, value
, html
, **kwargs
)
382 return retval
[0] if retval
else None
385 def get_element_html_by_attribute(attribute
, value
, html
, **kargs
):
386 retval
= get_elements_html_by_attribute(attribute
, value
, html
, **kargs
)
387 return retval
[0] if retval
else None
390 def get_elements_by_class(class_name
, html
, **kargs
):
391 """Return the content of all tags with the specified class in the passed HTML document as a list"""
392 return get_elements_by_attribute(
393 'class', r
'[^\'"]*(?<=[\'"\s
])%s(?
=[\'"\s])[^\'"]*' % re.escape(class_name),
394 html, escape_value=False)
397 def get_elements_html_by_class(class_name, html):
398 """Return the html of all tags with the specified class in the passed HTML document as a list"""
399 return get_elements_html_by_attribute(
400 'class', r'[^
\'"]*(?<=[\'"\s
])%s(?
=[\'"\s])[^\'"]*' % re.escape(class_name),
401 html, escape_value=False)
404 def get_elements_by_attribute(*args, **kwargs):
405 """Return the content of the tag with the specified attribute in the passed HTML document"""
406 return [content for content, _ in get_elements_text_and_html_by_attribute(*args, **kwargs)]
409 def get_elements_html_by_attribute(*args, **kwargs):
410 """Return the html of the tag with the specified attribute in the passed HTML document"""
411 return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]
414 def get_elements_text_and_html_by_attribute(attribute, value, html, *, tag=r'[\w
:.-]+', escape_value=True):
416 Return the text (content) and the html (whole) of the tag with the specified
417 attribute in the passed HTML document
422 quote = '' if re.match(r'''[\s"'`
=<>]''', value) else '?'
424 value = re.escape(value) if escape_value else value
426 partial_element_re = rf'''(?x
)
428 (?
:\
s(?
:[^
>"']|"[^
"]*"|
'[^']*')*)?
429 \s{re.escape(attribute)}\s*=\s*(?P<_q>['"]{quote})(?-x:{value})(?P=_q)
432 for m in re.finditer(partial_element_re, html):
433 content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():])
436 unescapeHTML(re.sub(r'^(?P<q>["\'])(?P
<content
>.*)(?P
=q
)$
', r'\g
<content
>', content, flags=re.DOTALL)),
441 class HTMLBreakOnClosingTagParser(html.parser.HTMLParser):
443 HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
444 closing tag for the first opening tag it has encountered, and can be used
448 class HTMLBreakOnClosingTagException(Exception):
452 self.tagstack = collections.deque()
453 html.parser.HTMLParser.__init__(self)
458 def __exit__(self, *_):
462 # handle_endtag does not return upon raising HTMLBreakOnClosingTagException,
463 # so data remains buffered; we no longer have any interest in it, thus
464 # override this method to discard it
467 def handle_starttag(self, tag, _):
468 self.tagstack.append(tag)
470 def handle_endtag(self, tag):
471 if not self.tagstack:
472 raise compat_HTMLParseError('no tags
in the stack
')
474 inner_tag = self.tagstack.pop()
478 raise compat_HTMLParseError(f'matching opening tag
for closing {tag} tag
not found
')
479 if not self.tagstack:
480 raise self.HTMLBreakOnClosingTagException()
483 # XXX: This should be far less strict
484 def get_element_text_and_html_by_tag(tag, html):
486 For the first element with the specified tag in the passed HTML document
487 return its' content (text
) and the whole
element (html
)
489 def find_or_raise(haystack, needle, exc):
491 return haystack.index(needle)
494 closing_tag = f'</{tag}>'
495 whole_start = find_or_raise(
496 html, f'<{tag}', compat_HTMLParseError(f'opening {tag} tag not found'))
497 content_start = find_or_raise(
498 html[whole_start:], '>', compat_HTMLParseError(f'malformed opening {tag} tag'))
499 content_start += whole_start + 1
500 with HTMLBreakOnClosingTagParser() as parser:
501 parser.feed(html[whole_start:content_start])
502 if not parser.tagstack or parser.tagstack[0] != tag:
503 raise compat_HTMLParseError(f'parser did not match opening {tag} tag')
504 offset = content_start
505 while offset < len(html):
506 next_closing_tag_start = find_or_raise(
507 html[offset:], closing_tag,
508 compat_HTMLParseError(f'closing {tag} tag not found'))
509 next_closing_tag_end = next_closing_tag_start + len(closing_tag)
511 parser.feed(html[offset:offset + next_closing_tag_end])
512 offset += next_closing_tag_end
513 except HTMLBreakOnClosingTagParser.HTMLBreakOnClosingTagException:
514 return html[content_start:offset + next_closing_tag_start], \
515 html[whole_start:offset + next_closing_tag_end]
516 raise compat_HTMLParseError('unexpected end of html')
519 class HTMLAttributeParser(html.parser.HTMLParser):
520 """Trivial HTML parser to gather the attributes
for a single element
"""
524 html.parser.HTMLParser.__init__(self)
526 def handle_starttag(self, tag, attrs):
527 self.attrs = dict(attrs)
528 raise compat_HTMLParseError('done')
531 class HTMLListAttrsParser(html.parser.HTMLParser):
532 """HTML parser to gather the attributes
for the elements of a
list"""
535 html.parser.HTMLParser.__init__(self)
539 def handle_starttag(self, tag, attrs):
540 if tag == 'li' and self._level == 0:
541 self.items.append(dict(attrs))
544 def handle_endtag(self, tag):
548 def extract_attributes(html_element):
549 """Given a string
for an HTML element such
as
551 a
="foo" B
="bar" c
="&98;az" d
=boz
552 empty
= noval entity
="&"
555 Decode
and return a dictionary of attributes
.
557 'a': 'foo', 'b': 'bar', c
: 'baz', d
: 'boz',
558 'empty': '', 'noval': None, 'entity': '&',
559 'sq': '"', 'dq': '\''
562 parser = HTMLAttributeParser()
563 with contextlib.suppress(compat_HTMLParseError):
564 parser.feed(html_element)
569 def parse_list(webpage):
570 """Given a string
for an series of HTML
<li
> elements
,
571 return a dictionary of their attributes
"""
572 parser = HTMLListAttrsParser()
578 def clean_html(html):
579 """Clean an HTML snippet into a readable string
"""
581 if html is None: # Convenience for sanitizing descriptions etc.
584 html = re.sub(r'\s+', ' ', html)
585 html = re.sub(r'(?u)\s?<\s?br\s?/?\s?>\s?', '\n', html)
586 html = re.sub(r'(?u)<\s?/\s?p\s?>\s?<\s?p[^>]*>', '\n', html)
588 html = re.sub('<.*?>', '', html)
589 # Replace html entities
590 html = unescapeHTML(html)
594 class LenientJSONDecoder(json.JSONDecoder):
596 def __init__(self, *args, transform_source=None, ignore_extra=False, close_objects=0, **kwargs):
597 self.transform_source, self.ignore_extra = transform_source, ignore_extra
598 self._close_attempts = 2 * close_objects
599 super().__init__(*args, **kwargs)
602 def _close_object(err):
603 doc = err.doc[:err.pos]
604 # We need to add comma first to get the correct error message
605 if err.msg.startswith('Expecting \',\''):
607 elif not doc.endswith(','):
610 if err.msg.startswith('Expecting property name'):
611 return doc[:-1] + '}'
612 elif err.msg.startswith('Expecting value'):
613 return doc[:-1] + ']'
616 if self.transform_source:
617 s = self.transform_source(s)
618 for attempt in range(self._close_attempts + 1):
620 if self.ignore_extra:
621 return self.raw_decode(s.lstrip())[0]
622 return super().decode(s)
623 except json.JSONDecodeError as e:
626 elif attempt < self._close_attempts:
627 s = self._close_object(e)
630 raise type(e)(f'{e.msg} in {s[e.pos-10:e.pos+10]!r}', s, e.pos)
631 assert False, 'Too many attempts to decode JSON'
634 def sanitize_open(filename, open_mode):
635 """Try to
open the given filename
, and slightly tweak it
if this fails
.
637 Attempts to
open the given filename
. If this fails
, it tries to change
638 the filename slightly
, step by step
, until it
's either able to open it
639 or it fails and raises a final exception, like the standard open()
642 It returns the tuple (stream, definitive_file_name).
645 if sys.platform == 'win32
':
648 # stdout may be any IO stream, e.g. when using contextlib.redirect_stdout
649 with contextlib.suppress(io.UnsupportedOperation):
650 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
651 return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
653 for attempt in range(2):
656 if sys.platform == 'win32
':
657 # FIXME: An exclusive lock also locks the file from being read.
658 # Since windows locks are mandatory, don't lock the
file on
windows (for now
).
659 # Ref: https://github.com/yt-dlp/yt-dlp/issues/3124
660 raise LockingUnsupportedError()
661 stream
= locked_file(filename
, open_mode
, block
=False).__enter
__()
663 stream
= open(filename
, open_mode
)
664 return stream
, filename
665 except OSError as err
:
666 if attempt
or err
.errno
in (errno
.EACCES
,):
668 old_filename
, filename
= filename
, sanitize_path(filename
)
669 if old_filename
== filename
:
673 def timeconvert(timestr
):
674 """Convert RFC 2822 defined time string into system timestamp"""
676 timetuple
= email
.utils
.parsedate_tz(timestr
)
677 if timetuple
is not None:
678 timestamp
= email
.utils
.mktime_tz(timetuple
)
682 def sanitize_filename(s
, restricted
=False, is_id
=NO_DEFAULT
):
683 """Sanitizes a string so it could be used as part of a filename.
684 @param restricted Use a stricter subset of allowed characters
685 @param is_id Whether this is an ID that should be kept unchanged if possible.
686 If unset, yt-dlp's new sanitization rules are in effect
691 def replace_insane(char
):
692 if restricted
and char
in ACCENT_CHARS
:
693 return ACCENT_CHARS
[char
]
694 elif not restricted
and char
== '\n':
696 elif is_id
is NO_DEFAULT
and not restricted
and char
in '"*:<>?|/\\':
697 # Replace with their full-width unicode counterparts
698 return {'/': '\u29F8', '\\': '\u29f9'}
.get(char
, chr(ord(char
) + 0xfee0))
699 elif char
== '?' or ord(char
) < 32 or ord(char
) == 127:
702 return '' if restricted
else '\''
704 return '\0_\0-' if restricted
else '\0 \0-'
705 elif char
in '\\/|*<>':
707 if restricted
and (char
in '!&\'()[]{}$;`^,#' or char
.isspace() or ord(char
) > 127):
711 # Replace look-alike Unicode glyphs
712 if restricted
and (is_id
is NO_DEFAULT
or not is_id
):
713 s
= unicodedata
.normalize('NFKC', s
)
714 s
= re
.sub(r
'[0-9]+(?::[0-9]+)+', lambda m
: m
.group(0).replace(':', '_'), s
) # Handle timestamps
715 result
= ''.join(map(replace_insane
, s
))
716 if is_id
is NO_DEFAULT
:
717 result
= re
.sub(r
'(\0.)(?:(?=\1)..)+', r
'\1', result
) # Remove repeated substitute chars
718 STRIP_RE
= r
'(?:\0.|[ _-])*'
719 result
= re
.sub(f
'^\0.{STRIP_RE}|{STRIP_RE}\0.$', '', result
) # Remove substitute chars from start/end
720 result
= result
.replace('\0', '') or '_'
723 while '__' in result
:
724 result
= result
.replace('__', '_')
725 result
= result
.strip('_')
726 # Common case of "Foreign band name - English song title"
727 if restricted
and result
.startswith('-_'):
729 if result
.startswith('-'):
730 result
= '_' + result
[len('-'):]
731 result
= result
.lstrip('.')
737 def sanitize_path(s
, force
=False):
738 """Sanitizes and normalizes path on Windows"""
739 if sys
.platform
== 'win32':
741 drive_or_unc
, _
= os
.path
.splitdrive(s
)
747 norm_path
= os
.path
.normpath(remove_start(s
, drive_or_unc
)).split(os
.path
.sep
)
751 path_part
if path_part
in ['.', '..'] else re
.sub(r
'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part
)
752 for path_part
in norm_path
]
754 sanitized_path
.insert(0, drive_or_unc
+ os
.path
.sep
)
755 elif force
and s
and s
[0] == os
.path
.sep
:
756 sanitized_path
.insert(0, os
.path
.sep
)
757 return os
.path
.join(*sanitized_path
)
760 def sanitize_url(url
, *, scheme
='http'):
761 # Prepend protocol-less URLs with `http:` scheme in order to mitigate
762 # the number of unwanted failures due to missing protocol
765 elif url
.startswith('//'):
766 return f
'{scheme}:{url}'
767 # Fix some common typos seen so far
769 # https://github.com/ytdl-org/youtube-dl/issues/15649
770 (r
'^httpss://', r
'https://'),
771 # https://bx1.be/lives/direct-tv/
772 (r
'^rmtp([es]?)://', r
'rtmp\1://'),
774 for mistake
, fixup
in COMMON_TYPOS
:
775 if re
.match(mistake
, url
):
776 return re
.sub(mistake
, fixup
, url
)
780 def extract_basic_auth(url
):
781 parts
= urllib
.parse
.urlsplit(url
)
782 if parts
.username
is None:
784 url
= urllib
.parse
.urlunsplit(parts
._replace
(netloc
=(
785 parts
.hostname
if parts
.port
is None
786 else '%s:%d' % (parts
.hostname
, parts
.port
))))
787 auth_payload
= base64
.b64encode(
788 ('%s:%s' % (parts
.username
, parts
.password
or '')).encode())
789 return url
, f
'Basic {auth_payload.decode()}'
792 def sanitized_Request(url
, *args
, **kwargs
):
793 url
, auth_header
= extract_basic_auth(escape_url(sanitize_url(url
)))
794 if auth_header
is not None:
795 headers
= args
[1] if len(args
) >= 2 else kwargs
.setdefault('headers', {})
796 headers
['Authorization'] = auth_header
797 return urllib
.request
.Request(url
, *args
, **kwargs
)
801 """Expand shell variables and ~"""
802 return os
.path
.expandvars(compat_expanduser(s
))
805 def orderedSet(iterable
, *, lazy
=False):
806 """Remove all duplicates from the input iterable"""
808 seen
= [] # Do not use set since the items can be unhashable
814 return _iter() if lazy
else list(_iter())
817 def _htmlentity_transform(entity_with_semicolon
):
818 """Transforms an HTML entity to a character."""
819 entity
= entity_with_semicolon
[:-1]
821 # Known non-numeric HTML entity
822 if entity
in html
.entities
.name2codepoint
:
823 return chr(html
.entities
.name2codepoint
[entity
])
825 # TODO: HTML5 allows entities without a semicolon.
826 # E.g. 'Éric' should be decoded as 'Éric'.
827 if entity_with_semicolon
in html
.entities
.html5
:
828 return html
.entities
.html5
[entity_with_semicolon
]
830 mobj
= re
.match(r
'#(x[0-9a-fA-F]+|[0-9]+)', entity
)
832 numstr
= mobj
.group(1)
833 if numstr
.startswith('x'):
835 numstr
= '0%s' % numstr
838 # See https://github.com/ytdl-org/youtube-dl/issues/7518
839 with contextlib
.suppress(ValueError):
840 return chr(int(numstr
, base
))
842 # Unknown entity in name, return its literal representation
843 return '&%s;' % entity
849 assert isinstance(s
, str)
852 r
'&([^&;]+;)', lambda m
: _htmlentity_transform(m
.group(1)), s
)
855 def escapeHTML(text
):
858 .replace('&', '&')
859 .replace('<', '<')
860 .replace('>', '>')
861 .replace('"', '"')
862 .replace("'", ''')
866 def process_communicate_or_kill(p
, *args
, **kwargs
):
867 deprecation_warning(f
'"{__name__}.process_communicate_or_kill" is deprecated and may be removed '
868 f
'in a future version. Use "{__name__}.Popen.communicate_or_kill" instead')
869 return Popen
.communicate_or_kill(p
, *args
, **kwargs
)
872 class Popen(subprocess
.Popen
):
873 if sys
.platform
== 'win32':
874 _startupinfo
= subprocess
.STARTUPINFO()
875 _startupinfo
.dwFlags |
= subprocess
.STARTF_USESHOWWINDOW
880 def _fix_pyinstaller_ld_path(env
):
881 """Restore LD_LIBRARY_PATH when using PyInstaller
882 Ref: https://github.com/pyinstaller/pyinstaller/blob/develop/doc/runtime-information.rst#ld_library_path--libpath-considerations
883 https://github.com/yt-dlp/yt-dlp/issues/4573
885 if not hasattr(sys
, '_MEIPASS'):
889 orig
= env
.get(f
'{key}_ORIG')
895 _fix('LD_LIBRARY_PATH') # Linux
896 _fix('DYLD_LIBRARY_PATH') # macOS
898 def __init__(self
, *args
, env
=None, text
=False, **kwargs
):
900 env
= os
.environ
.copy()
901 self
._fix
_pyinstaller
_ld
_path
(env
)
903 self
.__text
_mode
= kwargs
.get('encoding') or kwargs
.get('errors') or text
or kwargs
.get('universal_newlines')
905 kwargs
['universal_newlines'] = True # For 3.6 compatibility
906 kwargs
.setdefault('encoding', 'utf-8')
907 kwargs
.setdefault('errors', 'replace')
908 super().__init
__(*args
, env
=env
, **kwargs
, startupinfo
=self
._startupinfo
)
910 def communicate_or_kill(self
, *args
, **kwargs
):
912 return self
.communicate(*args
, **kwargs
)
913 except BaseException
: # Including KeyboardInterrupt
914 self
.kill(timeout
=None)
917 def kill(self
, *, timeout
=0):
920 self
.wait(timeout
=timeout
)
923 def run(cls
, *args
, timeout
=None, **kwargs
):
924 with cls(*args
, **kwargs
) as proc
:
925 default
= '' if proc
.__text
_mode
else b
''
926 stdout
, stderr
= proc
.communicate_or_kill(timeout
=timeout
)
927 return stdout
or default
, stderr
or default
, proc
.returncode
930 def encodeArgument(s
):
931 # Legacy code that uses byte strings
932 # Uncomment the following line after fixing all post processors
933 # assert isinstance(s, str), 'Internal error: %r should be of type %r, is %r' % (s, str, type(s))
934 return s
if isinstance(s
, str) else s
.decode('ascii')
937 _timetuple
= collections
.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))
940 def timetuple_from_msec(msec
):
941 secs
, msec
= divmod(msec
, 1000)
942 mins
, secs
= divmod(secs
, 60)
943 hrs
, mins
= divmod(mins
, 60)
944 return _timetuple(hrs
, mins
, secs
, msec
)
947 def formatSeconds(secs
, delim
=':', msec
=False):
948 time
= timetuple_from_msec(secs
* 1000)
950 ret
= '%d%s%02d%s%02d' % (time
.hours
, delim
, time
.minutes
, delim
, time
.seconds
)
952 ret
= '%d%s%02d' % (time
.minutes
, delim
, time
.seconds
)
954 ret
= '%d' % time
.seconds
955 return '%s.%03d' % (ret
, time
.milliseconds
) if msec
else ret
958 def _ssl_load_windows_store_certs(ssl_context
, storename
):
959 # Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
961 certs
= [cert
for cert
, encoding
, trust
in ssl
.enum_certificates(storename
)
962 if encoding
== 'x509_asn' and (
963 trust
is True or ssl
.Purpose
.SERVER_AUTH
.oid
in trust
)]
964 except PermissionError
:
967 with contextlib
.suppress(ssl
.SSLError
):
968 ssl_context
.load_verify_locations(cadata
=cert
)
971 def make_HTTPS_handler(params
, **kwargs
):
972 opts_check_certificate
= not params
.get('nocheckcertificate')
973 context
= ssl
.SSLContext(ssl
.PROTOCOL_TLS_CLIENT
)
974 context
.check_hostname
= opts_check_certificate
975 if params
.get('legacyserverconnect'):
976 context
.options |
= 4 # SSL_OP_LEGACY_SERVER_CONNECT
977 # Allow use of weaker ciphers in Python 3.10+. See https://bugs.python.org/issue43998
978 context
.set_ciphers('DEFAULT')
980 sys
.version_info
< (3, 10)
981 and ssl
.OPENSSL_VERSION_INFO
>= (1, 1, 1)
982 and not ssl
.OPENSSL_VERSION
.startswith('LibreSSL')
984 # Backport the default SSL ciphers and minimum TLS version settings from Python 3.10 [1].
985 # This is to ensure consistent behavior across Python versions, and help avoid fingerprinting
986 # in some situations [2][3].
987 # Python 3.10 only supports OpenSSL 1.1.1+ [4]. Because this change is likely
988 # untested on older versions, we only apply this to OpenSSL 1.1.1+ to be safe.
989 # LibreSSL is excluded until further investigation due to cipher support issues [5][6].
990 # 1. https://github.com/python/cpython/commit/e983252b516edb15d4338b0a47631b59ef1e2536
991 # 2. https://github.com/yt-dlp/yt-dlp/issues/4627
992 # 3. https://github.com/yt-dlp/yt-dlp/pull/5294
993 # 4. https://peps.python.org/pep-0644/
994 # 5. https://peps.python.org/pep-0644/#libressl-support
995 # 6. https://github.com/yt-dlp/yt-dlp/commit/5b9f253fa0aee996cf1ed30185d4b502e00609c4#commitcomment-89054368
996 context
.set_ciphers('@SECLEVEL=2:ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES:DHE+AES:!aNULL:!eNULL:!aDSS:!SHA1:!AESCCM')
997 context
.minimum_version
= ssl
.TLSVersion
.TLSv1_2
999 context
.verify_mode
= ssl
.CERT_REQUIRED
if opts_check_certificate
else ssl
.CERT_NONE
1000 if opts_check_certificate
:
1001 if certifi
and 'no-certifi' not in params
.get('compat_opts', []):
1002 context
.load_verify_locations(cafile
=certifi
.where())
1005 context
.load_default_certs()
1006 # Work around the issue in load_default_certs when there are bad certificates. See:
1007 # https://github.com/yt-dlp/yt-dlp/issues/1060,
1008 # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
1009 except ssl
.SSLError
:
1010 # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
1011 if sys
.platform
== 'win32' and hasattr(ssl
, 'enum_certificates'):
1012 for storename
in ('CA', 'ROOT'):
1013 _ssl_load_windows_store_certs(context
, storename
)
1014 context
.set_default_verify_paths()
1016 client_certfile
= params
.get('client_certificate')
1019 context
.load_cert_chain(
1020 client_certfile
, keyfile
=params
.get('client_certificate_key'),
1021 password
=params
.get('client_certificate_password'))
1022 except ssl
.SSLError
:
1023 raise YoutubeDLError('Unable to load client certificate')
1025 # Some servers may reject requests if ALPN extension is not sent. See:
1026 # https://github.com/python/cpython/issues/85140
1027 # https://github.com/yt-dlp/yt-dlp/issues/3878
1028 with contextlib
.suppress(NotImplementedError):
1029 context
.set_alpn_protocols(['http/1.1'])
1031 return YoutubeDLHTTPSHandler(params
, context
=context
, **kwargs
)
1034 def bug_reports_message(before
=';'):
1035 from ..update
import REPOSITORY
1037 msg
= (f
'please report this issue on https://github.com/{REPOSITORY}/issues?q= , '
1038 'filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U')
1040 before
= before
.rstrip()
1041 if not before
or before
.endswith(('.', '!', '?')):
1042 msg
= msg
[0].title() + msg
[1:]
1044 return (before
+ ' ' if before
else '') + msg
1047 class YoutubeDLError(Exception):
1048 """Base exception for YoutubeDL errors."""
1051 def __init__(self
, msg
=None):
1054 elif self
.msg
is None:
1055 self
.msg
= type(self
).__name
__
1056 super().__init
__(self
.msg
)
1059 network_exceptions
= [urllib
.error
.URLError
, http
.client
.HTTPException
, socket
.error
]
1060 if hasattr(ssl
, 'CertificateError'):
1061 network_exceptions
.append(ssl
.CertificateError
)
1062 network_exceptions
= tuple(network_exceptions
)
1065 class ExtractorError(YoutubeDLError
):
1066 """Error during info extraction."""
1068 def __init__(self
, msg
, tb
=None, expected
=False, cause
=None, video_id
=None, ie
=None):
1069 """ tb, if given, is the original traceback (so that it can be printed out).
1070 If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
1072 if sys
.exc_info()[0] in network_exceptions
:
1075 self
.orig_msg
= str(msg
)
1077 self
.expected
= expected
1079 self
.video_id
= video_id
1081 self
.exc_info
= sys
.exc_info() # preserve original exception
1082 if isinstance(self
.exc_info
[1], ExtractorError
):
1083 self
.exc_info
= self
.exc_info
[1].exc_info
1084 super().__init
__(self
.__msg
)
1089 format_field(self
.ie
, None, '[%s] '),
1090 format_field(self
.video_id
, None, '%s: '),
1092 format_field(self
.cause
, None, ' (caused by %r)'),
1093 '' if self
.expected
else bug_reports_message()))
1095 def format_traceback(self
):
1096 return join_nonempty(
1097 self
.traceback
and ''.join(traceback
.format_tb(self
.traceback
)),
1098 self
.cause
and ''.join(traceback
.format_exception(None, self
.cause
, self
.cause
.__traceback
__)[1:]),
1101 def __setattr__(self
, name
, value
):
1102 super().__setattr
__(name
, value
)
1103 if getattr(self
, 'msg', None) and name
not in ('msg', 'args'):
1104 self
.msg
= self
.__msg
or type(self
).__name
__
1105 self
.args
= (self
.msg
, ) # Cannot be property
1108 class UnsupportedError(ExtractorError
):
1109 def __init__(self
, url
):
1111 'Unsupported URL: %s' % url
, expected
=True)
1115 class RegexNotFoundError(ExtractorError
):
1116 """Error when a regex didn't match"""
1120 class GeoRestrictedError(ExtractorError
):
1121 """Geographic restriction Error exception.
1123 This exception may be thrown when a video is not available from your
1124 geographic location due to geographic restrictions imposed by a website.
1127 def __init__(self
, msg
, countries
=None, **kwargs
):
1128 kwargs
['expected'] = True
1129 super().__init
__(msg
, **kwargs
)
1130 self
.countries
= countries
1133 class UserNotLive(ExtractorError
):
1134 """Error when a channel/user is not live"""
1136 def __init__(self
, msg
=None, **kwargs
):
1137 kwargs
['expected'] = True
1138 super().__init
__(msg
or 'The channel is not currently live', **kwargs
)
1141 class DownloadError(YoutubeDLError
):
1142 """Download Error exception.
1144 This exception may be thrown by FileDownloader objects if they are not
1145 configured to continue on errors. They will contain the appropriate
1149 def __init__(self
, msg
, exc_info
=None):
1150 """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
1151 super().__init
__(msg
)
1152 self
.exc_info
= exc_info
1155 class EntryNotInPlaylist(YoutubeDLError
):
1156 """Entry not in playlist exception.
1158 This exception will be thrown by YoutubeDL when a requested entry
1159 is not found in the playlist info_dict
1161 msg
= 'Entry not found in info'
1164 class SameFileError(YoutubeDLError
):
1165 """Same File exception.
1167 This exception will be thrown by FileDownloader objects if they detect
1168 multiple files would have to be downloaded to the same file on disk.
1170 msg
= 'Fixed output name but more than one file to download'
1172 def __init__(self
, filename
=None):
1173 if filename
is not None:
1174 self
.msg
+= f
': {filename}'
1175 super().__init
__(self
.msg
)
1178 class PostProcessingError(YoutubeDLError
):
1179 """Post Processing exception.
1181 This exception may be raised by PostProcessor's .run() method to
1182 indicate an error in the postprocessing task.
1186 class DownloadCancelled(YoutubeDLError
):
1187 """ Exception raised when the download queue should be interrupted """
1188 msg
= 'The download was cancelled'
1191 class ExistingVideoReached(DownloadCancelled
):
1192 """ --break-on-existing triggered """
1193 msg
= 'Encountered a video that is already in the archive, stopping due to --break-on-existing'
1196 class RejectedVideoReached(DownloadCancelled
):
1197 """ --break-match-filter triggered """
1198 msg
= 'Encountered a video that did not match filter, stopping due to --break-match-filter'
1201 class MaxDownloadsReached(DownloadCancelled
):
1202 """ --max-downloads limit has been reached. """
1203 msg
= 'Maximum number of downloads reached, stopping due to --max-downloads'
1206 class ReExtractInfo(YoutubeDLError
):
1207 """ Video info needs to be re-extracted. """
1209 def __init__(self
, msg
, expected
=False):
1210 super().__init
__(msg
)
1211 self
.expected
= expected
1214 class ThrottledDownload(ReExtractInfo
):
1215 """ Download speed below --throttled-rate. """
1216 msg
= 'The download speed is below throttle limit'
1219 super().__init
__(self
.msg
, expected
=False)
1222 class UnavailableVideoError(YoutubeDLError
):
1223 """Unavailable Format exception.
1225 This exception will be thrown when a video is requested
1226 in a format that is not available for that video.
1228 msg
= 'Unable to download video'
1230 def __init__(self
, err
=None):
1232 self
.msg
+= f
': {err}'
1233 super().__init
__(self
.msg
)
1236 class ContentTooShortError(YoutubeDLError
):
1237 """Content Too Short exception.
1239 This exception may be raised by FileDownloader objects when a file they
1240 download is too small for what the server announced first, indicating
1241 the connection was probably interrupted.
1244 def __init__(self
, downloaded
, expected
):
1245 super().__init
__(f
'Downloaded {downloaded} bytes, expected {expected} bytes')
1247 self
.downloaded
= downloaded
1248 self
.expected
= expected
1251 class XAttrMetadataError(YoutubeDLError
):
1252 def __init__(self
, code
=None, msg
='Unknown error'):
1253 super().__init
__(msg
)
1257 # Parsing code and msg
1258 if (self
.code
in (errno
.ENOSPC
, errno
.EDQUOT
)
1259 or 'No space left' in self
.msg
or 'Disk quota exceeded' in self
.msg
):
1260 self
.reason
= 'NO_SPACE'
1261 elif self
.code
== errno
.E2BIG
or 'Argument list too long' in self
.msg
:
1262 self
.reason
= 'VALUE_TOO_LONG'
1264 self
.reason
= 'NOT_SUPPORTED'
1267 class XAttrUnavailableError(YoutubeDLError
):
1271 def _create_http_connection(ydl_handler
, http_class
, is_https
, *args
, **kwargs
):
1272 hc
= http_class(*args
, **kwargs
)
1273 source_address
= ydl_handler
._params
.get('source_address')
1275 if source_address
is not None:
1276 # This is to workaround _create_connection() from socket where it will try all
1277 # address data from getaddrinfo() including IPv6. This filters the result from
1278 # getaddrinfo() based on the source_address value.
1279 # This is based on the cpython socket.create_connection() function.
1280 # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
1281 def _create_connection(address
, timeout
=socket
._GLOBAL
_DEFAULT
_TIMEOUT
, source_address
=None):
1282 host
, port
= address
1284 addrs
= socket
.getaddrinfo(host
, port
, 0, socket
.SOCK_STREAM
)
1285 af
= socket
.AF_INET
if '.' in source_address
[0] else socket
.AF_INET6
1286 ip_addrs
= [addr
for addr
in addrs
if addr
[0] == af
]
1287 if addrs
and not ip_addrs
:
1288 ip_version
= 'v4' if af
== socket
.AF_INET
else 'v6'
1290 "No remote IP%s addresses available for connect, can't use '%s' as source address"
1291 % (ip_version
, source_address
[0]))
1292 for res
in ip_addrs
:
1293 af
, socktype
, proto
, canonname
, sa
= res
1296 sock
= socket
.socket(af
, socktype
, proto
)
1297 if timeout
is not socket
._GLOBAL
_DEFAULT
_TIMEOUT
:
1298 sock
.settimeout(timeout
)
1299 sock
.bind(source_address
)
1301 err
= None # Explicitly break reference cycle
1303 except OSError as _
:
1305 if sock
is not None:
1310 raise OSError('getaddrinfo returns an empty list')
1311 if hasattr(hc
, '_create_connection'):
1312 hc
._create
_connection
= _create_connection
1313 hc
.source_address
= (source_address
, 0)
1318 class YoutubeDLHandler(urllib
.request
.HTTPHandler
):
1319 """Handler for HTTP requests and responses.
1321 This class, when installed with an OpenerDirector, automatically adds
1322 the standard headers to every HTTP request and handles gzipped, deflated and
1323 brotli responses from web servers.
1325 Part of this code was copied from:
1327 http://techknack.net/python-urllib2-handlers/
1329 Andrew Rowls, the author of that code, agreed to release it to the
1333 def __init__(self
, params
, *args
, **kwargs
):
1334 urllib
.request
.HTTPHandler
.__init
__(self
, *args
, **kwargs
)
1335 self
._params
= params
1337 def http_open(self
, req
):
1338 conn_class
= http
.client
.HTTPConnection
1340 socks_proxy
= req
.headers
.get('Ytdl-socks-proxy')
1342 conn_class
= make_socks_conn_class(conn_class
, socks_proxy
)
1343 del req
.headers
['Ytdl-socks-proxy']
1345 return self
.do_open(functools
.partial(
1346 _create_http_connection
, self
, conn_class
, False),
1354 return zlib
.decompress(data
, -zlib
.MAX_WBITS
)
1356 return zlib
.decompress(data
)
1362 return brotli
.decompress(data
)
1364 def http_request(self
, req
):
1365 # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
1366 # always respected by websites, some tend to give out URLs with non percent-encoded
1367 # non-ASCII characters (see telemb.py, ard.py [#3412])
1368 # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
1369 # To work around aforementioned issue we will replace request's original URL with
1370 # percent-encoded one
1371 # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
1372 # the code of this workaround has been moved here from YoutubeDL.urlopen()
1373 url
= req
.get_full_url()
1374 url_escaped
= escape_url(url
)
1376 # Substitute URL if any change after escaping
1377 if url
!= url_escaped
:
1378 req
= update_Request(req
, url
=url_escaped
)
1380 for h
, v
in self
._params
.get('http_headers', std_headers
).items():
1381 # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
1382 # The dict keys are capitalized because of this bug by urllib
1383 if h
.capitalize() not in req
.headers
:
1384 req
.add_header(h
, v
)
1386 if 'Youtubedl-no-compression' in req
.headers
: # deprecated
1387 req
.headers
.pop('Youtubedl-no-compression', None)
1388 req
.add_header('Accept-encoding', 'identity')
1390 if 'Accept-encoding' not in req
.headers
:
1391 req
.add_header('Accept-encoding', ', '.join(SUPPORTED_ENCODINGS
))
1393 return super().do_request_(req
)
1395 def http_response(self
, req
, resp
):
1398 if resp
.headers
.get('Content-encoding', '') == 'gzip':
1399 content
= resp
.read()
1400 gz
= gzip
.GzipFile(fileobj
=io
.BytesIO(content
), mode
='rb')
1402 uncompressed
= io
.BytesIO(gz
.read())
1403 except OSError as original_ioerror
:
1404 # There may be junk add the end of the file
1405 # See http://stackoverflow.com/q/4928560/35070 for details
1406 for i
in range(1, 1024):
1408 gz
= gzip
.GzipFile(fileobj
=io
.BytesIO(content
[:-i
]), mode
='rb')
1409 uncompressed
= io
.BytesIO(gz
.read())
1414 raise original_ioerror
1415 resp
= urllib
.request
.addinfourl(uncompressed
, old_resp
.headers
, old_resp
.url
, old_resp
.code
)
1416 resp
.msg
= old_resp
.msg
1418 if resp
.headers
.get('Content-encoding', '') == 'deflate':
1419 gz
= io
.BytesIO(self
.deflate(resp
.read()))
1420 resp
= urllib
.request
.addinfourl(gz
, old_resp
.headers
, old_resp
.url
, old_resp
.code
)
1421 resp
.msg
= old_resp
.msg
1423 if resp
.headers
.get('Content-encoding', '') == 'br':
1424 resp
= urllib
.request
.addinfourl(
1425 io
.BytesIO(self
.brotli(resp
.read())), old_resp
.headers
, old_resp
.url
, old_resp
.code
)
1426 resp
.msg
= old_resp
.msg
1427 # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
1428 # https://github.com/ytdl-org/youtube-dl/issues/6457).
1429 if 300 <= resp
.code
< 400:
1430 location
= resp
.headers
.get('Location')
1432 # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
1433 location
= location
.encode('iso-8859-1').decode()
1434 location_escaped
= escape_url(location
)
1435 if location
!= location_escaped
:
1436 del resp
.headers
['Location']
1437 resp
.headers
['Location'] = location_escaped
1440 https_request
= http_request
1441 https_response
= http_response
1444 def make_socks_conn_class(base_class
, socks_proxy
):
1445 assert issubclass(base_class
, (
1446 http
.client
.HTTPConnection
, http
.client
.HTTPSConnection
))
1448 url_components
= urllib
.parse
.urlparse(socks_proxy
)
1449 if url_components
.scheme
.lower() == 'socks5':
1450 socks_type
= ProxyType
.SOCKS5
1451 elif url_components
.scheme
.lower() in ('socks', 'socks4'):
1452 socks_type
= ProxyType
.SOCKS4
1453 elif url_components
.scheme
.lower() == 'socks4a':
1454 socks_type
= ProxyType
.SOCKS4A
1456 def unquote_if_non_empty(s
):
1459 return urllib
.parse
.unquote_plus(s
)
1463 url_components
.hostname
, url_components
.port
or 1080,
1465 unquote_if_non_empty(url_components
.username
),
1466 unquote_if_non_empty(url_components
.password
),
1469 class SocksConnection(base_class
):
1471 self
.sock
= sockssocket()
1472 self
.sock
.setproxy(*proxy_args
)
1473 if isinstance(self
.timeout
, (int, float)):
1474 self
.sock
.settimeout(self
.timeout
)
1475 self
.sock
.connect((self
.host
, self
.port
))
1477 if isinstance(self
, http
.client
.HTTPSConnection
):
1478 if hasattr(self
, '_context'): # Python > 2.6
1479 self
.sock
= self
._context
.wrap_socket(
1480 self
.sock
, server_hostname
=self
.host
)
1482 self
.sock
= ssl
.wrap_socket(self
.sock
)
1484 return SocksConnection
1487 class YoutubeDLHTTPSHandler(urllib
.request
.HTTPSHandler
):
1488 def __init__(self
, params
, https_conn_class
=None, *args
, **kwargs
):
1489 urllib
.request
.HTTPSHandler
.__init
__(self
, *args
, **kwargs
)
1490 self
._https
_conn
_class
= https_conn_class
or http
.client
.HTTPSConnection
1491 self
._params
= params
1493 def https_open(self
, req
):
1495 conn_class
= self
._https
_conn
_class
1497 if hasattr(self
, '_context'): # python > 2.6
1498 kwargs
['context'] = self
._context
1499 if hasattr(self
, '_check_hostname'): # python 3.x
1500 kwargs
['check_hostname'] = self
._check
_hostname
1502 socks_proxy
= req
.headers
.get('Ytdl-socks-proxy')
1504 conn_class
= make_socks_conn_class(conn_class
, socks_proxy
)
1505 del req
.headers
['Ytdl-socks-proxy']
1508 return self
.do_open(
1509 functools
.partial(_create_http_connection
, self
, conn_class
, True), req
, **kwargs
)
1510 except urllib
.error
.URLError
as e
:
1511 if (isinstance(e
.reason
, ssl
.SSLError
)
1512 and getattr(e
.reason
, 'reason', None) == 'SSLV3_ALERT_HANDSHAKE_FAILURE'):
1513 raise YoutubeDLError('SSLV3_ALERT_HANDSHAKE_FAILURE: Try using --legacy-server-connect')
1517 def is_path_like(f
):
1518 return isinstance(f
, (str, bytes, os
.PathLike
))
1521 class YoutubeDLCookieProcessor(urllib
.request
.HTTPCookieProcessor
):
1522 def __init__(self
, cookiejar
=None):
1523 urllib
.request
.HTTPCookieProcessor
.__init
__(self
, cookiejar
)
1525 def http_response(self
, request
, response
):
1526 return urllib
.request
.HTTPCookieProcessor
.http_response(self
, request
, response
)
1528 https_request
= urllib
.request
.HTTPCookieProcessor
.http_request
1529 https_response
= http_response
1532 class YoutubeDLRedirectHandler(urllib
.request
.HTTPRedirectHandler
):
1533 """YoutubeDL redirect handler
1535 The code is based on HTTPRedirectHandler implementation from CPython [1].
1537 This redirect handler fixes and improves the logic to better align with RFC7261
1538 and what browsers tend to do [2][3]
1540 1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
1541 2. https://datatracker.ietf.org/doc/html/rfc7231
1542 3. https://github.com/python/cpython/issues/91306
1545 http_error_301
= http_error_303
= http_error_307
= http_error_308
= urllib
.request
.HTTPRedirectHandler
.http_error_302
1547 def redirect_request(self
, req
, fp
, code
, msg
, headers
, newurl
):
1548 if code
not in (301, 302, 303, 307, 308):
1549 raise urllib
.error
.HTTPError(req
.full_url
, code
, msg
, headers
, fp
)
1551 new_method
= req
.get_method()
1554 # A 303 must either use GET or HEAD for subsequent request
1555 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.4
1556 if code
== 303 and req
.get_method() != 'HEAD':
1558 # 301 and 302 redirects are commonly turned into a GET from a POST
1559 # for subsequent requests by browsers, so we'll do the same.
1560 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.2
1561 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.3
1562 elif code
in (301, 302) and req
.get_method() == 'POST':
1565 # only remove payload if method changed (e.g. POST to GET)
1566 if new_method
!= req
.get_method():
1568 remove_headers
.extend(['Content-Length', 'Content-Type'])
1570 new_headers
= {k: v for k, v in req.headers.items() if k.lower() not in remove_headers}
1572 return urllib
.request
.Request(
1573 newurl
, headers
=new_headers
, origin_req_host
=req
.origin_req_host
,
1574 unverifiable
=True, method
=new_method
, data
=new_data
)
1577 def extract_timezone(date_str
):
1580 ^.{8,}? # >=8 char non-TZ prefix, if present
1581 (?P<tz>Z| # just the UTC Z, or
1582 (?:(?<=.\b\d{4}|\b\d{2}:\d\d)| # preceded by 4 digits or hh:mm or
1583 (?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d)) # not preceded by 3 alpha word or >= 4 alpha or 2 digits
1584 [ ]? # optional space
1585 (?P<sign>\+|-) # +/-
1586 (?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2}) # hh[:]mm
1590 m
= re
.search(r
'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str
)
1591 timezone
= TIMEZONE_NAMES
.get(m
and m
.group('tz').strip())
1592 if timezone
is not None:
1593 date_str
= date_str
[:-len(m
.group('tz'))]
1594 timezone
= datetime
.timedelta(hours
=timezone
or 0)
1596 date_str
= date_str
[:-len(m
.group('tz'))]
1597 if not m
.group('sign'):
1598 timezone
= datetime
.timedelta()
1600 sign
= 1 if m
.group('sign') == '+' else -1
1601 timezone
= datetime
.timedelta(
1602 hours
=sign
* int(m
.group('hours')),
1603 minutes
=sign
* int(m
.group('minutes')))
1604 return timezone
, date_str
1607 def parse_iso8601(date_str
, delimiter
='T', timezone
=None):
1608 """ Return a UNIX timestamp from the given date """
1610 if date_str
is None:
1613 date_str
= re
.sub(r
'\.[0-9]+', '', date_str
)
1615 if timezone
is None:
1616 timezone
, date_str
= extract_timezone(date_str
)
1618 with contextlib
.suppress(ValueError):
1619 date_format
= f
'%Y-%m-%d{delimiter}%H:%M:%S'
1620 dt
= datetime
.datetime
.strptime(date_str
, date_format
) - timezone
1621 return calendar
.timegm(dt
.timetuple())
1624 def date_formats(day_first
=True):
1625 return DATE_FORMATS_DAY_FIRST
if day_first
else DATE_FORMATS_MONTH_FIRST
1628 def unified_strdate(date_str
, day_first
=True):
1629 """Return a string with the date in the format YYYYMMDD"""
1631 if date_str
is None:
1635 date_str
= date_str
.replace(',', ' ')
1636 # Remove AM/PM + timezone
1637 date_str
= re
.sub(r
'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str
)
1638 _
, date_str
= extract_timezone(date_str
)
1640 for expression
in date_formats(day_first
):
1641 with contextlib
.suppress(ValueError):
1642 upload_date
= datetime
.datetime
.strptime(date_str
, expression
).strftime('%Y%m%d')
1643 if upload_date
is None:
1644 timetuple
= email
.utils
.parsedate_tz(date_str
)
1646 with contextlib
.suppress(ValueError):
1647 upload_date
= datetime
.datetime(*timetuple
[:6]).strftime('%Y%m%d')
1648 if upload_date
is not None:
1649 return str(upload_date
)
1652 def unified_timestamp(date_str
, day_first
=True):
1653 if date_str
is None:
1656 date_str
= re
.sub(r
'\s+', ' ', re
.sub(
1657 r
'(?i)[,|]|(mon|tues?|wed(nes)?|thu(rs)?|fri|sat(ur)?)(day)?', '', date_str
))
1659 pm_delta
= 12 if re
.search(r
'(?i)PM', date_str
) else 0
1660 timezone
, date_str
= extract_timezone(date_str
)
1662 # Remove AM/PM + timezone
1663 date_str
= re
.sub(r
'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str
)
1665 # Remove unrecognized timezones from ISO 8601 alike timestamps
1666 m
= re
.search(r
'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str
)
1668 date_str
= date_str
[:-len(m
.group('tz'))]
1670 # Python only supports microseconds, so remove nanoseconds
1671 m
= re
.search(r
'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str
)
1673 date_str
= m
.group(1)
1675 for expression
in date_formats(day_first
):
1676 with contextlib
.suppress(ValueError):
1677 dt
= datetime
.datetime
.strptime(date_str
, expression
) - timezone
+ datetime
.timedelta(hours
=pm_delta
)
1678 return calendar
.timegm(dt
.timetuple())
1680 timetuple
= email
.utils
.parsedate_tz(date_str
)
1682 return calendar
.timegm(timetuple
) + pm_delta
* 3600 - timezone
.total_seconds()
1685 def determine_ext(url
, default_ext
='unknown_video'):
1686 if url
is None or '.' not in url
:
1688 guess
= url
.partition('?')[0].rpartition('.')[2]
1689 if re
.match(r
'^[A-Za-z0-9]+$', guess
):
1691 # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
1692 elif guess
.rstrip('/') in KNOWN_EXTENSIONS
:
1693 return guess
.rstrip('/')
1698 def subtitles_filename(filename
, sub_lang
, sub_format
, expected_real_ext
=None):
1699 return replace_extension(filename
, sub_lang
+ '.' + sub_format
, expected_real_ext
)
1702 def datetime_from_str(date_str
, precision
='auto', format
='%Y%m%d'):
1704 Return a datetime object from a string.
1706 (now|today|yesterday|DATE)([+-]\d+(microsecond|second|minute|hour|day|week|month|year)s?)?
1708 @param format strftime format of DATE
1709 @param precision Round the datetime object: auto|microsecond|second|minute|hour|day
1710 auto: round to the unit provided in date_str (if applicable).
1712 auto_precision
= False
1713 if precision
== 'auto':
1714 auto_precision
= True
1715 precision
= 'microsecond'
1716 today
= datetime_round(datetime
.datetime
.utcnow(), precision
)
1717 if date_str
in ('now', 'today'):
1719 if date_str
== 'yesterday':
1720 return today
- datetime
.timedelta(days
=1)
1722 r
'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?',
1724 if match
is not None:
1725 start_time
= datetime_from_str(match
.group('start'), precision
, format
)
1726 time
= int(match
.group('time')) * (-1 if match
.group('sign') == '-' else 1)
1727 unit
= match
.group('unit')
1728 if unit
== 'month' or unit
== 'year':
1729 new_date
= datetime_add_months(start_time
, time
* 12 if unit
== 'year' else time
)
1735 delta
= datetime
.timedelta(**{unit + 's': time}
)
1736 new_date
= start_time
+ delta
1738 return datetime_round(new_date
, unit
)
1741 return datetime_round(datetime
.datetime
.strptime(date_str
, format
), precision
)
1744 def date_from_str(date_str
, format
='%Y%m%d', strict
=False):
1746 Return a date object from a string using datetime_from_str
1748 @param strict Restrict allowed patterns to "YYYYMMDD" and
1749 (now|today|yesterday)(-\d+(day|week|month|year)s?)?
1751 if strict
and not re
.fullmatch(r
'\d{8}|(now|today|yesterday)(-\d+(day|week|month|year)s?)?', date_str
):
1752 raise ValueError(f
'Invalid date format "{date_str}"')
1753 return datetime_from_str(date_str
, precision
='microsecond', format
=format
).date()
1756 def datetime_add_months(dt
, months
):
1757 """Increment/Decrement a datetime object by months."""
1758 month
= dt
.month
+ months
- 1
1759 year
= dt
.year
+ month
// 12
1760 month
= month
% 12 + 1
1761 day
= min(dt
.day
, calendar
.monthrange(year
, month
)[1])
1762 return dt
.replace(year
, month
, day
)
1765 def datetime_round(dt
, precision
='day'):
1767 Round a datetime object's time to a specific precision
1769 if precision
== 'microsecond':
1778 roundto
= lambda x
, n
: ((x
+ n
/ 2) // n
) * n
1779 timestamp
= calendar
.timegm(dt
.timetuple())
1780 return datetime
.datetime
.utcfromtimestamp(roundto(timestamp
, unit_seconds
[precision
]))
1783 def hyphenate_date(date_str
):
1785 Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
1786 match
= re
.match(r
'^(\d\d\d\d)(\d\d)(\d\d)$', date_str
)
1787 if match
is not None:
1788 return '-'.join(match
.groups())
1794 """Represents a time interval between two dates"""
1796 def __init__(self
, start
=None, end
=None):
1797 """start and end must be strings in the format accepted by date"""
1798 if start
is not None:
1799 self
.start
= date_from_str(start
, strict
=True)
1801 self
.start
= datetime
.datetime
.min.date()
1803 self
.end
= date_from_str(end
, strict
=True)
1805 self
.end
= datetime
.datetime
.max.date()
1806 if self
.start
> self
.end
:
1807 raise ValueError('Date range: "%s" , the start date must be before the end date' % self
)
1811 """Returns a range that only contains the given day"""
1812 return cls(day
, day
)
1814 def __contains__(self
, date
):
1815 """Check if the date is in the range"""
1816 if not isinstance(date
, datetime
.date
):
1817 date
= date_from_str(date
)
1818 return self
.start
<= date
<= self
.end
1821 return f
'{__name__}.{type(self).__name__}({self.start.isoformat()!r}, {self.end.isoformat()!r})'
1823 def __eq__(self
, other
):
1824 return (isinstance(other
, DateRange
)
1825 and self
.start
== other
.start
and self
.end
== other
.end
)
1829 def system_identifier():
1830 python_implementation
= platform
.python_implementation()
1831 if python_implementation
== 'PyPy' and hasattr(sys
, 'pypy_version_info'):
1832 python_implementation
+= ' version %d.%d.%d' % sys
.pypy_version_info
[:3]
1834 with contextlib
.suppress(OSError): # We may not have access to the executable
1835 libc_ver
= platform
.libc_ver()
1837 return 'Python %s (%s %s %s) - %s (%s%s)' % (
1838 platform
.python_version(),
1839 python_implementation
,
1841 platform
.architecture()[0],
1842 platform
.platform(),
1843 ssl
.OPENSSL_VERSION
,
1844 format_field(join_nonempty(*libc_ver
, delim
=' '), None, ', %s'),
1849 def get_windows_version():
1850 ''' Get Windows version. returns () if it's not running on Windows '''
1851 if compat_os_name
== 'nt':
1852 return version_tuple(platform
.win32_ver()[1])
1857 def write_string(s
, out
=None, encoding
=None):
1858 assert isinstance(s
, str)
1859 out
= out
or sys
.stderr
1860 # `sys.stderr` might be `None` (Ref: https://github.com/pyinstaller/pyinstaller/pull/7217)
1864 if compat_os_name
== 'nt' and supports_terminal_sequences(out
):
1865 s
= re
.sub(r
'([\r\n]+)', r
' \1', s
)
1867 enc
, buffer = None, out
1868 if 'b' in getattr(out
, 'mode', ''):
1869 enc
= encoding
or preferredencoding()
1870 elif hasattr(out
, 'buffer'):
1872 enc
= encoding
or getattr(out
, 'encoding', None) or preferredencoding()
1874 buffer.write(s
.encode(enc
, 'ignore') if enc
else s
)
1878 def deprecation_warning(msg
, *, printer
=None, stacklevel
=0, **kwargs
):
1879 from .. import _IN_CLI
1881 if msg
in deprecation_warning
._cache
:
1883 deprecation_warning
._cache
.add(msg
)
1885 return printer(f
'{msg}{bug_reports_message()}', **kwargs
)
1886 return write_string(f
'ERROR: {msg}{bug_reports_message()}\n', **kwargs
)
1889 warnings
.warn(DeprecationWarning(msg
), stacklevel
=stacklevel
+ 3)
1892 deprecation_warning
._cache
= set()
1895 def bytes_to_intlist(bs
):
1898 if isinstance(bs
[0], int): # Python 3
1901 return [ord(c
) for c
in bs
]
1904 def intlist_to_bytes(xs
):
1907 return struct
.pack('%dB' % len(xs
), *xs
)
1910 class LockingUnsupportedError(OSError):
1911 msg
= 'File locking is not supported'
1914 super().__init
__(self
.msg
)
# Cross-platform file locking
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.WinDLL('kernel32')
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,      # hFile
        ctypes.wintypes.DWORD,       # dwFlags
        ctypes.wintypes.DWORD,       # dwReserved
        ctypes.wintypes.DWORD,       # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,       # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED),  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,      # hFile
        ctypes.wintypes.DWORD,       # dwReserved
        ctypes.wintypes.DWORD,       # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,       # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED),  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive, block):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)

        if not LockFileEx(msvcrt.get_osfhandle(f.fileno()),
                          (0x2 if exclusive else 0x0) | (0x0 if block else 0x1),
                          0, whole_low, whole_high, f._lock_file_overlapped_p):
            # NB: No argument form of "ctypes.FormatError" does not work on PyPy
            raise BlockingIOError(f'Locking file failed: {ctypes.FormatError(ctypes.GetLastError())!r}')

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())

else:
    try:
        import fcntl

        def _lock_file(f, exclusive, block):
            flags = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
            if not block:
                flags |= fcntl.LOCK_NB
            try:
                fcntl.flock(f, flags)
            except BlockingIOError:
                raise
            except OSError:  # AOSP does not have flock()
                fcntl.lockf(f, flags)

        def _unlock_file(f):
            with contextlib.suppress(OSError):
                return fcntl.flock(f, fcntl.LOCK_UN)
            with contextlib.suppress(OSError):
                return fcntl.lockf(f, fcntl.LOCK_UN)  # AOSP does not have flock()
            return fcntl.flock(f, fcntl.LOCK_UN | fcntl.LOCK_NB)  # virtiofs needs LOCK_NB on unlocking

    except ImportError:

        def _lock_file(f, exclusive, block):
            raise LockingUnsupportedError()

        def _unlock_file(f):
            raise LockingUnsupportedError()
class locked_file:
    locked = False

    def __init__(self, filename, mode, block=True, encoding=None):
        if mode not in {'r', 'rb', 'a', 'ab', 'w', 'wb'}:
            raise NotImplementedError(mode)
        self.mode, self.block = mode, block

        writable = any(f in mode for f in 'wax+')
        readable = any(f in mode for f in 'r+')
        flags = functools.reduce(operator.ior, (
            getattr(os, 'O_CLOEXEC', 0),  # UNIX only
            getattr(os, 'O_BINARY', 0),  # Windows only
            getattr(os, 'O_NOINHERIT', 0),  # Windows only
            os.O_CREAT if writable else 0,  # O_TRUNC only after locking
            os.O_APPEND if 'a' in mode else 0,
            os.O_EXCL if 'x' in mode else 0,
            os.O_RDONLY if not writable else os.O_RDWR if readable else os.O_WRONLY,
        ))

        self.f = os.fdopen(os.open(filename, flags, 0o666), mode, encoding=encoding)

    def __enter__(self):
        exclusive = 'r' not in self.mode
        try:
            _lock_file(self.f, exclusive, self.block)
            self.locked = True
        except OSError:
            self.f.close()
            raise
        if 'w' in self.mode:
            try:
                self.f.truncate()
            except OSError as e:
                if e.errno not in (
                    errno.ESPIPE,  # Illegal seek - expected for FIFO
                    errno.EINVAL,  # Invalid argument - expected for /dev/null
                ):
                    raise
        return self

    def unlock(self):
        if not self.locked:
            return
        try:
            _unlock_file(self.f)
        finally:
            self.locked = False

    def __exit__(self, *_):
        try:
            self.unlock()
        finally:
            self.f.close()

    def __getattr__(self, attr):
        return getattr(self.f, attr)
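# Illustrative usage of the locking helpers above (a minimal sketch, not part of
# the module API; the filename is made up). locked_file is meant to be used as a
# context manager so the lock is always released and the handle closed:
#
#   with locked_file('archive.txt', 'a', block=True, encoding='utf-8') as f:
#       f.write('NDR 123\n')
#
# Opening with 'r'/'rb' takes a shared lock, while any writable mode takes an
# exclusive lock via _lock_file().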
def get_filesystem_encoding():
    encoding = sys.getfilesystemencoding()
    return encoding if encoding is not None else 'utf-8'


def shell_quote(args):
    quoted_args = []
    encoding = get_filesystem_encoding()
    for a in args:
        if isinstance(a, bytes):
            # We may get a filename encoded with 'encodeFilename'
            a = a.decode(encoding)
        quoted_args.append(compat_shlex_quote(a))
    return ' '.join(quoted_args)


def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """

    url, idata = unsmuggle_url(url, {})
    data.update(idata)
    sdata = urllib.parse.urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return url + '#' + sdata


def unsmuggle_url(smug_url, default=None):
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, sdata = smug_url.rpartition('#')
    jsond = urllib.parse.parse_qs(sdata)['__youtubedl_smuggle'][0]
    data = json.loads(jsond)
    return url, data
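# Round-trip sketch (illustrative values): extractors can tunnel extra context
# through the URL fragment and recover it later.
#
#   smuggled = smuggle_url('https://example.com/video', {'referer': 'https://example.com/'})
#   url, data = unsmuggle_url(smuggled)
#   # url  -> 'https://example.com/video'
#   # data -> {'referer': 'https://example.com/'}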
def format_decimal_suffix(num, fmt='%d%s', *, factor=1000):
    """ Formats numbers with decimal suffixes like K, M, etc """
    num, factor = float_or_none(num), float(factor)
    if num is None or num < 0:
        return None
    POSSIBLE_SUFFIXES = 'kMGTPEZY'
    exponent = 0 if num == 0 else min(int(math.log(num, factor)), len(POSSIBLE_SUFFIXES))
    suffix = ['', *POSSIBLE_SUFFIXES][exponent]
    if factor == 1024:
        suffix = {'k': 'Ki', '': ''}.get(suffix, f'{suffix}i')
    converted = num / (factor ** exponent)
    return fmt % (converted, suffix)


def format_bytes(bytes):
    return format_decimal_suffix(bytes, '%.2f%sB', factor=1024) or 'N/A'
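# Illustrative outputs of the helpers above:
#
#   format_decimal_suffix(1_234_000, '%.2f%s')  -> '1.23M'
#   format_bytes(123456789)                     -> '117.74MiB'
#   format_bytes(None)                          -> 'N/A'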
def lookup_unit_table(unit_table, s, strict=False):
    num_re = NUMBER_RE if strict else NUMBER_RE.replace(R'\.', '[,.]')
    units_re = '|'.join(re.escape(u) for u in unit_table)
    m = (re.fullmatch if strict else re.match)(
        rf'(?P<num>{num_re})\s*(?P<unit>{units_re})\b', s)
    if not m:
        return None

    num = float(m.group('num').replace(',', '.'))
    mult = unit_table[m.group('unit')]
    return round(num * mult)


def parse_bytes(s):
    """Parse a string indicating a byte quantity into an integer"""
    return lookup_unit_table(
        {u: 1024**i for i, u in enumerate(['', *'KMGTPEZY'])},
        s.upper(), strict=True)


def parse_filesize(s):
    if s is None:
        return None

    # The lower-case forms are of course incorrect and unofficial,
    # but we support those too
    _UNIT_TABLE = {
        'megabytes': 1000 ** 2,
        'mebibytes': 1024 ** 2,
        'gigabytes': 1000 ** 3,
        'gibibytes': 1024 ** 3,
        'terabytes': 1000 ** 4,
        'tebibytes': 1024 ** 4,
        'petabytes': 1000 ** 5,
        'pebibytes': 1024 ** 5,
        'exabytes': 1000 ** 6,
        'exbibytes': 1024 ** 6,
        'zettabytes': 1000 ** 7,
        'zebibytes': 1024 ** 7,
        'yottabytes': 1000 ** 8,
        'yobibytes': 1024 ** 8,
    }

    return lookup_unit_table(_UNIT_TABLE, s)
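# Illustrative behaviour (values computed from the tables above):
#
#   parse_bytes('20M')               -> 20971520      (strict: expects the bare 'KMGTPEZY' units)
#   parse_filesize('1.5 gigabytes')  -> 1500000000
#   parse_filesize('1,5 gibibytes')  -> 1610612736    (non-strict mode accepts ',' as decimal separator)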
def parse_count(s):
    if s is None:
        return None

    s = re.sub(r'^[^\d]+\s', '', s).strip()

    if re.match(r'^[\d,.]+$', s):
        return str_to_int(s)

    ret = lookup_unit_table(_UNIT_TABLE, s)
    if ret is not None:
        return ret

    mobj = re.match(r'([\d,.]+)(?:$|\s)', s)
    if mobj:
        return str_to_int(mobj.group(1))


def parse_resolution(s, *, lenient=False):
    if s is None:
        return {}

    if lenient:
        mobj = re.search(r'(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)', s)
    else:
        mobj = re.search(r'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s)
    if mobj:
        return {
            'width': int(mobj.group('w')),
            'height': int(mobj.group('h')),
        }

    mobj = re.search(r'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s)
    if mobj:
        return {'height': int(mobj.group(1))}

    mobj = re.search(r'\b([48])[kK]\b', s)
    if mobj:
        return {'height': int(mobj.group(1)) * 540}

    return {}
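# Illustrative results:
#
#   parse_resolution('1920x1080')        -> {'width': 1920, 'height': 1080}
#   parse_resolution('hls-720p-stream')  -> {'height': 720}
#   parse_resolution('4K trailer')       -> {'height': 2160}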
def parse_bitrate(s):
    if not isinstance(s, str):
        return None
    mobj = re.search(r'\b(\d+)\s*kbps', s)
    if mobj:
        return int(mobj.group(1))


def month_by_name(name, lang='en'):
    """ Return the number of a month by (locale-independently) English name """

    month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])

    try:
        return month_names.index(name) + 1
    except ValueError:
        return None


def month_by_abbreviation(abbrev):
    """ Return the number of a month by (locale-independently) English
        abbreviation """

    try:
        return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
    except ValueError:
        return None


def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML"""
    return re.sub(
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;',
        xml_str)


def setproctitle(title):
    assert isinstance(title, str)

    # Workaround for https://github.com/yt-dlp/yt-dlp/issues/4541
    try:
        import ctypes
    except ImportError:
        return

    try:
        libc = ctypes.cdll.LoadLibrary('libc.so.6')
    except OSError:
        return
    except TypeError:
        # LoadLibrary in Windows Python 2.7.13 only expects
        # a bytestring, but since unicode_literals turns
        # every string into a unicode string, it fails.
        return
    title_bytes = title.encode()
    buf = ctypes.create_string_buffer(len(title_bytes))
    buf.value = title_bytes
    try:
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this


def remove_start(s, start):
    return s[len(start):] if s is not None and s.startswith(start) else s


def remove_end(s, end):
    return s[:-len(end)] if s is not None and s.endswith(end) else s


def remove_quotes(s):
    if s is None or len(s) < 2:
        return s
    for quote in ('"', "'", ):
        if s[0] == quote and s[-1] == quote:
            return s[1:-1]
    return s


def get_domain(url):
    """
    This implementation is inconsistent, but is kept for compatibility.
    Use this only for "webpage_url_domain"
    """
    return remove_start(urllib.parse.urlparse(url).netloc, 'www.') or None


def url_basename(url):
    path = urllib.parse.urlparse(url).path
    return path.strip('/').split('/')[-1]


def base_url(url):
    return re.match(r'https?://[^?#]+/', url).group()


def urljoin(base, path):
    if isinstance(path, bytes):
        path = path.decode()
    if not isinstance(path, str) or not path:
        return None
    if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
        return path
    if isinstance(base, bytes):
        base = base.decode()
    if not isinstance(base, str) or not re.match(
            r'^(?:https?:)?//', base):
        return None
    return urllib.parse.urljoin(base, path)
class HEADRequest(urllib.request.Request):
    def get_method(self):
        return 'HEAD'


class PUTRequest(urllib.request.Request):
    def get_method(self):
        return 'PUT'


def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    if get_attr and v is not None:
        v = getattr(v, get_attr, None)
    try:
        return int(v) * invscale // scale
    except (ValueError, TypeError, OverflowError):
        return default


def str_or_none(v, default=None):
    return default if v is None else str(v)


def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    if isinstance(int_str, int):
        return int_str
    elif isinstance(int_str, str):
        int_str = re.sub(r'[,\.\+]', '', int_str)
        return int_or_none(int_str)


def float_or_none(v, scale=1, invscale=1, default=None):
    if v is None:
        return default
    try:
        return float(v) * invscale / scale
    except (ValueError, TypeError):
        return default


def bool_or_none(v, default=None):
    return v if isinstance(v, bool) else default


def strip_or_none(v, default=None):
    return v.strip() if isinstance(v, str) else default


def url_or_none(url):
    if not url or not isinstance(url, str):
        return None
    url = url.strip()
    return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None


def request_to_url(req):
    if isinstance(req, urllib.request.Request):
        return req.get_full_url()
    else:
        return req
def strftime_or_none(timestamp, date_format, default=None):
    datetime_object = None
    try:
        if isinstance(timestamp, (int, float)):  # unix timestamp
            # Using naive datetime here can break timestamp() in Windows
            # Ref: https://github.com/yt-dlp/yt-dlp/issues/5185, https://github.com/python/cpython/issues/94414
            datetime_object = datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc)
        elif isinstance(timestamp, str):  # assume YYYYMMDD
            datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
        date_format = re.sub(  # Support %s on windows
            r'(?<!%)(%%)*%s', rf'\g<1>{int(datetime_object.timestamp())}', date_format)
        return datetime_object.strftime(date_format)
    except (ValueError, TypeError, AttributeError):
        return default
def parse_duration(s):
    if not isinstance(s, str):
        return None
    s = s.strip()
    if not s:
        return None

    days, hours, mins, secs, ms = [None] * 5
    m = re.match(r'''(?x)
            (?P<before_secs>
                (?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?
            (?P<secs>(?(before_secs)[0-9]{1,2}|[0-9]+))
            (?P<ms>[.:][0-9]+)?Z?$
        ''', s)
    if m:
        days, hours, mins, secs, ms = m.group('days', 'hours', 'mins', 'secs', 'ms')
    else:
        m = re.match(
            r'''(?ix)(?:P?
                (?:
                    [0-9]+\s*y(?:ears?)?,?\s*
                )?
                (?:
                    [0-9]+\s*m(?:onths?)?,?\s*
                )?
                (?:
                    [0-9]+\s*w(?:eeks?)?,?\s*
                )?
                (?:
                    (?P<days>[0-9]+)\s*d(?:ays?)?,?\s*
                )?
                T)?
                (?:
                    (?P<hours>[0-9]+)\s*h(?:ours?)?,?\s*
                )?
                (?:
                    (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?,?\s*
                )?
                (?:
                    (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
                )?Z?$''', s)
        if m:
            days, hours, mins, secs, ms = m.groups()
        else:
            m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
            if m:
                hours, mins = m.groups()
            else:
                return None

    if ms:
        ms = ms.replace(':', '.')
    return sum(float(part or 0) * mult for part, mult in (
        (days, 86400), (hours, 3600), (mins, 60), (secs, 1), (ms, 1)))
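# Illustrative parses (all return a float number of seconds):
#
#   parse_duration('1:23:45')     -> 5025.0
#   parse_duration('PT1H30M')     -> 5400.0
#   parse_duration('23 minutes')  -> 1380.0
#   parse_duration('90s')         -> 90.0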
def prepend_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return (
        f'{name}.{ext}{real_ext}'
        if not expected_real_ext or real_ext[1:] == expected_real_ext
        else f'{filename}.{ext}')


def replace_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return '{}.{}'.format(
        name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
        ext)


def check_executable(exe, args=[]):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
    try:
        Popen.run([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError:
        return False
    return exe


def _get_exe_version_output(exe, args):
    try:
        # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
        # SIGTTOU if yt-dlp is run in the background.
        # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
        stdout, _, ret = Popen.run([encodeArgument(exe)] + args, text=True,
                                   stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        if ret:
            return None
    except OSError:
        return False
    return stdout


def detect_exe_version(output, version_re=None, unrecognized='present'):
    assert isinstance(output, str)
    if version_re is None:
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
    if m:
        return m.group(1)
    else:
        return unrecognized


def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized=('present', 'broken')):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    unrecognized = variadic(unrecognized)
    assert len(unrecognized) in (1, 2)
    out = _get_exe_version_output(exe, args)
    if out is None:
        return unrecognized[-1]
    return out and detect_exe_version(out, version_re, unrecognized[0])


def frange(start=0, stop=None, step=1):
    """Float range"""
    if stop is None:
        start, stop = 0, start
    sign = [-1, 1][step > 0] if step else 0
    while sign * start < sign * stop:
        yield start
        start += step
class LazyList(collections.abc.Sequence):
    """Lazy immutable list from an iterable
    Note that slices of a LazyList are lists and not LazyList"""

    class IndexError(IndexError):
        pass

    def __init__(self, iterable, *, reverse=False, _cache=None):
        self._iterable = iter(iterable)
        self._cache = [] if _cache is None else _cache
        self._reversed = reverse

    def __iter__(self):
        if self._reversed:
            # We need to consume the entire iterable to iterate in reverse
            yield from self.exhaust()
            return
        yield from self._cache
        for item in self._iterable:
            self._cache.append(item)
            yield item

    def _exhaust(self):
        self._cache.extend(self._iterable)
        self._iterable = []  # Discard the emptied iterable to make it pickle-able
        return self._cache

    def exhaust(self):
        """Evaluate the entire iterable"""
        return self._exhaust()[::-1 if self._reversed else 1]

    @staticmethod
    def _reverse_index(x):
        return None if x is None else ~x

    def __getitem__(self, idx):
        if isinstance(idx, slice):
            if self._reversed:
                idx = slice(self._reverse_index(idx.start), self._reverse_index(idx.stop), -(idx.step or 1))
            start, stop, step = idx.start, idx.stop, idx.step or 1
        elif isinstance(idx, int):
            if self._reversed:
                idx = self._reverse_index(idx)
            start, stop, step = idx, idx, 0
        else:
            raise TypeError('indices must be integers or slices')
        if ((start or 0) < 0 or (stop or 0) < 0
                or (start is None and step < 0)
                or (stop is None and step > 0)):
            # We need to consume the entire iterable to be able to slice from the end
            # Obviously, never use this with infinite iterables
            self._exhaust()
            try:
                return self._cache[idx]
            except IndexError as e:
                raise self.IndexError(e) from e
        n = max(start or 0, stop or 0) - len(self._cache) + 1
        if n > 0:
            self._cache.extend(itertools.islice(self._iterable, n))
        try:
            return self._cache[idx]
        except IndexError as e:
            raise self.IndexError(e) from e

    def __bool__(self):
        try:
            self[-1] if self._reversed else self[0]
        except self.IndexError:
            return False
        return True

    def __len__(self):
        self._exhaust()
        return len(self._cache)

    def __reversed__(self):
        return type(self)(self._iterable, reverse=not self._reversed, _cache=self._cache)

    def __copy__(self):
        return type(self)(self._iterable, reverse=self._reversed, _cache=self._cache)

    def __repr__(self):
        # repr and str should mimic a list. So we exhaust the iterable
        return repr(self.exhaust())

    def __str__(self):
        return repr(self.exhaust())
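# Usage sketch (illustrative): the iterable is only consumed as far as needed,
# and consumed items are cached so they can be re-read.
#
#   lazy = LazyList(itertools.count())
#   lazy[4]     -> 4          (consumes and caches the first five items)
#   lazy[:3]    -> [0, 1, 2]  (slices are plain lists)
#   bool(lazy)  -> True       (only peeks at the first element)
#
# Reversing or calling len() requires exhausting the iterable, so avoid those
# operations on infinite generators.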
class PagedList:
    class IndexError(IndexError):
        pass

    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())

    def __init__(self, pagefunc, pagesize, use_cache=True):
        self._pagefunc = pagefunc
        self._pagesize = pagesize
        self._pagecount = float('inf')
        self._use_cache = use_cache
        self._cache = {}

    def getpage(self, pagenum):
        page_results = self._cache.get(pagenum)
        if page_results is None:
            page_results = [] if pagenum > self._pagecount else list(self._pagefunc(pagenum))
        if self._use_cache:
            self._cache[pagenum] = page_results
        return page_results

    def getslice(self, start=0, end=None):
        return list(self._getslice(start, end))

    def _getslice(self, start, end):
        raise NotImplementedError('This method must be implemented by subclasses')

    def __getitem__(self, idx):
        assert self._use_cache, 'Indexing PagedList requires cache'
        if not isinstance(idx, int) or idx < 0:
            raise TypeError('indices must be non-negative integers')
        entries = self.getslice(idx, idx + 1)
        if not entries:
            raise self.IndexError()
        return entries[0]
class OnDemandPagedList(PagedList):
    """Download pages until a page with less than maximum results"""

    def _getslice(self, start, end):
        for pagenum in itertools.count(start // self._pagesize):
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:
                continue

            startv = (
                start % self._pagesize
                if firstid <= start < nextfirstid
                else 0)
            endv = (
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
                else None)

            try:
                page_results = self.getpage(pagenum)
            except Exception:
                self._pagecount = pagenum - 1
                raise
            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            yield from page_results

            # A little optimization - if current page is not "full", ie. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page_results) + startv < self._pagesize:
                break

            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
                break


class InAdvancePagedList(PagedList):
    """PagedList with total number of pages known in advance"""

    def __init__(self, pagefunc, pagecount, pagesize):
        PagedList.__init__(self, pagefunc, pagesize, True)
        self._pagecount = pagecount

    def _getslice(self, start, end):
        start_page = start // self._pagesize
        end_page = self._pagecount if end is None else min(self._pagecount, end // self._pagesize + 1)
        skip_elems = start - start_page * self._pagesize
        only_more = None if end is None else end - start
        for pagenum in range(start_page, end_page):
            page_results = self.getpage(pagenum)
            if skip_elems:
                page_results = page_results[skip_elems:]
                skip_elems = None
            if only_more is not None:
                if len(page_results) < only_more:
                    only_more -= len(page_results)
                else:
                    yield from page_results[:only_more]
                    break
            yield from page_results
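# Usage sketch (illustrative page function; the data is made up): each "page" is
# fetched lazily and cached, so slicing only downloads the pages it needs.
#
#   def _fetch_page(pagenum):  # pretend this hits a paginated API
#       return range(pagenum * 10, (pagenum + 1) * 10) if pagenum < 3 else []
#
#   pl = OnDemandPagedList(_fetch_page, 10)
#   pl.getslice(5, 12)  -> [5, 6, 7, 8, 9, 10, 11]
#   pl[25]              -> 25
#
# InAdvancePagedList behaves the same but is told the page count up front.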
class PlaylistEntries:
    MissingEntry = object()
    is_exhausted = False

    def __init__(self, ydl, info_dict):
        self.ydl = ydl

        # _entries must be assigned now since infodict can change during iteration
        entries = info_dict.get('entries')
        if entries is None:
            raise EntryNotInPlaylist('There are no entries')
        elif isinstance(entries, list):
            self.is_exhausted = True

        requested_entries = info_dict.get('requested_entries')
        self.is_incomplete = requested_entries is not None
        if self.is_incomplete:
            assert self.is_exhausted
            self._entries = [self.MissingEntry] * max(requested_entries or [0])
            for i, entry in zip(requested_entries, entries):
                self._entries[i - 1] = entry
        elif isinstance(entries, (list, PagedList, LazyList)):
            self._entries = entries
        else:
            self._entries = LazyList(entries)

    PLAYLIST_ITEMS_RE = re.compile(r'''(?x)
        (?P<start>[+-]?\d+)?
        (?P<range>[:-]
            (?P<end>[+-]?\d+|inf(?:inite)?)?
            (?::(?P<step>[+-]?\d+))?
        )?''')

    @classmethod
    def parse_playlist_items(cls, string):
        for segment in string.split(','):
            if not segment:
                raise ValueError('There are two or more consecutive commas')
            mobj = cls.PLAYLIST_ITEMS_RE.fullmatch(segment)
            if not mobj:
                raise ValueError(f'{segment!r} is not a valid specification')
            start, end, step, has_range = mobj.group('start', 'end', 'step', 'range')
            if int_or_none(step) == 0:
                raise ValueError(f'Step in {segment!r} cannot be zero')
            yield slice(int_or_none(start), float_or_none(end), int_or_none(step)) if has_range else int(start)
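    # Illustrative parses of the --playlist-items mini-syntax handled above
    # (plain indices come back as ints, ranges as slices; range ends are floats
    # so that 'inf' can be represented):
    #
    #   list(PlaylistEntries.parse_playlist_items('2,5:10,-3:'))
    #     -> [2, slice(5, 10.0, None), slice(-3, None, None)]
    #   list(PlaylistEntries.parse_playlist_items('1:10:2'))
    #     -> [slice(1, 10.0, 2)]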
    def get_requested_items(self):
        playlist_items = self.ydl.params.get('playlist_items')
        playlist_start = self.ydl.params.get('playliststart', 1)
        playlist_end = self.ydl.params.get('playlistend')
        # For backwards compatibility, interpret -1 as whole list
        if playlist_end in (-1, None):
            playlist_end = ''
        if not playlist_items:
            playlist_items = f'{playlist_start}:{playlist_end}'
        elif playlist_start != 1 or playlist_end:
            self.ydl.report_warning('Ignoring playliststart and playlistend because playlistitems was given', only_once=True)

        for index in self.parse_playlist_items(playlist_items):
            for i, entry in self[index]:
                yield i, entry
                if not entry:
                    continue
                try:
                    # The item may have just been added to archive. Don't break due to it
                    if not self.ydl.params.get('lazy_playlist'):
                        # TODO: Add auto-generated fields
                        self.ydl._match_entry(entry, incomplete=True, silent=True)
                except (ExistingVideoReached, RejectedVideoReached):
                    return

    def get_full_count(self):
        if self.is_exhausted and not self.is_incomplete:
            return len(self)
        elif isinstance(self._entries, InAdvancePagedList):
            if self._entries._pagesize == 1:
                return self._entries._pagecount

    @functools.cached_property
    def _getter(self):
        if isinstance(self._entries, list):
            def get_entry(i):
                try:
                    entry = self._entries[i]
                except IndexError:
                    entry = self.MissingEntry
                    if not self.is_incomplete:
                        raise self.IndexError()
                if entry is self.MissingEntry:
                    raise EntryNotInPlaylist(f'Entry {i + 1} cannot be found')
                return entry
        else:
            def get_entry(i):
                try:
                    return type(self.ydl)._handle_extraction_exceptions(lambda _, i: self._entries[i])(self.ydl, i)
                except (LazyList.IndexError, PagedList.IndexError):
                    raise self.IndexError()
        return get_entry

    def __getitem__(self, idx):
        if isinstance(idx, int):
            idx = slice(idx, idx)

        # NB: PlaylistEntries[1:10] => (0, 1, ... 9)
        step = 1 if idx.step is None else idx.step
        if idx.start is None:
            start = 0 if step > 0 else len(self) - 1
        else:
            start = idx.start - 1 if idx.start >= 0 else len(self) + idx.start

        # NB: Do not call len(self) when idx == [:]
        if idx.stop is None:
            stop = 0 if step < 0 else float('inf')
        else:
            stop = idx.stop - 1 if idx.stop >= 0 else len(self) + idx.stop
        stop += [-1, 1][step > 0]

        for i in frange(start, stop, step):
            if i < 0:
                continue
            try:
                entry = self._getter(i)
            except self.IndexError:
                self.is_exhausted = True
                if step > 0:
                    break
                continue
            yield i + 1, entry

    def __len__(self):
        return len(tuple(self[:]))

    class IndexError(IndexError):
        pass
def uppercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def lowercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    return urllib.parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")


def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    url_parsed = urllib.parse.urlparse(url)
    return url_parsed._replace(
        netloc=url_parsed.netloc.encode('idna').decode('ascii'),
        path=escape_rfc3986(url_parsed.path),
        params=escape_rfc3986(url_parsed.params),
        query=escape_rfc3986(url_parsed.query),
        fragment=escape_rfc3986(url_parsed.fragment)
    ).geturl()


def parse_qs(url, **kwargs):
    return urllib.parse.parse_qs(urllib.parse.urlparse(url).query, **kwargs)
def read_batch_urls(batch_fd):
    def fixup(url):
        if not isinstance(url, str):
            url = url.decode('utf-8', 'replace')
        BOM_UTF8 = ('\xef\xbb\xbf', '\ufeff')
        for bom in BOM_UTF8:
            if url.startswith(bom):
                url = url[len(bom):]
        url = url.lstrip()
        if not url or url.startswith(('#', ';', ']')):
            return False
        # "#" cannot be stripped out since it is part of the URI
        # However, it can be safely stripped out if following a whitespace
        return re.split(r'\s#', url, 1)[0].rstrip()

    with contextlib.closing(batch_fd) as fd:
        return [url for url in map(fixup, fd) if url]


def urlencode_postdata(*args, **kargs):
    return urllib.parse.urlencode(*args, **kargs).encode('ascii')


def update_url(url, *, query_update=None, **kwargs):
    """Replace URL components specified by kwargs
    @param url           str or parse url tuple
    @param query_update  update query
    """
    if isinstance(url, str):
        if not kwargs and not query_update:
            return url
        else:
            url = urllib.parse.urlparse(url)
    if query_update:
        assert 'query' not in kwargs, 'query_update and query cannot be specified at the same time'
        kwargs['query'] = urllib.parse.urlencode({
            **urllib.parse.parse_qs(url.query),
            **query_update,
        }, True)
    return urllib.parse.urlunparse(url._replace(**kwargs))


def update_url_query(url, query):
    return update_url(url, query_update=query)
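# Illustrative use of update_url()/update_url_query() (example URL made up):
#
#   update_url_query('https://example.com/api?page=1', {'page': 2, 'limit': 50})
#     -> 'https://example.com/api?page=2&limit=50'
#   update_url('https://example.com/a#frag', fragment='')
#     -> 'https://example.com/a'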
def update_Request(req, url=None, data=None, headers=None, query=None):
    req_headers = req.headers.copy()
    req_headers.update(headers or {})
    req_data = data or req.data
    req_url = update_url_query(url or req.get_full_url(), query)
    req_get_method = req.get_method()
    if req_get_method == 'HEAD':
        req_type = HEADRequest
    elif req_get_method == 'PUT':
        req_type = PUTRequest
    else:
        req_type = urllib.request.Request
    new_req = req_type(
        req_url, data=req_data, headers=req_headers,
        origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
    if hasattr(req, 'timeout'):
        new_req.timeout = req.timeout
    return new_req
def _multipart_encode_impl(data, boundary):
    content_type = 'multipart/form-data; boundary=%s' % boundary

    out = b''
    for k, v in data.items():
        out += b'--' + boundary.encode('ascii') + b'\r\n'
        if isinstance(k, str):
            k = k.encode()
        if isinstance(v, str):
            v = v.encode()
        # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
        # suggests sending UTF-8 directly. Firefox sends UTF-8, too
        content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
        if boundary.encode('ascii') in content:
            raise ValueError('Boundary overlaps with data')
        out += content

    out += b'--' + boundary.encode('ascii') + b'--\r\n'

    return out, content_type


def multipart_encode(data, boundary=None):
    '''
    Encode a dict to RFC 7578-compliant form-data

    data:
        A dict where keys and values can be either Unicode or bytes-like
        objects.
    boundary:
        If specified a Unicode object, it's used as the boundary. Otherwise
        a random boundary is generated.

    Reference: https://tools.ietf.org/html/rfc7578
    '''
    has_specified_boundary = boundary is not None

    while True:
        if boundary is None:
            boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))

        try:
            out, content_type = _multipart_encode_impl(data, boundary)
            break
        except ValueError:
            if has_specified_boundary:
                raise
            boundary = None

    return out, content_type
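# Illustrative call (field names and values are made up); the returned body is
# the raw bytes to POST and content_type carries the generated boundary:
#
#   body, content_type = multipart_encode({'username': 'foo', 'note': 'hello'})
#   headers = {'Content-Type': content_type}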
def is_iterable_like(x, allowed_types=collections.abc.Iterable, blocked_types=NO_DEFAULT):
    if blocked_types is NO_DEFAULT:
        blocked_types = (str, bytes, collections.abc.Mapping)
    return isinstance(x, allowed_types) and not isinstance(x, blocked_types)


def variadic(x, allowed_types=NO_DEFAULT):
    if not isinstance(allowed_types, (tuple, type)):
        deprecation_warning('allowed_types should be a tuple or a type')
        allowed_types = tuple(allowed_types)
    return x if is_iterable_like(x, blocked_types=allowed_types) else (x, )


def try_call(*funcs, expected_type=None, args=[], kwargs={}):
    for f in funcs:
        try:
            val = f(*args, **kwargs)
        except (AttributeError, KeyError, TypeError, IndexError, ValueError, ZeroDivisionError):
            pass
        else:
            if expected_type is None or isinstance(val, expected_type):
                return val


def try_get(src, getter, expected_type=None):
    return try_call(*variadic(getter), args=(src,), expected_type=expected_type)


def filter_dict(dct, cndn=lambda _, v: v is not None):
    return {k: v for k, v in dct.items() if cndn(k, v)}


def merge_dicts(*dicts):
    merged = {}
    for a_dict in dicts:
        for k, v in a_dict.items():
            if (v is not None and k not in merged
                    or isinstance(v, str) and merged[k] == ''):
                merged[k] = v
    return merged


def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
    return string if isinstance(string, str) else str(string, encoding, errors)
TV_PARENTAL_GUIDELINES = {
    'TV-Y': 0,
    'TV-Y7': 7,
    'TV-G': 0,
    'TV-PG': 0,
    'TV-14': 14,
    'TV-MA': 17,
}


def parse_age_limit(s):
    # isinstance(False, int) is True. So type() must be used instead
    if type(s) is int:  # noqa: E721
        return s if 0 <= s <= 21 else None
    elif not isinstance(s, str):
        return None
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    if m:
        return int(m.group('age'))
    s = s.upper()
    if s in US_RATINGS:
        return US_RATINGS[s]
    m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
    if m:
        return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
    return None


def strip_jsonp(code):
    return re.sub(
        r'''(?sx)^
            (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
            (?:\s*&&\s*(?P=func_name))?
            \s*\(\s*(?P<callback_data>.*)\);?
            \s*?(?://[^\n]*)*$''',
        r'\g<callback_data>', code)
def js_to_json(code, vars={}, *, strict=False):
    # vars is a dict of var, val pairs to substitute
    STRING_QUOTES = '\'"`'
    STRING_RE = '|'.join(rf'{q}(?:\\.|[^\\{q}])*{q}' for q in STRING_QUOTES)
    COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
    SKIP_RE = fr'\s*(?:{COMMENT_RE})?\s*'
    INTEGER_TABLE = (
        (fr'(?s)^(0[xX][0-9a-fA-F]+){SKIP_RE}:?$', 16),
        (fr'(?s)^(0+[0-7]+){SKIP_RE}:?$', 8),
    )

    def process_escape(match):
        JSON_PASSTHROUGH_ESCAPES = R'"\bfnrtu'
        escape = match.group(1) or match.group(2)

        return (Rf'\{escape}' if escape in JSON_PASSTHROUGH_ESCAPES
                else R'\u00' if escape == 'x'
                else '' if escape == '\n'
                else escape)

    def template_substitute(match):
        evaluated = js_to_json(match.group(1), vars, strict=strict)
        if evaluated[0] == '"':
            return json.loads(evaluated)
        return evaluated

    def fix_kv(m):
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        elif v in ('undefined', 'void 0'):
            return 'null'
        elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
            return ''

        if v[0] in STRING_QUOTES:
            v = re.sub(r'(?s)\${([^}]+)}', template_substitute, v[1:-1]) if v[0] == '`' else v[1:-1]
            escaped = re.sub(r'(?s)(")|\\(.)', process_escape, v)
            return f'"{escaped}"'

        for regex, base in INTEGER_TABLE:
            im = re.match(regex, v)
            if im:
                i = int(im.group(1), base)
                return f'"{i}":' if v.endswith(':') else str(i)

        if v in vars:
            try:
                if not strict:
                    json.loads(vars[v])
            except json.JSONDecodeError:
                return json.dumps(vars[v])
            else:
                return vars[v]

        if not strict:
            return f'"{v}"'

        raise ValueError(f'Unknown value: {v}')

    def create_map(mobj):
        return json.dumps(dict(json.loads(js_to_json(mobj.group(1) or '[]', vars=vars))))

    code = re.sub(r'new Map\((\[.*?\])?\)', create_map, code)
    if not strict:
        code = re.sub(r'new Date\((".+")\)', r'\g<1>', code)
        code = re.sub(r'new \w+\((.*?)\)', lambda m: json.dumps(m.group(0)), code)
        code = re.sub(r'parseInt\([^\d]+(\d+)[^\d]+\)', r'\1', code)
        code = re.sub(r'\(function\([^)]*\)\s*\{[^}]*\}\s*\)\s*\(\s*(["\'][^)]*["\'])\s*\)', r'\1', code)

    return re.sub(rf'''(?sx)
        {STRING_RE}|
        {COMMENT_RE}|,(?={SKIP_RE}[\]}}])|
        void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
        \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{SKIP_RE}:)?|
        [0-9]+(?={SKIP_RE}:)|
        !+
        ''', fix_kv, code)
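# Illustrative conversion (input is a small JS object literal): unquoted keys are
# quoted, hex/octal literals become decimal, trailing commas are dropped, and
# `undefined`/`void 0` turn into null, e.g.
#
#   js_to_json("{key: 'value', num: 0x10, extra: undefined, list: [1, 2,]}")
#   # -> something like '{"key": "value", "num": 16, "extra": null, "list": [1, 2]}'
#
# Exact whitespace in the output follows the input; the result is meant to be fed
# to json.loads().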
def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q


POSTPROCESS_WHEN = ('pre_process', 'after_filter', 'video', 'before_dl', 'post_process', 'after_move', 'after_video', 'playlist')


DEFAULT_OUTTMPL = {
    'default': '%(title)s [%(id)s].%(ext)s',
    'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
}
OUTTMPL_TYPES = {
    'description': 'description',
    'annotation': 'annotations.xml',
    'infojson': 'info.json',
    'pl_thumbnail': None,
    'pl_description': 'description',
    'pl_infojson': 'info.json',
}

# As of [1] format syntax is:
#  %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
# 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
STR_FORMAT_RE_TMPL = r'''(?x)
    (?<!%)(?P<prefix>(?:%%)*)
    %
    (?P<has_key>\((?P<key>{0})\))?
    (?P<format>
        (?P<conversion>[#0\-+ ]+)?
        (?P<min_width>\d+)?
        (?P<precision>\.\d+)?
        (?P<len_mod>[hlL])?  # unused in python
        {1}  # conversion type
    )
'''


STR_FORMAT_TYPES = 'diouxXeEfFgGcrs'


def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) > length:
        return s[:length - len(ELLIPSES)] + ELLIPSES
    return s


def version_tuple(v):
    return tuple(int(e) for e in re.split(r'[-.]', v))


def is_outdated_version(version, limit, assume_new=True):
    if not version:
        return not assume_new
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return not assume_new


def ytdl_is_updateable():
    """ Returns if yt-dlp can be updated with -U """

    from ..update import is_non_updateable

    return not is_non_updateable()


def args_to_str(args):
    # Get a short string representation for a subprocess command
    return ' '.join(compat_shlex_quote(a) for a in args)


def error_to_str(err):
    return f'{type(err).__name__}: {err}'
def mimetype2ext(mt, default=NO_DEFAULT):
    if not isinstance(mt, str):
        if default is not NO_DEFAULT:
            return default
        return None

    MAP = {
        # video
        'x-matroska': 'mkv',
        'x-mp4-fragmented': 'mp4',
        # application (streaming playlists)
        'vnd.apple.mpegurl': 'm3u8',
        'vnd.ms-sstr+xml': 'ism',
        'x-mpegurl': 'm3u8',
        # audio
        # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3.
        # Using .mp3 as it's the most popular one
        'audio/mpeg': 'mp3',
        'audio/webm': 'webm',
        'audio/x-matroska': 'mka',
        'audio/x-mpegurl': 'm3u',
        'x-realaudio': 'ra',
        'vnd.wap.wbmp': 'wbmp',
        # caption
        'filmstrip+json': 'fs',
        'smptett+xml': 'tt',
        'x-ms-sami': 'sami',
    }

    mimetype = mt.partition(';')[0].strip().lower()
    _, _, subtype = mimetype.rpartition('/')

    ext = traversal.traverse_obj(MAP, mimetype, subtype, subtype.rsplit('+')[-1])
    if ext:
        return ext
    elif default is not NO_DEFAULT:
        return default
    return subtype.replace('+', '.')


def ext2mimetype(ext_or_url):
    if not ext_or_url:
        return None
    if '.' not in ext_or_url:
        ext_or_url = f'file.{ext_or_url}'
    return mimetypes.guess_type(ext_or_url)[0]
def parse_codecs(codecs_str):
    # http://tools.ietf.org/html/rfc6381
    if not codecs_str:
        return {}
    split_codecs = list(filter(None, map(
        str.strip, codecs_str.strip().strip(',').split(','))))
    vcodec, acodec, scodec, hdr = None, None, None, None
    for full_codec in split_codecs:
        parts = re.sub(r'0+(?=\d)', '', full_codec).split('.')
        if parts[0] in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
                        'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
            if vcodec:
                continue
            vcodec = full_codec
            if parts[0] in ('dvh1', 'dvhe'):
                hdr = 'DV'
            elif parts[0] == 'av1' and traversal.traverse_obj(parts, 3) == '10':
                hdr = 'HDR10'
            elif parts[:2] == ['vp9', '2']:
                hdr = 'HDR10'
        elif parts[0] in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-4',
                          'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
            acodec = acodec or full_codec
        elif parts[0] in ('stpp', 'wvtt'):
            scodec = scodec or full_codec
        else:
            write_string(f'WARNING: Unknown codec {full_codec}\n')
    if vcodec or acodec or scodec:
        return {
            'vcodec': vcodec or 'none',
            'acodec': acodec or 'none',
            'dynamic_range': hdr,
            **({'scodec': scodec} if scodec is not None else {}),
        }
    elif len(split_codecs) == 2:
        return {
            'vcodec': split_codecs[0],
            'acodec': split_codecs[1],
        }
    return {}
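# Illustrative results for typical DASH/HLS codec strings:
#
#   parse_codecs('avc1.64001f, mp4a.40.2')
#     -> {'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2', 'dynamic_range': None}
#   parse_codecs('dvh1.05.06')
#     -> {'vcodec': 'dvh1.05.06', 'acodec': 'none', 'dynamic_range': 'DV'}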
def get_compatible_ext(*, vcodecs, acodecs, vexts, aexts, preferences=None):
    assert len(vcodecs) == len(vexts) and len(acodecs) == len(aexts)

    allow_mkv = not preferences or 'mkv' in preferences

    if allow_mkv and max(len(acodecs), len(vcodecs)) > 1:
        return 'mkv'  # TODO: any other format allows this?

    # TODO: All codecs supported by parse_codecs isn't handled here
    COMPATIBLE_CODECS = {
        'mp4': {
            'av1', 'hevc', 'avc1', 'mp4a', 'ac-4',  # fourcc (m3u8, mpd)
            'h264', 'aacl', 'ec-3',  # Set in ISM
        },
        'webm': {
            'av1', 'vp9', 'vp8', 'opus', 'vrbs',
            'vp9x', 'vp8x',  # in the webm spec
        },
    }

    sanitize_codec = functools.partial(try_get, getter=lambda x: x[0].split('.')[0].replace('0', ''))
    vcodec, acodec = sanitize_codec(vcodecs), sanitize_codec(acodecs)

    for ext in preferences or COMPATIBLE_CODECS.keys():
        codec_set = COMPATIBLE_CODECS.get(ext, set())
        if ext == 'mkv' or codec_set.issuperset((vcodec, acodec)):
            return ext

    COMPATIBLE_EXTS = (
        {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma', 'mov'},
        {'webm', 'weba'},
    )
    for ext in preferences or vexts:
        current_exts = {ext, *vexts, *aexts}
        if ext == 'mkv' or current_exts == {ext} or any(
                ext_sets.issuperset(current_exts) for ext_sets in COMPATIBLE_EXTS):
            return ext
    return 'mkv' if allow_mkv else preferences[-1]
def urlhandle_detect_ext(url_handle, default=NO_DEFAULT):
    getheader = url_handle.headers.get

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    meta_ext = getheader('x-amz-meta-name')
    if meta_ext:
        e = meta_ext.rpartition('.')[2]
        if e:
            return e

    return mimetype2ext(getheader('Content-Type'), default=default)


def encode_data_uri(data, mime_type):
    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))


def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """

    if age_limit is None:  # No limit set
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit


# List of known byte-order-marks (BOM)
BOMS = [
    (b'\xef\xbb\xbf', 'utf-8'),
    (b'\x00\x00\xfe\xff', 'utf-32-be'),
    (b'\xff\xfe\x00\x00', 'utf-32-le'),
    (b'\xff\xfe', 'utf-16-le'),
    (b'\xfe\xff', 'utf-16-be'),
]


def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes. """

    encoding = 'utf-8'
    for bom, enc in BOMS:
        while first_bytes.startswith(bom):
            encoding, first_bytes = enc, first_bytes[len(bom):]

    return re.match(r'^\s*<', first_bytes.decode(encoding, 'replace'))


def determine_protocol(info_dict):
    protocol = info_dict.get('protocol')
    if protocol is not None:
        return protocol

    url = sanitize_url(info_dict['url'])
    if url.startswith('rtmp'):
        return 'rtmp'
    elif url.startswith('mms'):
        return 'mms'
    elif url.startswith('rtsp'):
        return 'rtsp'

    ext = determine_ext(url)
    if ext == 'm3u8':
        return 'm3u8' if info_dict.get('is_live') else 'm3u8_native'

    return urllib.parse.urlparse(url).scheme
def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False):
    """ Render a list of rows, each as a list of values.
    Text after a \t will be right aligned """
    def width(string):
        return len(remove_terminal_sequences(string).replace('\t', ''))

    def get_max_lens(table):
        return [max(width(str(v)) for v in col) for col in zip(*table)]

    def filter_using_list(row, filterArray):
        return [col for take, col in itertools.zip_longest(filterArray, row, fillvalue=True) if take]

    max_lens = get_max_lens(data) if hide_empty else []
    header_row = filter_using_list(header_row, max_lens)
    data = [filter_using_list(row, max_lens) for row in data]

    table = [header_row] + data
    max_lens = get_max_lens(table)
    extra_gap += 1
    if delim:
        table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data
        table[1][-1] = table[1][-1][:-extra_gap * len(delim)]  # Remove extra_gap from end of delimiter
    for row in table:
        for pos, text in enumerate(map(str, row)):
            if '\t' in text:
                row[pos] = text.replace('\t', ' ' * (max_lens[pos] - width(text))) + ' ' * extra_gap
            else:
                row[pos] = text + ' ' * (max_lens[pos] - width(text) + extra_gap)
    ret = '\n'.join(''.join(row).rstrip() for row in table)
    return ret
def _match_one(filter_part, dct, incomplete):
    # TODO: Generalize code with YoutubeDL._build_format_filter
    STRING_OPERATORS = {
        '*=': operator.contains,
        '^=': lambda attr, value: attr.startswith(value),
        '$=': lambda attr, value: attr.endswith(value),
        '~=': lambda attr, value: re.search(value, attr),
    }
    COMPARISON_OPERATORS = {
        **STRING_OPERATORS,
        '<=': operator.le,  # "<=" must be defined above "<"
        '<': operator.lt,
        '>=': operator.ge,
        '>': operator.gt,
        '=': operator.eq,
    }

    if isinstance(incomplete, bool):
        is_incomplete = lambda _: incomplete
    else:
        is_incomplete = lambda k: k in incomplete

    operator_rex = re.compile(r'''(?x)
        (?P<key>[a-z_]+)
        \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
        (?:
            (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
            (?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
            (?P<strval>.+?)
        )
        ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
    m = operator_rex.fullmatch(filter_part.strip())
    if m:
        unnegated_op = COMPARISON_OPERATORS[m['op']]
        if m['negation']:
            op = lambda attr, value: not unnegated_op(attr, value)
        else:
            op = unnegated_op
        comparison_value = m['quotedstrval'] or m['strval'] or m['intval']
        if m['quote']:
            comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote'])
        actual_value = dct.get(m['key'])
        numeric_comparison = None
        if isinstance(actual_value, (int, float)):
            # If the original field is a string and matching comparisonvalue is
            # a number we should respect the origin of the original field
            # and process comparison value as a string (see
            # https://github.com/ytdl-org/youtube-dl/issues/11082)
            try:
                numeric_comparison = int(comparison_value)
            except ValueError:
                numeric_comparison = parse_filesize(comparison_value)
                if numeric_comparison is None:
                    numeric_comparison = parse_filesize(f'{comparison_value}B')
                if numeric_comparison is None:
                    numeric_comparison = parse_duration(comparison_value)
        if numeric_comparison is not None and m['op'] in STRING_OPERATORS:
            raise ValueError('Operator %s only supports string values!' % m['op'])
        if actual_value is None:
            return is_incomplete(m['key']) or m['none_inclusive']
        return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)

    UNARY_OPERATORS = {
        '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
        '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
    }
    operator_rex = re.compile(r'''(?x)
        (?P<op>%s)\s*(?P<key>[a-z_]+)
        ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
    m = operator_rex.fullmatch(filter_part.strip())
    if m:
        op = UNARY_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        if is_incomplete(m.group('key')) and actual_value is None:
            return True
        return op(actual_value)

    raise ValueError('Invalid filter part %r' % filter_part)


def match_str(filter_str, dct, incomplete=False):
    """ Filter a dictionary with a simple string syntax.
    @returns           Whether the filter passes
    @param incomplete  Set of keys that is expected to be missing from dct.
                       Can be True/False to indicate all/none of the keys may be missing.
                       All conditions on incomplete keys pass if the key is missing
    """
    return all(
        _match_one(filter_part.replace(r'\&', '&'), dct, incomplete)
        for filter_part in re.split(r'(?<!\\)&', filter_str))
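# Illustrative filters (the dict stands in for an info_dict):
#
#   match_str('like_count > 100 & description', {'like_count': 190, 'description': 'foo'})
#     -> True
#   match_str('duration < 30', {'duration': 45})  -> False
#   match_str('!is_live', {'is_live': False})     -> True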
def match_filter_func(filters, breaking_filters=None):
    if not filters and not breaking_filters:
        return None
    breaking_filters = match_filter_func(breaking_filters) or (lambda _, __: None)
    filters = set(variadic(filters or []))

    interactive = '-' in filters
    if interactive:
        filters.remove('-')

    def _match_func(info_dict, incomplete=False):
        ret = breaking_filters(info_dict, incomplete)
        if ret is not None:
            raise RejectedVideoReached(ret)

        if not filters or any(match_str(f, info_dict, incomplete) for f in filters):
            return NO_DEFAULT if interactive and not incomplete else None
        else:
            video_title = info_dict.get('title') or info_dict.get('id') or 'entry'
            filter_str = ') | ('.join(map(str.strip, filters))
            return f'{video_title} does not pass filter ({filter_str}), skipping ..'
    return _match_func


class download_range_func:
    def __init__(self, chapters, ranges):
        self.chapters, self.ranges = chapters, ranges

    def __call__(self, info_dict, ydl):
        if not self.ranges and not self.chapters:
            yield {}

        warning = ('There are no chapters matching the regex' if info_dict.get('chapters')
                   else 'Cannot match chapters since chapter information is unavailable')
        for regex in self.chapters or []:
            for i, chapter in enumerate(info_dict.get('chapters') or []):
                if re.search(regex, chapter['title']):
                    warning = None
                    yield {**chapter, 'index': i}
        if self.chapters and warning:
            ydl.to_screen(f'[info] {info_dict["id"]}: {warning}')

        yield from ({'start_time': start, 'end_time': end} for start, end in self.ranges or [])

    def __eq__(self, other):
        return (isinstance(other, download_range_func)
                and self.chapters == other.chapters and self.ranges == other.ranges)

    def __repr__(self):
        return f'{__name__}.{type(self).__name__}({self.chapters}, {self.ranges})'
def parse_dfxp_time_expr(time_expr):
    if not time_expr:
        return

    mobj = re.match(rf'^(?P<time_offset>{NUMBER_RE})s?$', time_expr)
    if mobj:
        return float(mobj.group('time_offset'))

    mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
    if mobj:
        return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))


def srt_subtitles_timecode(seconds):
    return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)


def ass_subtitles_timecode(seconds):
    time = timetuple_from_msec(seconds * 1000)
    return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)
def dfxp2srt(dfxp_data):
    '''
    @param dfxp_data A bytes-like object containing DFXP data
    @returns A unicode object containing converted SRT data
    '''
    LEGACY_NAMESPACES = (
        (b'http://www.w3.org/ns/ttml', [
            b'http://www.w3.org/2004/11/ttaf1',
            b'http://www.w3.org/2006/04/ttaf1',
            b'http://www.w3.org/2006/10/ttaf1',
        ]),
        (b'http://www.w3.org/ns/ttml#styling', [
            b'http://www.w3.org/ns/ttml#style',
        ]),
    )

    SUPPORTED_STYLING = [
        'color',
        'fontFamily',
        'fontSize',
        'fontStyle',
        'fontWeight',
        'textDecoration',
    ]

    _x = functools.partial(xpath_with_ns, ns_map={
        'xml': 'http://www.w3.org/XML/1998/namespace',
        'ttml': 'http://www.w3.org/ns/ttml',
        'tts': 'http://www.w3.org/ns/ttml#styling',
    })

    styles = {}
    default_style = {}

    class TTMLPElementParser:
        _out = ''
        _unclosed_elements = []
        _applied_styles = []

        def start(self, tag, attrib):
            if tag in (_x('ttml:br'), 'br'):
                self._out += '\n'
            else:
                unclosed_elements = []
                style = {}
                element_style_id = attrib.get('style')
                if default_style:
                    style.update(default_style)
                if element_style_id:
                    style.update(styles.get(element_style_id, {}))
                for prop in SUPPORTED_STYLING:
                    prop_val = attrib.get(_x('tts:' + prop))
                    if prop_val:
                        style[prop] = prop_val
                if style:
                    font = ''
                    for k, v in sorted(style.items()):
                        if self._applied_styles and self._applied_styles[-1].get(k) == v:
                            continue
                        if k == 'color':
                            font += ' color="%s"' % v
                        elif k == 'fontSize':
                            font += ' size="%s"' % v
                        elif k == 'fontFamily':
                            font += ' face="%s"' % v
                        elif k == 'fontWeight' and v == 'bold':
                            self._out += '<b>'
                            unclosed_elements.append('b')
                        elif k == 'fontStyle' and v == 'italic':
                            self._out += '<i>'
                            unclosed_elements.append('i')
                        elif k == 'textDecoration' and v == 'underline':
                            self._out += '<u>'
                            unclosed_elements.append('u')
                    if font:
                        self._out += '<font' + font + '>'
                        unclosed_elements.append('font')
                    applied_style = {}
                    if self._applied_styles:
                        applied_style.update(self._applied_styles[-1])
                    applied_style.update(style)
                    self._applied_styles.append(applied_style)
                self._unclosed_elements.append(unclosed_elements)

        def end(self, tag):
            if tag not in (_x('ttml:br'), 'br'):
                unclosed_elements = self._unclosed_elements.pop()
                for element in reversed(unclosed_elements):
                    self._out += '</%s>' % element
                if unclosed_elements and self._applied_styles:
                    self._applied_styles.pop()

        def data(self, data):
            self._out += data

        def close(self):
            return self._out.strip()

    # Fix UTF-8 encoded file wrongly marked as UTF-16. See https://github.com/yt-dlp/yt-dlp/issues/6543#issuecomment-1477169870
    # This will not trigger false positives since only UTF-8 text is being replaced
    dfxp_data = dfxp_data.replace(b'encoding=\'UTF-16\'', b'encoding=\'UTF-8\'')

    def parse_node(node):
        target = TTMLPElementParser()
        parser = xml.etree.ElementTree.XMLParser(target=target)
        parser.feed(xml.etree.ElementTree.tostring(node))
        return parser.close()

    for k, v in LEGACY_NAMESPACES:
        for ns in v:
            dfxp_data = dfxp_data.replace(ns, k)

    dfxp = compat_etree_fromstring(dfxp_data)
    out = []
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')

    if not paras:
        raise ValueError('Invalid dfxp/TTML subtitle')

    repeat = False
    while True:
        for style in dfxp.findall(_x('.//ttml:style')):
            style_id = style.get('id') or style.get(_x('xml:id'))
            if not style_id:
                continue
            parent_style_id = style.get('style')
            if parent_style_id:
                if parent_style_id not in styles:
                    repeat = True
                    continue
                styles[style_id] = styles[parent_style_id].copy()
            for prop in SUPPORTED_STYLING:
                prop_val = style.get(_x('tts:' + prop))
                if prop_val:
                    styles.setdefault(style_id, {})[prop] = prop_val
        if repeat:
            repeat = False
        else:
            break

    for p in ('body', 'div'):
        ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
        if ele is None:
            continue
        style = styles.get(ele.get('style'))
        if not style:
            continue
        default_style.update(style)

    for para, index in zip(paras, itertools.count(1)):
        begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
        dur = parse_dfxp_time_expr(para.attrib.get('dur'))
        if begin_time is None:
            continue
        if not end_time:
            if not dur:
                continue
            end_time = begin_time + dur
        out.append('%d\n%s --> %s\n%s\n\n' % (
            index,
            srt_subtitles_timecode(begin_time),
            srt_subtitles_timecode(end_time),
            parse_node(para)))

    return ''.join(out)
def cli_option(params, command_option, param, separator=None):
    param = params.get(param)
    return ([] if param is None
            else [command_option, str(param)] if separator is None
            else [f'{command_option}{separator}{param}'])


def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
    param = params.get(param)
    assert param in (True, False, None)
    return cli_option({True: true_value, False: false_value}, command_option, param, separator)


def cli_valueless_option(params, command_option, param, expected_value=True):
    return [command_option] if params.get(param) == expected_value else []
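# Illustrative expansions (params is a plain dict of user options):
#
#   cli_option({'proxy': 'http://127.0.0.1:3128'}, '--proxy', 'proxy')
#     -> ['--proxy', 'http://127.0.0.1:3128']
#   cli_bool_option({'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate')
#     -> ['--no-check-certificate', 'true']
#   cli_valueless_option({'downloadarchive': True}, '--download-archive', 'downloadarchive')
#     -> ['--download-archive']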
def cli_configuration_args(argdict, keys, default=[], use_compat=True):
    if isinstance(argdict, (list, tuple)):  # for backward compatibility
        if use_compat:
            return argdict
        else:
            argdict = None
    if argdict is None:
        return default
    assert isinstance(argdict, dict)

    assert isinstance(keys, (list, tuple))
    for key_list in keys:
        arg_list = list(filter(
            lambda x: x is not None,
            [argdict.get(key.lower()) for key in variadic(key_list)]))
        if arg_list:
            return [arg for args in arg_list for arg in args]
    return default


def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compat=True):
    main_key, exe = main_key.lower(), exe.lower()
    root_key = exe if main_key == exe else f'{main_key}+{exe}'
    keys = [f'{root_key}{k}' for k in (keys or [''])]
    if root_key in keys:
        if main_key != exe:
            keys.append((main_key, exe))
        keys.append('default')
    else:
        use_compat = False
    return cli_configuration_args(argdict, keys, default, use_compat)
# See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
class ISO639Utils:
    _lang_map = {
        'iw': 'heb',  # Replaced by he in 1989 revision
        'in': 'ind',  # Replaced by id in 1989 revision
        'ji': 'yid',  # Replaced by yi in 1989 revision
    }

    @classmethod
    def short2long(cls, code):
        """Convert language code from ISO 639-1 to ISO 639-2/T"""
        return cls._lang_map.get(code[:2])

    @classmethod
    def long2short(cls, code):
        """Convert language code from ISO 639-2/T to ISO 639-1"""
        for short_name, long_name in cls._lang_map.items():
            if long_name == code:
                return short_name
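    # Illustrative lookups (using entries from the mapping above):
    #
    #   ISO639Utils.short2long('iw')   -> 'heb'
    #   ISO639Utils.long2short('yid')  -> 'ji'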
4220 # From http://data.okfn.org/data/core/country-list
4222 'AF': 'Afghanistan',
4223 'AX': 'Åland Islands',
4226 'AS': 'American Samoa',
4231 'AG': 'Antigua and Barbuda',
4248 'BO': 'Bolivia, Plurinational State of',
4249 'BQ': 'Bonaire, Sint Eustatius and Saba',
4250 'BA': 'Bosnia and Herzegovina',
4252 'BV': 'Bouvet Island',
4254 'IO': 'British Indian Ocean Territory',
4255 'BN': 'Brunei Darussalam',
4257 'BF': 'Burkina Faso',
4263 'KY': 'Cayman Islands',
4264 'CF': 'Central African Republic',
4268 'CX': 'Christmas Island',
4269 'CC': 'Cocos (Keeling) Islands',
4273 'CD': 'Congo, the Democratic Republic of the',
4274 'CK': 'Cook Islands',
4276 'CI': 'Côte d\'Ivoire',
4281 'CZ': 'Czech Republic',
4285 'DO': 'Dominican Republic',
4288 'SV': 'El Salvador',
4289 'GQ': 'Equatorial Guinea',
4293 'FK': 'Falkland Islands (Malvinas)',
4294 'FO': 'Faroe Islands',
4298 'GF': 'French Guiana',
4299 'PF': 'French Polynesia',
4300 'TF': 'French Southern Territories',
4315 'GW': 'Guinea-Bissau',
4318 'HM': 'Heard Island and McDonald Islands',
4319 'VA': 'Holy See (Vatican City State)',
4326 'IR': 'Iran, Islamic Republic of',
4329 'IM': 'Isle of Man',
4339 'KP': 'Korea, Democratic People\'s Republic of',
4340 'KR': 'Korea, Republic of',
4343 'LA': 'Lao People\'s Democratic Republic',
4349 'LI': 'Liechtenstein',
4353 'MK': 'Macedonia, the Former Yugoslav Republic of',
4360 'MH': 'Marshall Islands',
4366 'FM': 'Micronesia, Federated States of',
4367 'MD': 'Moldova, Republic of',
4378 'NL': 'Netherlands',
4379 'NC': 'New Caledonia',
4380 'NZ': 'New Zealand',
4385 'NF': 'Norfolk Island',
4386 'MP': 'Northern Mariana Islands',
4391 'PS': 'Palestine, State of',
4393 'PG': 'Papua New Guinea',
4396 'PH': 'Philippines',
4400 'PR': 'Puerto Rico',
4404 'RU': 'Russian Federation',
4406 'BL': 'Saint Barthélemy',
4407 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
4408 'KN': 'Saint Kitts and Nevis',
4409 'LC': 'Saint Lucia',
4410 'MF': 'Saint Martin (French part)',
4411 'PM': 'Saint Pierre and Miquelon',
4412 'VC': 'Saint Vincent and the Grenadines',
4415 'ST': 'Sao Tome and Principe',
4416 'SA': 'Saudi Arabia',
4420 'SL': 'Sierra Leone',
4422 'SX': 'Sint Maarten (Dutch part)',
4425 'SB': 'Solomon Islands',
4427 'ZA': 'South Africa',
4428 'GS': 'South Georgia and the South Sandwich Islands',
4429 'SS': 'South Sudan',
4434 'SJ': 'Svalbard and Jan Mayen',
4437 'CH': 'Switzerland',
4438 'SY': 'Syrian Arab Republic',
4439 'TW': 'Taiwan, Province of China',
4441 'TZ': 'Tanzania, United Republic of',
4443 'TL': 'Timor-Leste',
4447 'TT': 'Trinidad and Tobago',
4450 'TM': 'Turkmenistan',
4451 'TC': 'Turks and Caicos Islands',
4455 'AE': 'United Arab Emirates',
4456 'GB': 'United Kingdom',
4457 'US': 'United States',
4458 'UM': 'United States Minor Outlying Islands',
4462 'VE': 'Venezuela, Bolivarian Republic of',
4464 'VG': 'Virgin Islands, British',
4465 'VI': 'Virgin Islands, U.S.',
4466 'WF': 'Wallis and Futuna',
4467 'EH': 'Western Sahara',
4471 # Not ISO 3166 codes, but used for IP blocks
4472 'AP': 'Asia/Pacific Region',
4477 def short2full(cls
, code
):
4478 """Convert an ISO 3166-2 country code to the corresponding full name"""
4479 return cls
._country
_map
.get(code
.upper())
4483 # Major IPv4 address blocks per country
4485 'AD': '46.172.224.0/19',
4486 'AE': '94.200.0.0/13',
4487 'AF': '149.54.0.0/17',
4488 'AG': '209.59.64.0/18',
4489 'AI': '204.14.248.0/21',
4490 'AL': '46.99.0.0/16',
4491 'AM': '46.70.0.0/15',
4492 'AO': '105.168.0.0/13',
4493 'AP': '182.50.184.0/21',
4494 'AQ': '23.154.160.0/24',
4495 'AR': '181.0.0.0/12',
4496 'AS': '202.70.112.0/20',
4497 'AT': '77.116.0.0/14',
4498 'AU': '1.128.0.0/11',
4499 'AW': '181.41.0.0/18',
4500 'AX': '185.217.4.0/22',
4501 'AZ': '5.197.0.0/16',
4502 'BA': '31.176.128.0/17',
4503 'BB': '65.48.128.0/17',
4504 'BD': '114.130.0.0/16',
4506 'BF': '102.178.0.0/15',
4507 'BG': '95.42.0.0/15',
4508 'BH': '37.131.0.0/17',
4509 'BI': '154.117.192.0/18',
4510 'BJ': '137.255.0.0/16',
4511 'BL': '185.212.72.0/23',
4512 'BM': '196.12.64.0/18',
4513 'BN': '156.31.0.0/16',
4514 'BO': '161.56.0.0/16',
4515 'BQ': '161.0.80.0/20',
4516 'BR': '191.128.0.0/12',
4517 'BS': '24.51.64.0/18',
4518 'BT': '119.2.96.0/19',
4519 'BW': '168.167.0.0/16',
4520 'BY': '178.120.0.0/13',
4521 'BZ': '179.42.192.0/18',
4522 'CA': '99.224.0.0/11',
4523 'CD': '41.243.0.0/16',
4524 'CF': '197.242.176.0/21',
4525 'CG': '160.113.0.0/16',
4526 'CH': '85.0.0.0/13',
4527 'CI': '102.136.0.0/14',
4528 'CK': '202.65.32.0/19',
4529 'CL': '152.172.0.0/14',
4530 'CM': '102.244.0.0/14',
4531 'CN': '36.128.0.0/10',
4532 'CO': '181.240.0.0/12',
4533 'CR': '201.192.0.0/12',
4534 'CU': '152.206.0.0/15',
4535 'CV': '165.90.96.0/19',
4536 'CW': '190.88.128.0/17',
4537 'CY': '31.153.0.0/16',
4538 'CZ': '88.100.0.0/14',
4540 'DJ': '197.241.0.0/17',
4541 'DK': '87.48.0.0/12',
4542 'DM': '192.243.48.0/20',
4543 'DO': '152.166.0.0/15',
4544 'DZ': '41.96.0.0/12',
4545 'EC': '186.68.0.0/15',
4546 'EE': '90.190.0.0/15',
4547 'EG': '156.160.0.0/11',
4548 'ER': '196.200.96.0/20',
4549 'ES': '88.0.0.0/11',
4550 'ET': '196.188.0.0/14',
4551 'EU': '2.16.0.0/13',
4552 'FI': '91.152.0.0/13',
4553 'FJ': '144.120.0.0/16',
4554 'FK': '80.73.208.0/21',
4555 'FM': '119.252.112.0/20',
4556 'FO': '88.85.32.0/19',
4558 'GA': '41.158.0.0/15',
4560 'GD': '74.122.88.0/21',
4561 'GE': '31.146.0.0/16',
4562 'GF': '161.22.64.0/18',
4563 'GG': '62.68.160.0/19',
4564 'GH': '154.160.0.0/12',
4565 'GI': '95.164.0.0/16',
4566 'GL': '88.83.0.0/19',
4567 'GM': '160.182.0.0/15',
4568 'GN': '197.149.192.0/18',
4569 'GP': '104.250.0.0/19',
4570 'GQ': '105.235.224.0/20',
4571 'GR': '94.64.0.0/13',
4572 'GT': '168.234.0.0/16',
4573 'GU': '168.123.0.0/16',
4574 'GW': '197.214.80.0/20',
4575 'GY': '181.41.64.0/18',
4576 'HK': '113.252.0.0/14',
4577 'HN': '181.210.0.0/16',
4578 'HR': '93.136.0.0/13',
4579 'HT': '148.102.128.0/17',
4580 'HU': '84.0.0.0/14',
4581 'ID': '39.192.0.0/10',
4582 'IE': '87.32.0.0/12',
4583 'IL': '79.176.0.0/13',
4584 'IM': '5.62.80.0/20',
4585 'IN': '117.192.0.0/10',
4586 'IO': '203.83.48.0/21',
4587 'IQ': '37.236.0.0/14',
4588 'IR': '2.176.0.0/12',
4589 'IS': '82.221.0.0/16',
4590 'IT': '79.0.0.0/10',
4591 'JE': '87.244.64.0/18',
4592 'JM': '72.27.0.0/17',
4593 'JO': '176.29.0.0/16',
4594 'JP': '133.0.0.0/8',
4595 'KE': '105.48.0.0/12',
4596 'KG': '158.181.128.0/17',
4597 'KH': '36.37.128.0/17',
4598 'KI': '103.25.140.0/22',
4599 'KM': '197.255.224.0/20',
4600 'KN': '198.167.192.0/19',
4601 'KP': '175.45.176.0/22',
4602 'KR': '175.192.0.0/10',
4603 'KW': '37.36.0.0/14',
4604 'KY': '64.96.0.0/15',
4605 'KZ': '2.72.0.0/13',
4606 'LA': '115.84.64.0/18',
4607 'LB': '178.135.0.0/16',
4608 'LC': '24.92.144.0/20',
4609 'LI': '82.117.0.0/19',
4610 'LK': '112.134.0.0/15',
4611 'LR': '102.183.0.0/16',
4612 'LS': '129.232.0.0/17',
4613 'LT': '78.56.0.0/13',
4614 'LU': '188.42.0.0/16',
4615 'LV': '46.109.0.0/16',
4616 'LY': '41.252.0.0/14',
4617 'MA': '105.128.0.0/11',
4618 'MC': '88.209.64.0/18',
4619 'MD': '37.246.0.0/16',
4620 'ME': '178.175.0.0/17',
4621 'MF': '74.112.232.0/21',
4622 'MG': '154.126.0.0/17',
4623 'MH': '117.103.88.0/21',
4624 'MK': '77.28.0.0/15',
4625 'ML': '154.118.128.0/18',
4626 'MM': '37.111.0.0/17',
4627 'MN': '49.0.128.0/17',
4628 'MO': '60.246.0.0/16',
4629 'MP': '202.88.64.0/20',
4630 'MQ': '109.203.224.0/19',
4631 'MR': '41.188.64.0/18',
4632 'MS': '208.90.112.0/22',
4633 'MT': '46.11.0.0/16',
4634 'MU': '105.16.0.0/12',
4635 'MV': '27.114.128.0/18',
4636 'MW': '102.70.0.0/15',
4637 'MX': '187.192.0.0/11',
4638 'MY': '175.136.0.0/13',
4639 'MZ': '197.218.0.0/15',
4640 'NA': '41.182.0.0/16',
4641 'NC': '101.101.0.0/18',
4642 'NE': '197.214.0.0/18',
4643 'NF': '203.17.240.0/22',
4644 'NG': '105.112.0.0/12',
4645 'NI': '186.76.0.0/15',
4646 'NL': '145.96.0.0/11',
4647 'NO': '84.208.0.0/13',
4648 'NP': '36.252.0.0/15',
4649 'NR': '203.98.224.0/19',
4650 'NU': '49.156.48.0/22',
4651 'NZ': '49.224.0.0/14',
4652 'OM': '5.36.0.0/15',
4653 'PA': '186.72.0.0/15',
4654 'PE': '186.160.0.0/14',
4655 'PF': '123.50.64.0/18',
4656 'PG': '124.240.192.0/19',
4657 'PH': '49.144.0.0/13',
4658 'PK': '39.32.0.0/11',
4659 'PL': '83.0.0.0/11',
4660 'PM': '70.36.0.0/20',
4661 'PR': '66.50.0.0/16',
4662 'PS': '188.161.0.0/16',
4663 'PT': '85.240.0.0/13',
4664 'PW': '202.124.224.0/20',
4665 'PY': '181.120.0.0/14',
4666 'QA': '37.210.0.0/15',
4667 'RE': '102.35.0.0/16',
4668 'RO': '79.112.0.0/13',
4669 'RS': '93.86.0.0/15',
4670 'RU': '5.136.0.0/13',
4671 'RW': '41.186.0.0/16',
4672 'SA': '188.48.0.0/13',
4673 'SB': '202.1.160.0/19',
4674 'SC': '154.192.0.0/11',
4675 'SD': '102.120.0.0/13',
4676 'SE': '78.64.0.0/12',
4677 'SG': '8.128.0.0/10',
4678 'SI': '188.196.0.0/14',
4679 'SK': '78.98.0.0/15',
4680 'SL': '102.143.0.0/17',
4681 'SM': '89.186.32.0/19',
4682 'SN': '41.82.0.0/15',
4683 'SO': '154.115.192.0/18',
4684 'SR': '186.179.128.0/17',
4685 'SS': '105.235.208.0/21',
4686 'ST': '197.159.160.0/19',
4687 'SV': '168.243.0.0/16',
4688 'SX': '190.102.0.0/20',
4690 'SZ': '41.84.224.0/19',
4691 'TC': '65.255.48.0/20',
4692 'TD': '154.68.128.0/19',
4693 'TG': '196.168.0.0/14',
4694 'TH': '171.96.0.0/13',
4695 'TJ': '85.9.128.0/18',
4696 'TK': '27.96.24.0/21',
4697 'TL': '180.189.160.0/20',
4698 'TM': '95.85.96.0/19',
4699 'TN': '197.0.0.0/11',
4700 'TO': '175.176.144.0/21',
4701 'TR': '78.160.0.0/11',
4702 'TT': '186.44.0.0/15',
4703 'TV': '202.2.96.0/19',
4704 'TW': '120.96.0.0/11',
4705 'TZ': '156.156.0.0/14',
4706 'UA': '37.52.0.0/14',
4707 'UG': '102.80.0.0/13',
4709 'UY': '167.56.0.0/13',
4710 'UZ': '84.54.64.0/18',
4711 'VA': '212.77.0.0/19',
4712 'VC': '207.191.240.0/21',
4713 'VE': '186.88.0.0/13',
4714 'VG': '66.81.192.0/20',
4715 'VI': '146.226.0.0/16',
4716 'VN': '14.160.0.0/11',
4717 'VU': '202.80.32.0/20',
4718 'WF': '117.20.32.0/21',
4719 'WS': '202.4.32.0/19',
4720 'YE': '134.35.0.0/16',
4721 'YT': '41.242.116.0/22',
4722 'ZA': '41.0.0.0/11',
4723 'ZM': '102.144.0.0/13',
4724 'ZW': '102.177.192.0/18',
4728 def random_ipv4(cls, code_or_block):
4729 if len(code_or_block) == 2:
4730 block = cls._country_ip_map.get(code_or_block.upper())
4734 block = code_or_block
4735 addr, preflen = block.split('/')
4736 addr_min = struct.unpack('!L', socket.inet_aton(addr))[0]
4737 addr_max = addr_min | (0xffffffff >> int(preflen))
4738 return str(socket.inet_ntoa(
4739 struct.pack('!L', random.randint(addr_min, addr_max))))
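# Illustrative usage sketch: the helper accepts either a two-letter country code
# (looked up in the block table above) or an explicit CIDR block; the enclosing
# class name is assumed to be yt-dlp's GeoUtils.
#   GeoUtils.random_ipv4('CH')            # some address inside the CH block
#   GeoUtils.random_ipv4('192.0.2.0/24')  # some address inside 192.0.2.0/24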
4742 class PerRequestProxyHandler(urllib.request.ProxyHandler):
4743 def __init__(self, proxies=None):
4744 # Set default handlers
4745 for type in ('http', 'https'):
4746 setattr(self, '%s_open' % type,
4747 lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
4748 meth(r, proxy, type))
4749 urllib.request.ProxyHandler.__init__(self, proxies)
4751 def proxy_open(self, req, proxy, type):
4752 req_proxy = req.headers.get('Ytdl-request-proxy')
4753 if req_proxy is not None:
4755 del req.headers['Ytdl-request-proxy']
4757 if proxy == '__noproxy__':
4758 return None # No Proxy
4759 if urllib.parse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
4760 req.add_header('Ytdl-socks-proxy', proxy)
4761 # yt-dlp's http/https handlers wrap the socket with SOCKS
4763 return urllib.request.ProxyHandler.proxy_open(
4764 self, req, proxy, type)
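# Illustrative usage sketch (values are placeholders): a per-request proxy is chosen
# through the 'Ytdl-request-proxy' header; '__noproxy__' bypasses the default proxy
# for that single request.
#   opener = urllib.request.build_opener(PerRequestProxyHandler({'http': 'http://127.0.0.1:3128'}))
#   req = urllib.request.Request('http://example.com/')
#   req.add_header('Ytdl-request-proxy', '__noproxy__')
#   opener.open(req)  # goes out without the default proxy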
4767 # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
4768 # released into Public Domain
4769 # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
4771 def long_to_bytes(n, blocksize=0):
4772 """long_to_bytes(n:long, blocksize:int) : string
4773 Convert a long integer to a byte string.
4775 If optional blocksize is given and greater than zero, pad the front of the
4776 byte string with binary zeros so that the length is a multiple of
4779 # after much testing, this algorithm was deemed to be the fastest
4783 s = struct.pack('>I', n & 0xffffffff) + s
4785 # strip off leading zeros
4786 for i in range(len(s)):
4787 if s[i] != b'\000'[0]:
4790 # only happens when n == 0
4794 # add back some pad bytes. this could be done more efficiently w.r.t. the
4795 # de-padding being done above, but sigh...
4796 if blocksize > 0 and len(s) % blocksize:
4797 s = (blocksize - len(s) % blocksize) * b'\000' + s
4801 def bytes_to_long(s):
4802 """bytes_to_long(string) : long
4803 Convert a byte string to a long integer.
4805 This is (essentially) the inverse of long_to_bytes().
4810 extra = (4 - length % 4)
4811 s = b'\000' * extra + s
4812 length = length + extra
4813 for i in range(0, length, 4):
4814 acc = (acc << 32) + struct.unpack('>I', s[i:i + 4])[0]
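# Illustrative round trip of the two PyCrypto-derived helpers:
#   long_to_bytes(65537) == b'\x01\x00\x01'
#   bytes_to_long(b'\x01\x00\x01') == 65537
#   long_to_bytes(1, blocksize=4) == b'\x00\x00\x00\x01'  # front-padded to a multiple of 4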
4818 def ohdave_rsa_encrypt(data, exponent, modulus):
4820 Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
4823 data: data to encrypt, bytes-like object
4824 exponent, modulus: parameter e and N of RSA algorithm, both integer
4825 Output: hex string of encrypted data
4827 Limitation: supports one block encryption only
4830 payload = int(binascii.hexlify(data[::-1]), 16)
4831 encrypted = pow(payload, exponent, modulus)
4832 return '%x' % encrypted
4835 def pkcs1pad(data, length):
4837 Pad input data with the PKCS#1 scheme
4839 @param {int[]} data input data
4840 @param {int} length target length
4841 @returns {int[]} padded data
4843 if len(data) > length - 11:
4844 raise ValueError('Input data too long for PKCS#1 padding')
4846 pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
4847 return [0, 2] + pseudo_random + [0] + data
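# Illustrative sketch: the result is `length` bytes laid out as
# [0, 2, <length - len(data) - 3 random bytes>, 0, *data], e.g.:
#   len(pkcs1pad([1, 2, 3], 16)) == 16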
4850 def _base_n_table(n, table):
4851 if not table and not n:
4852 raise ValueError('Either table or n must be specified')
4853 table = (table or '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')[:n]
4855 if n and n != len(table):
4856 raise ValueError(f'base {n} exceeds table length {len(table)}')
4860 def encode_base_n(num, n=None, table=None):
4861 """Convert given int to a base-n string"""
4862 table = _base_n_table(n, table)
4866 result, base = '', len(table)
4868 result = table[num % base] + result
4873 def decode_base_n(string, n=None, table=None):
4874 """Convert given base-n string to int"""
4875 table = {char: index for index, char in enumerate(_base_n_table(n, table))}
4876 result, base = 0, len(table)
4878 result = result * base + table[char]
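# Illustrative round trip with the default digit table:
#   encode_base_n(255, 16) == 'ff'
#   decode_base_n('ff', 16) == 255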
4882 def decode_packed_codes(code):
4883 mobj = re.search(PACKED_CODES_RE, code)
4884 obfuscated_code, base, count, symbols = mobj.groups()
4887 symbols = symbols.split('|')
4892 base_n_count = encode_base_n(count, base)
4893 symbol_table[base_n_count] = symbols[count] or base_n_count
4896 r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
4900 def caesar(s, alphabet, shift):
4905 alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
4910 return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)
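# Illustrative sketch: characters inside the given alphabet are shifted, everything
# else passes through unchanged (the 47-position shift above is the classic ROT47):
#   caesar('abc', 'abcdefghijklmnopqrstuvwxyz', 1) == 'bcd'
#   caesar('a-b', 'ab', 1) == 'b-a'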
4913 def parse_m3u8_attributes(attrib):
4915 for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
4916 if val.startswith('"'):
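# Illustrative sketch (quoted values keep embedded commas and lose the quotes):
#   parse_m3u8_attributes('BANDWIDTH=1280000,CODECS="mp4a.40.2,avc1.4d401f"')
#   == {'BANDWIDTH': '1280000', 'CODECS': 'mp4a.40.2,avc1.4d401f'}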
4922 def urshift(val, n):
4923 return val >> n if val >= 0 else (val + 0x100000000) >> n
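# Illustrative sketch of the unsigned (logical) 32-bit right shift:
#   urshift(-1, 28) == 15      # (-1 + 0x100000000) >> 28
#   urshift(16, 2) == 4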
4926 def write_xattr(path, key, value):
4927 # Windows: Write xattrs to NTFS Alternate Data Streams:
4928 # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
4929 if compat_os_name == 'nt':
4930 assert ':' not in key
4931 assert os.path.exists(path)
4934 with open(f'{path}:{key}', 'wb') as f:
4936 except OSError as e:
4937 raise XAttrMetadataError(e.errno, e.strerror)
4940 # UNIX Method 1. Use xattrs/pyxattrs modules
4943 if getattr(xattr, '_yt_dlp__identifier', None) == 'pyxattr':
4944 # Unicode arguments are not supported in pyxattr until version 0.5.0
4945 # See https://github.com/ytdl-org/youtube-dl/issues/5498
4946 if version_tuple(xattr.__version__) >= (0, 5, 0):
4947 setxattr = xattr.set
4949 setxattr = xattr.setxattr
4953 setxattr(path, key, value)
4954 except OSError as e:
4955 raise XAttrMetadataError(e.errno, e.strerror)
4958 # UNIX Method 2. Use setfattr/xattr executables
4959 exe = ('setfattr' if check_executable('setfattr', ['--version'])
4960 else 'xattr' if check_executable('xattr', ['-h']) else None)
4962 raise XAttrUnavailableError(
4963 'Couldn\'t find a tool to set the xattrs. Install either the python "xattr" or "pyxattr" modules or the '
4964 + ('"xattr" binary' if sys.platform != 'linux' else 'GNU "attr" package (which contains the "setfattr" tool)'))
4966 value = value.decode()
4968 _, stderr, returncode = Popen.run(
4969 [exe, '-w', key, value, path] if exe == 'xattr' else [exe, '-n', key, '-v', value, path],
4970 text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
4971 except OSError as e:
4972 raise XAttrMetadataError(e.errno, e.strerror)
4974 raise XAttrMetadataError(returncode, stderr)
4977 def random_birthday(year_field, month_field, day_field):
4978 start_date = datetime.date(1950, 1, 1)
4979 end_date = datetime.date(1995, 12, 31)
4980 offset = random.randint(0, (end_date - start_date).days)
4981 random_date = start_date + datetime.timedelta(offset)
4983 year_field: str(random_date.year),
4984 month_field: str(random_date.month),
4985 day_field: str(random_date.day),
4989 def find_available_port(interface=''):
4991 with socket.socket() as sock:
4992 sock.bind((interface, 0))
4993 return sock.getsockname()[1]
4998 # Templates for internet shortcut files, which are plain text files.
4999 DOT_URL_LINK_TEMPLATE = '''\
5004 DOT_WEBLOC_LINK_TEMPLATE = '''\
5005 <?xml version="1.0" encoding="UTF-8"?>
5006 <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
5007 <plist version="1.0">
5010 \t<string>%(url)s</string>
5015 DOT_DESKTOP_LINK_TEMPLATE = '''\
5025 'url': DOT_URL_LINK_TEMPLATE,
5026 'desktop': DOT_DESKTOP_LINK_TEMPLATE,
5027 'webloc': DOT_WEBLOC_LINK_TEMPLATE,
5031 def iri_to_uri(iri):
5033 Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).
5035 The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact.
5038 iri_parts = urllib.parse.urlparse(iri)
5040 if '[' in iri_parts.netloc:
5041 raise ValueError('IPv6 URIs are not, yet, supported.')
5042 # Querying `.netloc`, when there's only one bracket, also raises a ValueError.
5044 # The `safe` argument values, that the following code uses, contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.
5047 if iri_parts.username:
5048 net_location += urllib.parse.quote(iri_parts.username, safe=r"!$%&'()*+,~")
5049 if iri_parts.password is not None:
5050 net_location += ':' + urllib.parse.quote(iri_parts.password, safe=r"!$%&'()*+,~")
5053 net_location += iri_parts.hostname.encode('idna').decode() # Punycode for Unicode hostnames.
5054 # The 'idna' encoding produces ASCII text.
5055 if iri_parts.port is not None and iri_parts.port != 80:
5056 net_location += ':' + str(iri_parts.port)
5058 return urllib.parse.urlunparse(
5062 urllib.parse.quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),
5064 # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
5065 urllib.parse.quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),
5067 # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
5068 urllib.parse.quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),
5070 urllib.parse.quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))
5072 # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
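# Illustrative sketch (a rough expectation, assuming an ASCII hostname):
#   iri_to_uri('http://example.com/ä?q=ß') == 'http://example.com/%C3%A4?q=%C3%9F'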
5075 def to_high_limit_path(path):
5076 if sys.platform in ['win32', 'cygwin']:
5077 # Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited.
5078 return '\\\\?\\' + os.path.abspath(path)
5083 def format_field(obj, field=None, template='%s', ignore=NO_DEFAULT, default='', func=IDENTITY):
5084 val = traversal.traverse_obj(obj, *variadic(field))
5085 if not val if ignore is NO_DEFAULT else val in variadic(ignore):
5087 return template % func(val)
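# Illustrative sketch:
#   format_field({'height': 1080}, 'height', '%sp') == '1080p'
#   format_field({'height': None}, 'height', '%sp', default='?') == '?'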
5090 def clean_podcast_url(url):
5091 return re.sub(r'''(?x)
5095 media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
5098 (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
5101 cn\.co| # https://podcorn.com/analytics-prefix/
5102 st\.fm # https://podsights.com/docs/
5107 _HEX_TABLE = '0123456789abcdef'
5110 def random_uuidv4():
5111 return re.sub(r'[xy]', lambda x: _HEX_TABLE[random.randint(0, 15)], 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')
5114 def make_dir(path, to_screen=None):
5116 dn = os.path.dirname(path)
5118 os.makedirs(dn, exist_ok=True)
5120 except OSError as err:
5121 if callable(to_screen):
5122 to_screen(f'unable to create directory {err}')
5126 def get_executable_path():
5127 from ..update import _get_variant_and_executable_path
5129 return os.path.dirname(os.path.abspath(_get_variant_and_executable_path()[1]))
5132 def get_user_config_dirs(package_name):
5133 # .config (e.g. ~/.config/package_name)
5134 xdg_config_home = os.getenv('XDG_CONFIG_HOME') or compat_expanduser('~/.config')
5135 yield os.path.join(xdg_config_home, package_name)
5137 # appdata (%APPDATA%/package_name)
5138 appdata_dir = os.getenv('appdata')
5140 yield os.path.join(appdata_dir, package_name)
5142 # home (~/.package_name)
5143 yield os.path.join(compat_expanduser('~'), f'.{package_name}')
5146 def get_system_config_dirs(package_name):
5148 yield os.path.join('/etc', package_name)
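# Illustrative sketch of the yielded candidates for package_name='yt-dlp':
#   user dirs:   ~/.config/yt-dlp, %APPDATA%/yt-dlp (when set), ~/.yt-dlp
#   system dirs: /etc/yt-dlp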
5151 def time_seconds(**kwargs):
5153 Returns TZ-aware time in seconds since the epoch (1970-01-01T00:00:00Z)
5155 return time.time() + datetime.timedelta(**kwargs).total_seconds()
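# Illustrative sketch: the kwargs express a UTC offset, so "seconds since the epoch
# as seen from UTC+09:00" is simply
#   time_seconds(hours=9) == time.time() + 9 * 3600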
5158 # create a JSON Web Signature (jws) with HS256 algorithm
5159 # the resulting format is in JWS Compact Serialization
5160 # implemented following JWT https://www.rfc-editor.org/rfc/rfc7519.html
5161 # implemented following JWS https://www.rfc-editor.org/rfc/rfc7515.html
5162 def jwt_encode_hs256(payload_data, key, headers={}):
5168 header_data.update(headers)
5169 header_b64 = base64.b64encode(json.dumps(header_data).encode())
5170 payload_b64 = base64.b64encode(json.dumps(payload_data).encode())
5171 h = hmac.new(key.encode(), header_b64 + b'.' + payload_b64, hashlib.sha256)
5172 signature_b64 = base64.b64encode(h.digest())
5173 token = header_b64 + b'.' + payload_b64 + b'.' + signature_b64
5177 # can be extended in future to verify the signature and parse header and return the algorithm used if it's not HS256
5178 def jwt_decode_hs256(jwt):
5179 header_b64, payload_b64, signature_b64 = jwt.split('.')
5180 # add trailing ='s that may have been stripped, superfluous ='s are ignored
5181 payload_data = json.loads(base64.urlsafe_b64decode(f'{payload_b64}==='))
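# Illustrative round trip (a sketch; the encoder emits JWS Compact Serialization as bytes):
#   token = jwt_encode_hs256({'uid': 1}, 'secret')
#   jwt_decode_hs256(token.decode())['uid'] == 1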
5185 WINDOWS_VT_MODE = False if compat_os_name == 'nt' else None
5189 def supports_terminal_sequences(stream):
5190 if compat_os_name == 'nt':
5191 if not WINDOWS_VT_MODE:
5193 elif not os.getenv('TERM'):
5196 return stream.isatty()
5197 except BaseException:
5201 def windows_enable_vt_mode():
5202 """Ref: https://bugs.python.org/issue30075 """
5203 if get_windows_version() < (10, 0, 10586):
5207 import ctypes.wintypes
5210 ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
5212 dll = ctypes.WinDLL('kernel32', use_last_error=False)
5213 handle = os.open('CONOUT$', os.O_RDWR)
5215 h_out = ctypes.wintypes.HANDLE(msvcrt.get_osfhandle(handle))
5216 dw_original_mode = ctypes.wintypes.DWORD()
5217 success = dll.GetConsoleMode(h_out, ctypes.byref(dw_original_mode))
5219 raise Exception('GetConsoleMode failed')
5221 success = dll.SetConsoleMode(h_out, ctypes.wintypes.DWORD(
5222 dw_original_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING))
5224 raise Exception('SetConsoleMode failed')
5228 global WINDOWS_VT_MODE
5229 WINDOWS_VT_MODE = True
5230 supports_terminal_sequences.cache_clear()
5233 _terminal_sequences_re = re.compile('\033\\[[^m]+m')
5236 def remove_terminal_sequences(string):
5237 return _terminal_sequences_re.sub('', string)
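# Illustrative sketch:
#   remove_terminal_sequences('\033[0;31merror\033[0m') == 'error'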
5240 def number_of_digits(number):
5241 return len('%d' % number)
5244 def join_nonempty(*values, delim='-', from_dict=None):
5245 if from_dict is not None:
5246 values = (traversal.traverse_obj(from_dict, variadic(v)) for v in values)
5247 return delim.join(map(str, filter(None, values)))
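# Illustrative sketch (falsy values are dropped before joining):
#   join_nonempty('1080p', None, 'mp4', delim='.') == '1080p.mp4'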
5250 def scale_thumbnails_to_max_format_width(formats, thumbnails, url_width_re):
5252 Find the largest format dimensions in terms of video width and, for each thumbnail:
5253 * Modify the URL: Match the width with the provided regex and replace with the former width
5256 This function is useful with video services that scale the provided thumbnails on demand
5258 _keys = ('width', 'height')
5259 max_dimensions = max(
5260 (tuple(format.get(k) or 0 for k in _keys) for format in formats),
5262 if not max_dimensions[0]:
5266 {'url': re.sub(url_width_re, str(max_dimensions[0]), thumbnail['url'])},
5267 dict(zip(_keys, max_dimensions)), thumbnail)
5268 for thumbnail in thumbnails
5272 def parse_http_range(range):
5273 """ Parse value of "Range" or "Content-Range" HTTP header into tuple. """
5275 return None, None, None
5276 crg = re.search(r'bytes[ =](\d+)-(\d+)?(?:/(\d+))?', range)
5278 return None, None, None
5279 return int(crg.group(1)), int_or_none(crg.group(2)), int_or_none(crg.group(3))
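# Illustrative sketch:
#   parse_http_range('bytes 0-499/1234') == (0, 499, 1234)
#   parse_http_range('bytes=500-') == (500, None, None)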
5282 def read_stdin(what):
5283 eof = 'Ctrl+Z' if compat_os_name == 'nt' else 'Ctrl+D'
5284 write_string(f'Reading {what} from STDIN - EOF ({eof}) to end:\n')
5288 def determine_file_encoding(data):
5290 Detect the text encoding used
5291 @returns (encoding, bytes to skip)
5294 # BOM marks are given priority over declarations
5295 for bom, enc in BOMS:
5296 if data.startswith(bom):
5297 return enc, len(bom)
5299 # Strip off all null bytes to match even when UTF-16 or UTF-32 is used.
5300 # We ignore the endianness to get a good enough match
5301 data = data.replace(b'\0', b'')
5302 mobj = re.match(rb'(?m)^#\s*coding\s*:\s*(\S+)\s*$', data)
5303 return mobj.group(1).decode() if mobj else None, 0
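# Illustrative sketch (assuming the BOMS table defined elsewhere in this module maps
# the UTF-8 BOM to 'utf-8'):
#   determine_file_encoding(b'\xef\xbb\xbf-f best') == ('utf-8', 3)
#   determine_file_encoding(b'# coding: utf-8\n-f best') == ('utf-8', 0)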
5310 __initialized = False
5312 def __init__(self, parser, label=None):
5313 self.parser, self.label = parser, label
5314 self._loaded_paths, self.configs = set(), []
5316 def init(self, args=None, filename=None):
5317 assert not self.__initialized
5318 self.own_args, self.filename = args, filename
5319 return self.load_configs()
5321 def load_configs(self):
5324 location = os.path.realpath(self.filename)
5325 directory = os.path.dirname(location)
5326 if location in self._loaded_paths:
5328 self._loaded_paths.add(location)
5330 self.__initialized = True
5331 opts, _ = self.parser.parse_known_args(self.own_args)
5332 self.parsed_args = self.own_args
5333 for location in opts.config_locations or []:
5335 if location in self._loaded_paths:
5337 self._loaded_paths.add(location)
5338 self.append_config(shlex.split(read_stdin('options'), comments=True), label='stdin')
5340 location = os.path.join(directory, expand_path(location))
5341 if os.path.isdir(location):
5342 location = os.path.join(location, 'yt-dlp.conf')
5343 if not os.path.exists(location):
5344 self.parser.error(f'config location {location} does not exist')
5345 self.append_config(self.read_file(location), location)
5349 label = join_nonempty(
5350 self.label, 'config', f'"{self.filename}"' if self.filename else '',
5352 return join_nonempty(
5353 self.own_args is not None and f'{label[0].upper()}{label[1:]}: {self.hide_login_info(self.own_args)}',
5354 *(f'\n{c}'.replace('\n', '\n| ')[1:] for c in self.configs),
5358 def read_file(filename, default=[]):
5360 optionf = open(filename, 'rb')
5362 return default # silently skip if file is not present
5364 enc, skip = determine_file_encoding(optionf.read(512))
5365 optionf.seek(skip, io.SEEK_SET)
5367 enc = None # silently skip read errors
5369 # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
5370 contents = optionf.read().decode(enc or preferredencoding())
5371 res = shlex.split(contents, comments=True)
5372 except Exception as err:
5373 raise ValueError(f'Unable to parse "{filename}": {err}')
5379 def hide_login_info(opts):
5380 PRIVATE_OPTS = {'-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'}
5381 eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')
5386 return m.group('key') + '=PRIVATE'
5390 opts = list(map(_scrub_eq, opts))
5391 for idx, opt in enumerate(opts):
5392 if opt in PRIVATE_OPTS and idx + 1 < len(opts):
5393 opts[idx + 1] = 'PRIVATE'
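# Illustrative sketch (assuming the enclosing class is yt-dlp's Config):
#   Config.hide_login_info(['-u', 'name', '--password=secret', '-f', 'best'])
#   == ['-u', 'PRIVATE', '--password=PRIVATE', '-f', 'best']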
5396 def append_config(self, *args, label=None):
5397 config = type(self)(self.parser, label)
5398 config._loaded_paths = self._loaded_paths
5399 if config.init(*args):
5400 self.configs.append(config)
5404 for config in reversed(self.configs):
5405 yield from config.all_args
5406 yield from self.parsed_args or []
5408 def parse_known_args(self, **kwargs):
5409 return self.parser.parse_known_args(self.all_args, **kwargs)
5411 def parse_args(self):
5412 return self.parser.parse_args(self.all_args)
5415 class WebSocketsWrapper:
5416 """Wraps websockets module to use in non-async scopes"""
5419 def __init__(self, url, headers=None, connect=True):
5420 self.loop = asyncio.new_event_loop()
5421 # XXX: "loop" is deprecated
5422 self.conn = websockets.connect(
5423 url, extra_headers=headers, ping_interval=None,
5424 close_timeout=float('inf'), loop=self.loop, ping_timeout=float('inf'))
5427 atexit.register(self.__exit__, None, None, None)
5429 def __enter__(self):
5431 self.pool = self.run_with_loop(self.conn.__aenter__(), self.loop)
5434 def send(self, *args):
5435 self.run_with_loop(self.pool.send(*args), self.loop)
5437 def recv(self, *args):
5438 return self.run_with_loop(self.pool.recv(*args), self.loop)
5440 def __exit__(self, type, value, traceback):
5442 return self.run_with_loop(self.conn.__aexit__(type, value, traceback), self.loop)
5445 self._cancel_all_tasks(self.loop)
5447 # taken from https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py with modifications
5448 # for contributors: if any new library that uses asyncio needs to run in a non-async context, move these functions out of this class
5450 def run_with_loop(main, loop):
5451 if not asyncio.iscoroutine(main):
5452 raise ValueError(f'a coroutine was expected, got {main!r}')
5455 return loop.run_until_complete(main)
5457 loop.run_until_complete(loop.shutdown_asyncgens())
5458 if hasattr(loop, 'shutdown_default_executor'):
5459 loop.run_until_complete(loop.shutdown_default_executor())
5462 def _cancel_all_tasks(loop):
5463 to_cancel = asyncio.all_tasks(loop)
5468 for task in to_cancel:
5471 # XXX: "loop" is removed in python 3.10+
5472 loop.run_until_complete(
5473 asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))
5475 for task in to_cancel:
5476 if task.cancelled():
5478 if task.exception() is not None:
5479 loop.call_exception_handler({
5480 'message': 'unhandled exception during asyncio.run() shutdown',
5481 'exception': task.exception(),
5486 def merge_headers(*dicts):
5487 """Merge dicts of http headers case insensitively, prioritizing the latter ones"""
5488 return {k.title(): v for k, v in itertools.chain.from_iterable(map(dict.items, dicts))}
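# Illustrative sketch (later dicts win; keys are normalised via str.title()):
#   merge_headers({'user-agent': 'a'}, {'User-Agent': 'b'}) == {'User-Agent': 'b'}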
5491 def cached_method(f):
5492 """Cache a method"""
5493 signature = inspect.signature(f)
5496 def wrapper(self, *args, **kwargs):
5497 bound_args = signature.bind(self, *args, **kwargs)
5498 bound_args.apply_defaults()
5499 key = tuple(bound_args.arguments.values())[1:]
5501 cache = vars(self).setdefault('_cached_method__cache', {}).setdefault(f.__name__, {})
5502 if key not in cache:
5503 cache[key] = f(self, *args, **kwargs)
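# Illustrative usage sketch: the decorated method is evaluated once per distinct
# argument tuple and per instance (names below are hypothetical):
#   class Fetcher:
#       @cached_method
#       def page(self, url):
#           return expensive_download(url)  # hypothetical helper, hit at most once per url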
5508 class classproperty:
5509 """property access for class methods with optional caching"""
5510 def __new__(cls, func=None, *args, **kwargs):
5512 return functools.partial(cls, *args, **kwargs)
5513 return super().__new__(cls)
5515 def __init__(self, func, *, cache=False):
5516 functools.update_wrapper(self, func)
5518 self._cache = {} if cache else None
5520 def __get__(self, _, cls):
5521 if self._cache is None:
5522 return self.func(cls)
5523 elif cls not in self._cache:
5524 self._cache[cls] = self.func(cls)
5525 return self._cache[cls]
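# Illustrative usage sketch (names below are hypothetical):
#   class MyIE:
#       @classproperty(cache=True)
#       def SUFFIX(cls):
#           return cls.__name__.lower()  # computed once per class, then cached
#   MyIE.SUFFIX == 'myie'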
5528 class function_with_repr:
5529 def __init__(self, func, repr_=None):
5530 functools.update_wrapper(self, func)
5531 self.func, self.__repr = func, repr_
5533 def __call__(self, *args, **kwargs):
5534 return self.func(*args, **kwargs)
5539 return f'{self.func.__module__}.{self.func.__qualname__}'
5542 class Namespace(types.SimpleNamespace):
5543 """Immutable namespace"""
5546 return iter(self.__dict__.values())
5550 return self.__dict__.items()
5553 MEDIA_EXTENSIONS = Namespace(
5554 common_video=('avi', 'flv', 'mkv', 'mov', 'mp4', 'webm'),
5555 video=('3g2', '3gp', 'f4v', 'mk3d', 'divx', 'mpg', 'ogv', 'm4v', 'wmv'),
5556 common_audio=('aiff', 'alac', 'flac', 'm4a', 'mka', 'mp3', 'ogg', 'opus', 'wav'),
5557 audio=('aac', 'ape', 'asf', 'f4a', 'f4b', 'm4b', 'm4p', 'm4r', 'oga', 'ogx', 'spx', 'vorbis', 'wma', 'weba'),
5558 thumbnails=('jpg', 'png', 'webp'),
5559 storyboards=('mhtml', ),
5560 subtitles=('srt', 'vtt', 'ass', 'lrc'),
5561 manifests=('f4f', 'f4m', 'm3u8', 'smil', 'mpd'),
5563 MEDIA_EXTENSIONS.video += MEDIA_EXTENSIONS.common_video
5564 MEDIA_EXTENSIONS.audio += MEDIA_EXTENSIONS.common_audio
5566 KNOWN_EXTENSIONS = (*MEDIA_EXTENSIONS.video, *MEDIA_EXTENSIONS.audio, *MEDIA_EXTENSIONS.manifests)
5571 for retry in RetryManager(...):
5574 except SomeException as err:
5578 attempt, _error = 0, None
5580 def __init__(self, _retries, _error_callback, **kwargs):
5581 self.retries = _retries or 0
5582 self.error_callback = functools.partial(_error_callback, **kwargs)
5584 def _should_retry(self):
5585 return self._error is not NO_DEFAULT and self.attempt <= self.retries
5589 if self._error is NO_DEFAULT:
5594 def error(self, value):
5598 while self._should_retry():
5599 self.error = NO_DEFAULT
5603 self.error_callback(self.error, self.attempt, self.retries)
5606 def report_retry(e, count, retries, *, sleep_func, info, warn, error=None, suffix=None):
5607 """Utility function for reporting retries"""
5610 return error(f'{e}. Giving up after {count - 1} retries') if count > 1 else error(str(e))
5615 elif isinstance(e, ExtractorError):
5616 e = remove_end(str_or_none(e.cause) or e.orig_msg, '.')
5617 warn(f'{e}. Retrying{format_field(suffix, None, " %s")} ({count}/{retries})...')
5619 delay = float_or_none(sleep_func(n=count - 1)) if callable(sleep_func) else sleep_func
5621 info(f'Sleeping {delay:.2f} seconds ...')
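# Illustrative wiring sketch, combining RetryManager with report_retry as the error
# callback (the argument plumbing is an assumption based on the signatures above):
#   for retry in RetryManager(3, report_retry, sleep_func=1, info=print, warn=print):
#       try:
#           fetch_fragment()  # hypothetical operation that may fail
#       except OSError as err:
#           retry.error = err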
5625 def make_archive_id(ie, video_id):
5626 ie_key = ie if isinstance(ie, str) else ie.ie_key()
5627 return f'{ie_key.lower()} {video_id}'
5630 def truncate_string(s, left, right=0):
5631 assert left > 3 and right >= 0
5632 if s is None or len(s) <= left + right:
5634 return f'{s[:left-3]}...{s[-right:] if right else ""}'
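# Illustrative sketch:
#   truncate_string('abcdefghij', 5) == 'ab...'
#   truncate_string('abcdefghij', 5, 2) == 'ab...ij'
#   truncate_string('short', 10) == 'short'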
5637 def orderedSet_from_options(options, alias_dict, *, use_regex=False, start=None):
5638 assert 'all' in alias_dict, '"all" alias is required'
5639 requested = list(start or [])
5641 discard = val.startswith('-')
5645 if val in alias_dict:
5646 val = alias_dict[val] if not discard else [
5647 i[1:] if i.startswith('-') else f'-{i}' for i in alias_dict[val]]
5648 # NB: Do not allow regex in aliases for performance
5649 requested = orderedSet_from_options(val, alias_dict, start=requested)
5652 current = (filter(re.compile(val, re.I).fullmatch, alias_dict['all']) if use_regex
5653 else [val] if val in alias_dict['all'] else None)
5655 raise ValueError(val)
5658 for item in current:
5659 while item in requested:
5660 requested.remove(item)
5662 requested.extend(current)
5664 return orderedSet(requested)
5668 regex = r' *((?P<reverse>\+)?(?P<field>[a-zA-Z0-9_]+)((?P<separator>[~:])(?P<limit>.*?))?)? *$'
5670 default = ('hidden', 'aud_or_vid', 'hasvid', 'ie_pref', 'lang', 'quality',
5671 'res', 'fps', 'hdr:12', 'vcodec:vp9.2', 'channels', 'acodec',
5672 'size', 'br', 'asr', 'proto', 'ext', 'hasaud', 'source', 'id') # These must not be aliases
5673 ytdl_default = ('hasaud', 'lang', 'quality', 'tbr', 'filesize', 'vbr',
5674 'height', 'width', 'proto', 'vext', 'abr', 'aext',
5675 'fps', 'fs_approx', 'source', 'id')
5678 'vcodec': {'type': 'ordered', 'regex': True,
5679 'order': ['av0?1', 'vp0?9.2', 'vp0?9', '[hx]265|he?vc?', '[hx]264|avc', 'vp0?8', 'mp4v|h263', 'theora', '', None, 'none']},
5680 'acodec': {'type': 'ordered', 'regex': True,
5681 'order': ['[af]lac', 'wav|aiff', 'opus', 'vorbis|ogg', 'aac', 'mp?4a?', 'mp3', 'ac-?4', 'e-?a?c-?3', 'ac-?3', 'dts', '', None, 'none']},
5682 'hdr': {'type': 'ordered', 'regex': True, 'field': 'dynamic_range',
5683 'order': ['dv', '(hdr)?12', r'(hdr)?10\+', '(hdr)?10', 'hlg', '', 'sdr', None]},
5684 'proto': {'type': 'ordered', 'regex': True, 'field': 'protocol',
5685 'order': ['(ht|f)tps', '(ht|f)tp$', 'm3u8.*', '.*dash', 'websocket_frag', 'rtmpe?', '', 'mms|rtsp', 'ws|websocket', 'f4']},
5686 'vext': {'type': 'ordered', 'field': 'video_ext',
5687 'order': ('mp4', 'mov', 'webm', 'flv', '', 'none'),
5688 'order_free': ('webm', 'mp4', 'mov', 'flv', '', 'none')},
5689 'aext': {'type': 'ordered', 'regex': True, 'field': 'audio_ext',
5690 'order': ('m4a', 'aac', 'mp3', 'ogg', 'opus', 'web[am]', '', 'none'),
5691 'order_free': ('ogg', 'opus', 'web[am]', 'mp3', 'm4a', 'aac', '', 'none')},
5692 'hidden': {'visible': False, 'forced': True, 'type': 'extractor', 'max': -1000},
5693 'aud_or_vid': {'visible': False, 'forced': True, 'type': 'multiple',
5694 'field': ('vcodec', 'acodec'),
5695 'function': lambda it: int(any(v != 'none' for v in it))},
5696 'ie_pref': {'priority': True, 'type': 'extractor'},
5697 'hasvid': {'priority': True, 'field': 'vcodec', 'type': 'boolean', 'not_in_list': ('none',)},
5698 'hasaud': {'field': 'acodec', 'type': 'boolean', 'not_in_list': ('none',)},
5699 'lang': {'convert': 'float', 'field': 'language_preference', 'default': -1},
5700 'quality': {'convert': 'float', 'default': -1},
5701 'filesize': {'convert': 'bytes'},
5702 'fs_approx': {'convert': 'bytes', 'field': 'filesize_approx'},
5703 'id': {'convert': 'string', 'field': 'format_id'},
5704 'height': {'convert': 'float_none'},
5705 'width': {'convert': 'float_none'},
5706 'fps': {'convert': 'float_none'},
5707 'channels': {'convert': 'float_none', 'field': 'audio_channels'},
5708 'tbr': {'convert': 'float_none'},
5709 'vbr': {'convert': 'float_none'},
5710 'abr': {'convert': 'float_none'},
5711 'asr': {'convert': 'float_none'},
5712 'source': {'convert': 'float', 'field': 'source_preference', 'default': -1},
5714 'codec': {'type': 'combined', 'field': ('vcodec', 'acodec')},
5715 'br': {'type': 'combined', 'field': ('tbr', 'vbr', 'abr'), 'same_limit': True},
5716 'size': {'type': 'combined', 'same_limit': True, 'field': ('filesize', 'fs_approx')},
5717 'ext': {'type': 'combined', 'field': ('vext', 'aext')},
5718 'res': {'type': 'multiple', 'field': ('height', 'width'),
5719 'function': lambda it: (lambda l: min(l) if l else 0)(tuple(filter(None, it)))},
5721 # Actual field names
5722 'format_id': {'type': 'alias', 'field': 'id'},
5723 'preference': {'type': 'alias', 'field': 'ie_pref'},
5724 'language_preference': {'type': 'alias', 'field': 'lang'},
5725 'source_preference': {'type': 'alias', 'field': 'source'},
5726 'protocol': {'type': 'alias', 'field': 'proto'},
5727 'filesize_approx': {'type': 'alias', 'field': 'fs_approx'},
5728 'audio_channels': {'type': 'alias', 'field': 'channels'},
5731 'dimension': {'type': 'alias', 'field': 'res', 'deprecated': True},
5732 'resolution': {'type': 'alias', 'field': 'res', 'deprecated': True},
5733 'extension': {'type': 'alias', 'field': 'ext', 'deprecated': True},
5734 'bitrate': {'type': 'alias', 'field': 'br', 'deprecated': True},
5735 'total_bitrate': {'type': 'alias', 'field': 'tbr', 'deprecated': True},
5736 'video_bitrate': {'type': 'alias', 'field': 'vbr', 'deprecated': True},
5737 'audio_bitrate': {'type': 'alias', 'field': 'abr', 'deprecated': True},
5738 'framerate': {'type': 'alias', 'field': 'fps', 'deprecated': True},
5739 'filesize_estimate': {'type': 'alias', 'field': 'size', 'deprecated': True},
5740 'samplerate': {'type': 'alias', 'field': 'asr', 'deprecated': True},
5741 'video_ext': {'type': 'alias', 'field': 'vext', 'deprecated': True},
5742 'audio_ext': {'type': 'alias', 'field': 'aext', 'deprecated': True},
5743 'video_codec': {'type': 'alias', 'field': 'vcodec', 'deprecated': True},
5744 'audio_codec': {'type': 'alias', 'field': 'acodec', 'deprecated': True},
5745 'video': {'type': 'alias', 'field': 'hasvid', 'deprecated': True},
5746 'has_video': {'type': 'alias', 'field': 'hasvid', 'deprecated': True},
5747 'audio': {'type': 'alias', 'field': 'hasaud', 'deprecated': True},
5748 'has_audio': {'type': 'alias', 'field': 'hasaud', 'deprecated': True},
5749 'extractor': {'type': 'alias', 'field': 'ie_pref', 'deprecated': True},
5750 'extractor_preference': {'type': 'alias', 'field': 'ie_pref', 'deprecated': True},
5753 def __init__(self, ydl, field_preference):
5756 self.evaluate_params(self.ydl.params, field_preference)
5757 if ydl.params.get('verbose'):
5758 self.print_verbose_info(self.ydl.write_debug)
5760 def _get_field_setting(self, field, key):
5761 if field not in self.settings:
5762 if key in ('forced', 'priority'):
5764 self.ydl.deprecated_feature(f'Using arbitrary fields ({field}) for format sorting is '
5765 'deprecated and may be removed in a future version')
5766 self.settings[field] = {}
5767 propObj = self.settings[field]
5768 if key not in propObj:
5769 type = propObj.get('type')
5771 default = 'preference' if type == 'extractor' else (field,) if type in ('combined', 'multiple') else field
5772 elif key == 'convert':
5773 default = 'order' if type == 'ordered' else 'float_string' if field else 'ignore'
5775 default = {'type': 'field', 'visible': True, 'order': [], 'not_in_list': (None,)}.get(key, None)
5776 propObj[key] = default
5779 def _resolve_field_value(self, field, value, convertNone=False):
5784 value = value.lower()
5785 conversion = self._get_field_setting(field, 'convert')
5786 if conversion == 'ignore':
5788 if conversion == 'string':
5790 elif conversion == 'float_none':
5791 return float_or_none(value)
5792 elif conversion == 'bytes':
5793 return parse_bytes(value)
5794 elif conversion == 'order':
5795 order_list = (self._use_free_order and self._get_field_setting(field, 'order_free')) or self._get_field_setting(field, 'order')
5796 use_regex = self._get_field_setting(field, 'regex')
5797 list_length = len(order_list)
5798 empty_pos = order_list.index('') if '' in order_list else list_length + 1
5799 if use_regex and value is not None:
5800 for i, regex in enumerate(order_list):
5801 if regex and re.match(regex, value):
5802 return list_length - i
5803 return list_length - empty_pos # not in list
5804 else: # not regex or value = None
5805 return list_length - (order_list.index(value) if value in order_list else empty_pos)
5807 if value.isnumeric():
5810 self.settings[field]['convert'] = 'string'
5813 def evaluate_params(self, params, sort_extractor):
5814 self._use_free_order = params.get('prefer_free_formats', False)
5815 self._sort_user = params.get('format_sort', [])
5816 self._sort_extractor = sort_extractor
5818 def add_item(field, reverse, closest, limit_text):
5819 field = field.lower()
5820 if field in self._order:
5822 self._order.append(field)
5823 limit = self._resolve_field_value(field, limit_text)
5826 'closest': False if limit is None else closest,
5827 'limit_text': limit_text,
5829 if field in self.settings:
5830 self.settings[field].update(data)
5832 self.settings[field] = data
5835 tuple(field for field in self.default if self._get_field_setting(field, 'forced'))
5836 + (tuple() if params.get('format_sort_force', False)
5837 else tuple(field for field in self.default if self._get_field_setting(field, 'priority')))
5838 + tuple(self._sort_user) + tuple(sort_extractor) + self.default)
5840 for item in sort_list:
5841 match = re.match(self.regex, item)
5843 raise ExtractorError('Invalid format sort string "%s" given by extractor' % item)
5844 field = match.group('field')
5847 if self._get_field_setting(field, 'type') == 'alias':
5848 alias, field = field, self._get_field_setting(field, 'field')
5849 if self._get_field_setting(alias, 'deprecated'):
5850 self.ydl.deprecated_feature(f'Format sorting alias {alias} is deprecated and may '
5851 f'be removed in a future version. Please use {field} instead')
5852 reverse = match.group('reverse') is not None
5853 closest = match.group('separator') == '~'
5854 limit_text = match.group('limit')
5856 has_limit = limit_text is not None
5857 has_multiple_fields = self._get_field_setting(field, 'type') == 'combined'
5858 has_multiple_limits = has_limit and has_multiple_fields and not self._get_field_setting(field, 'same_limit')
5860 fields = self._get_field_setting(field, 'field') if has_multiple_fields else (field,)
5861 limits = limit_text.split(':') if has_multiple_limits else (limit_text,) if has_limit else tuple()
5862 limit_count = len(limits)
5863 for (i, f) in enumerate(fields):
5864 add_item(f, reverse, closest,
5865 limits[i] if i < limit_count
5866 else limits[0] if has_limit and not has_multiple_limits
5869 def print_verbose_info(self, write_debug):
5871 write_debug('Sort order given by user: %s' % ', '.join(self._sort_user))
5872 if self._sort_extractor:
5873 write_debug('Sort order given by extractor: %s' % ', '.join(self._sort_extractor))
5874 write_debug('Formats sorted by: %s' % ', '.join(['%s%s%s' % (
5875 '+' if self._get_field_setting(field, 'reverse') else '', field,
5876 '%s%s(%s)' % ('~' if self._get_field_setting(field, 'closest') else ':',
5877 self._get_field_setting(field, 'limit_text'),
5878 self._get_field_setting(field, 'limit'))
5879 if self._get_field_setting(field, 'limit_text') is not None else '')
5880 for field in self._order if self._get_field_setting(field, 'visible')]))
5882 def _calculate_field_preference_from_value(self, format, field, type, value):
5883 reverse = self._get_field_setting(field, 'reverse')
5884 closest = self._get_field_setting(field, 'closest')
5885 limit = self._get_field_setting(field, 'limit')
5887 if type == 'extractor':
5888 maximum = self._get_field_setting(field, 'max')
5889 if value is None or (maximum is not None and value >= maximum):
5891 elif type == 'boolean':
5892 in_list = self._get_field_setting(field, 'in_list')
5893 not_in_list = self._get_field_setting(field, 'not_in_list')
5894 value = 0 if ((in_list is None or value in in_list) and (not_in_list is None or value not in not_in_list)) else -1
5895 elif type == 'ordered':
5896 value = self._resolve_field_value(field, value, True)
5898 # try to convert to number
5899 val_num = float_or_none(value, default=self._get_field_setting(field, 'default'))
5900 is_num = self._get_field_setting(field, 'convert') != 'string' and val_num is not None
5904 return ((-10, 0) if value is None
5905 else (1, value, 0) if not is_num # if a field has mixed strings and numbers, strings are sorted higher
5906 else (0, -abs(value - limit), value - limit if reverse else limit - value) if closest
5907 else (0, value, 0) if not reverse and (limit is None or value <= limit)
5908 else (0, -value, 0) if limit is None or (reverse and value == limit) or value > limit
5909 else (-1, value, 0))
5911 def _calculate_field_preference(self, format, field):
5912 type = self._get_field_setting(field, 'type') # extractor, boolean, ordered, field, multiple
5913 get_value = lambda f: format.get(self._get_field_setting(f, 'field'))
5914 if type == 'multiple':
5915 type = 'field' # Only 'field' is allowed in multiple for now
5916 actual_fields = self._get_field_setting(field, 'field')
5918 value = self._get_field_setting(field, 'function')(get_value(f) for f in actual_fields)
5920 value = get_value(field)
5921 return self._calculate_field_preference_from_value(format, field, type, value)
5923 def calculate_preference(self, format):
5924 # Determine missing protocol
5925 if not format.get('protocol'):
5926 format['protocol'] = determine_protocol(format)
5928 # Determine missing ext
5929 if not format.get('ext') and 'url' in format:
5930 format['ext'] = determine_ext(format['url'])
5931 if format.get('vcodec') == 'none':
5932 format['audio_ext'] = format['ext'] if format.get('acodec') != 'none' else 'none'
5933 format['video_ext'] = 'none'
5935 format['video_ext'] = format['ext']
5936 format['audio_ext'] = 'none'
5937 # if format.get('preference') is None and format.get('ext') in ('f4f', 'f4m'): # Not supported?
5938 # format['preference'] = -1000
5940 if format.get('preference') is None and format.get('ext') == 'flv' and re.match('[hx]265|he?vc?', format.get('vcodec') or ''):
5941 # HEVC-over-FLV is out-of-spec by FLV's original spec
5942 # ref. https://trac.ffmpeg.org/ticket/6389
5943 # ref. https://github.com/yt-dlp/yt-dlp/pull/5821
5944 format['preference'] = -100
5946 # Determine missing bitrates
5947 if format.get('tbr') is None:
5948 if format.get('vbr') is not None and format.get('abr') is not None:
5949 format['tbr'] = format.get('vbr', 0) + format.get('abr', 0)
5951 if format.get('vcodec') != 'none' and format.get('vbr') is None:
5952 format['vbr'] = format.get('tbr') - format.get('abr', 0)
5953 if format.get('acodec') != 'none' and format.get('abr') is None:
5954 format['abr'] = format.get('tbr') - format.get('vbr', 0)
5956 return tuple(self._calculate_field_preference(format, field) for field in self._order)