import xml.etree.ElementTree

from . import traversal

from ..compat import functools  # isort: split
from ..compat import (
    compat_etree_fromstring,
    compat_HTMLParseError,
)
from ..dependencies import brotli, certifi, websockets, xattr
from ..socks import ProxyType, sockssocket

__name__ = __name__.rsplit('.', 1)[0]  # Pretend to be the parent module

# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))
def random_user_agent():
    _USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
    return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)


SUPPORTED_ENCODINGS = [
    'gzip', 'deflate',
]
if brotli:
    SUPPORTED_ENCODINGS.append('br')

std_headers = {
    'User-Agent': random_user_agent(),
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-us,en;q=0.5',
    'Sec-Fetch-Mode': 'navigate',
}

USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}
ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
    # these follow the genitive grammatical case (dopełniacz)
    # some websites might be using nominative, which will require another month list
    # https://en.wikibooks.org/wiki/Polish/Noun_cases
    'pl': ['stycznia', 'lutego', 'marca', 'kwietnia', 'maja', 'czerwca',
           'lipca', 'sierpnia', 'września', 'października', 'listopada', 'grudnia'],
}

# From https://github.com/python/cpython/blob/3.11/Lib/email/_parseaddr.py#L36-L42
TIMEZONE_NAMES = {
    'UT': 0, 'UTC': 0, 'GMT': 0, 'Z': 0,
    'AST': -4, 'ADT': -3,  # Atlantic (used in Canada)
    'EST': -5, 'EDT': -4,  # Eastern
    'CST': -6, 'CDT': -5,  # Central
    'MST': -7, 'MDT': -6,  # Mountain
    'PST': -8, 'PDT': -7,  # Pacific
}

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))
DATE_FORMATS = (
    '%Y-%m-%d %H:%M:%S.%f',
    '%Y-%m-%d %H:%M:%S:%f',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%b %d %Y at %H:%M:%S',
    '%B %d %Y at %H:%M:%S',
)

DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d/%m/%Y',
    '%d/%m/%Y %H:%M:%S',
])

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%Y %H:%M:%S',
])

PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>\s*(?P<json_ld>{.+?}|\[.+?\])\s*</script>'

NUMBER_RE = r'\d+(?:\.\d+)?'
def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    pref = locale.getpreferredencoding()
def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    tf = tempfile.NamedTemporaryFile(
        prefix=f'{os.path.basename(fn)}.', dir=os.path.dirname(fn),
        suffix='.tmp', delete=False, mode='w', encoding='utf-8')

    try:
        with tf:
            json.dump(obj, tf, ensure_ascii=False)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            with contextlib.suppress(OSError):
                os.unlink(fn)
        with contextlib.suppress(OSError):
            mask = os.umask(0)
            os.umask(mask)
            os.chmod(tf.name, 0o666 & ~mask)
        os.rename(tf.name, fn)
    except Exception:
        with contextlib.suppress(OSError):
            os.remove(tf.name)
        raise
def find_xpath_attr(node, xpath, key, val=None):
    """ Find the xpath xpath[@key=val] """
    assert re.match(r'^[a-zA-Z_-]+$', key)
    expr = xpath + ('[@%s]' % key if val is None else f"[@{key}='{val}']")
    return node.find(expr)
# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter


def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)
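# Illustrative usage (not part of the original module): xpath_with_ns expands a
# prefixed path into ElementTree's Clark notation. The 'ns' prefix and URI below
# are made-up examples.
#   xpath_with_ns('ns:videos/ns:video', {'ns': 'http://example.com/ns'})
#   == '{http://example.com/ns}videos/{http://example.com/ns}video'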
def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(xpath)

    if isinstance(xpath, str):
        n = _find_xpath(xpath)

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n
def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
    return n.text
def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = f'{xpath}[@{key}]' if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
    return n.attrib[key]
def get_element_by_id(id, html, **kwargs):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html, **kwargs)


def get_element_html_by_id(id, html, **kwargs):
    """Return the html of the tag with the specified ID in the passed HTML document"""
    return get_element_html_by_attribute('id', id, html, **kwargs)
def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_html_by_class(class_name, html):
    """Return the html of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_html_by_class(class_name, html)
    return retval[0] if retval else None
def get_element_by_attribute(attribute, value, html, **kwargs):
    retval = get_elements_by_attribute(attribute, value, html, **kwargs)
    return retval[0] if retval else None


def get_element_html_by_attribute(attribute, value, html, **kargs):
    retval = get_elements_html_by_attribute(attribute, value, html, **kargs)
    return retval[0] if retval else None
def get_elements_by_class(class_name, html, **kargs):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_by_attribute(
        'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_html_by_class(class_name, html):
    """Return the html of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_html_by_attribute(
        'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_by_attribute(*args, **kwargs):
    """Return the content of the tag with the specified attribute in the passed HTML document"""
    return [content for content, _ in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_html_by_attribute(*args, **kwargs):
    """Return the html of the tag with the specified attribute in the passed HTML document"""
    return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]
def get_elements_text_and_html_by_attribute(attribute, value, html, *, tag=r'[\w:.-]+', escape_value=True):
    """
    Return the text (content) and the html (whole) of the tag with the specified
    attribute in the passed HTML document
    """
    if not value:
        return

    quote = '' if re.match(r'''[\s"'`=<>]''', value) else '?'

    value = re.escape(value) if escape_value else value

    partial_element_re = rf'''(?x)
        <(?P<tag>{tag})
         (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
         \s{re.escape(attribute)}\s*=\s*(?P<_q>['"]{quote})(?-x:{value})(?P=_q)
        '''

    for m in re.finditer(partial_element_re, html):
        content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():])

        yield (
            unescapeHTML(re.sub(r'^(?P<q>["\'])(?P<content>.*)(?P=q)$', r'\g<content>', content, flags=re.DOTALL)),
            whole,
        )
class HTMLBreakOnClosingTagParser(html.parser.HTMLParser):
    """
    HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
    closing tag for the first opening tag it has encountered, and can be used
    as a context manager
    """

    class HTMLBreakOnClosingTagException(Exception):
        pass

    def __init__(self):
        self.tagstack = collections.deque()
        html.parser.HTMLParser.__init__(self)

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def close(self):
        # handle_endtag does not return upon raising HTMLBreakOnClosingTagException,
        # so data remains buffered; we no longer have any interest in it, thus
        # override this method to discard it
        pass

    def handle_starttag(self, tag, _):
        self.tagstack.append(tag)

    def handle_endtag(self, tag):
        if not self.tagstack:
            raise compat_HTMLParseError('no tags in the stack')
        while self.tagstack:
            inner_tag = self.tagstack.pop()
            if inner_tag == tag:
                break
        else:
            raise compat_HTMLParseError(f'matching opening tag for closing {tag} tag not found')
        if not self.tagstack:
            raise self.HTMLBreakOnClosingTagException()
# XXX: This should be far less strict
def get_element_text_and_html_by_tag(tag, html):
    """
    For the first element with the specified tag in the passed HTML document
    return its content (text) and the whole element (html)
    """
    def find_or_raise(haystack, needle, exc):
        try:
            return haystack.index(needle)
        except ValueError:
            raise exc
    closing_tag = f'</{tag}>'
    whole_start = find_or_raise(
        html, f'<{tag}', compat_HTMLParseError(f'opening {tag} tag not found'))
    content_start = find_or_raise(
        html[whole_start:], '>', compat_HTMLParseError(f'malformed opening {tag} tag'))
    content_start += whole_start + 1
    with HTMLBreakOnClosingTagParser() as parser:
        parser.feed(html[whole_start:content_start])
        if not parser.tagstack or parser.tagstack[0] != tag:
            raise compat_HTMLParseError(f'parser did not match opening {tag} tag')
        offset = content_start
        while offset < len(html):
            next_closing_tag_start = find_or_raise(
                html[offset:], closing_tag,
                compat_HTMLParseError(f'closing {tag} tag not found'))
            next_closing_tag_end = next_closing_tag_start + len(closing_tag)
            try:
                parser.feed(html[offset:offset + next_closing_tag_end])
                offset += next_closing_tag_end
            except HTMLBreakOnClosingTagParser.HTMLBreakOnClosingTagException:
                return html[content_start:offset + next_closing_tag_start], \
                    html[whole_start:offset + next_closing_tag_end]
        raise compat_HTMLParseError('unexpected end of html')
class HTMLAttributeParser(html.parser.HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""

    def __init__(self):
        self.attrs = {}
        html.parser.HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)
        raise compat_HTMLParseError('done')


class HTMLListAttrsParser(html.parser.HTMLParser):
    """HTML parser to gather the attributes for the elements of a list"""

    def __init__(self):
        html.parser.HTMLParser.__init__(self)
        self.items = []
        self._level = 0

    def handle_starttag(self, tag, attrs):
        if tag == 'li' and self._level == 0:
            self.items.append(dict(attrs))
        self._level += 1

    def handle_endtag(self, tag):
        self._level -= 1
def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    """
    parser = HTMLAttributeParser()
    with contextlib.suppress(compat_HTMLParseError):
        parser.feed(html_element)
        parser.close()
    return parser.attrs
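# Illustrative usage (not part of the original module); the element string below
# is a made-up example:
#   extract_attributes('<a href="https://example.com/page" class="link">')
#   == {'href': 'https://example.com/page', 'class': 'link'}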
def parse_list(webpage):
    """Given a string for a series of HTML <li> elements,
    return a list of dictionaries of their attributes"""
    parser = HTMLListAttrsParser()
    parser.feed(webpage)
    parser.close()
    return parser.items
def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    html = re.sub(r'\s+', ' ', html)
    html = re.sub(r'(?u)\s?<\s?br\s?/?\s?>\s?', '\n', html)
    html = re.sub(r'(?u)<\s?/\s?p\s?>\s?<\s?p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()
class LenientJSONDecoder(json.JSONDecoder):
    def __init__(self, *args, transform_source=None, ignore_extra=False, close_objects=0, **kwargs):
        self.transform_source, self.ignore_extra = transform_source, ignore_extra
        self._close_attempts = 2 * close_objects
        super().__init__(*args, **kwargs)

    @staticmethod
    def _close_object(err):
        doc = err.doc[:err.pos]
        # We need to add comma first to get the correct error message
        if err.msg.startswith('Expecting \',\''):
            return doc + ','
        elif not doc.endswith(','):
            return

        if err.msg.startswith('Expecting property name'):
            return doc[:-1] + '}'
        elif err.msg.startswith('Expecting value'):
            return doc[:-1] + ']'

    def decode(self, s):
        if self.transform_source:
            s = self.transform_source(s)
        for attempt in range(self._close_attempts + 1):
            try:
                if self.ignore_extra:
                    return self.raw_decode(s.lstrip())[0]
                return super().decode(s)
            except json.JSONDecodeError as e:
                if e.pos is None:
                    raise
                elif attempt < self._close_attempts:
                    s = self._close_object(e)
                    if s is not None:
                        continue
                raise type(e)(f'{e.msg} in {s[e.pos-10:e.pos+10]!r}', s, e.pos)
        assert False, 'Too many attempts to decode JSON'
def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    if filename == '-':
        if sys.platform == 'win32':
            import msvcrt

            # stdout may be any IO stream, e.g. when using contextlib.redirect_stdout
            with contextlib.suppress(io.UnsupportedOperation):
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)

    for attempt in range(2):
        try:
            try:
                if sys.platform == 'win32':
                    # FIXME: An exclusive lock also locks the file from being read.
                    # Since windows locks are mandatory, don't lock the file on windows (for now).
                    # Ref: https://github.com/yt-dlp/yt-dlp/issues/3124
                    raise LockingUnsupportedError()
                stream = locked_file(filename, open_mode, block=False).__enter__()
            except OSError:
                stream = open(filename, open_mode)
            return stream, filename
        except OSError as err:
            if attempt or err.errno in (errno.EACCES,):
                raise
            old_filename, filename = filename, sanitize_path(filename)
            if old_filename == filename:
                raise
def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp
def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
    """Sanitizes a string so it could be used as part of a filename.
    @param restricted      Use a stricter subset of allowed characters
    @param is_id           Whether this is an ID that should be kept unchanged if possible.
                           If unset, yt-dlp's new sanitization rules are in effect
    """
    if s == '':
        return ''

    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        elif not restricted and char == '\n':
            return '\0 '
        elif is_id is NO_DEFAULT and not restricted and char in '"*:<>?|/\\':
            # Replace with their full-width unicode counterparts
            return {'/': '\u29F8', '\\': '\u29f9'}.get(char, chr(ord(char) + 0xfee0))
        elif char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '\0_\0-' if restricted else '\0 \0-'
        elif char in '\\/|*<>':
            return '\0_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace() or ord(char) > 127):
            return '\0_'
        return char

    # Replace look-alike Unicode glyphs
    if restricted and (is_id is NO_DEFAULT or not is_id):
        s = unicodedata.normalize('NFKC', s)
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)  # Handle timestamps
    result = ''.join(map(replace_insane, s))
    if is_id is NO_DEFAULT:
        result = re.sub(r'(\0.)(?:(?=\1)..)+', r'\1', result)  # Remove repeated substitute chars
        STRIP_RE = r'(?:\0.|[ _-])*'
        result = re.sub(f'^\0.{STRIP_RE}|{STRIP_RE}\0.$', '', result)  # Remove substitute chars from start/end
    result = result.replace('\0', '') or '_'

    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
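# Illustrative usage (not part of the original module); exact substitutions
# depend on the flags, but roughly:
#   sanitize_filename('foo: bar?', restricted=True)  -> 'foo_-_bar'
#   in the default mode, characters such as '/' and '\\' are mapped to
#   look-alike full-width Unicode counterparts instead of being dropped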
def sanitize_path(s, force=False):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform == 'win32':
        force = False
        drive_or_unc, _ = os.path.splitdrive(s)
    elif force:
        drive_or_unc = ''
    else:
        return s

    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    elif force and s and s[0] == os.path.sep:
        sanitized_path.insert(0, os.path.sep)
    return os.path.join(*sanitized_path)
def sanitize_url(url, *, scheme='http'):
    # Prepend protocol-less URLs with `http:` scheme in order to mitigate
    # the number of unwanted failures due to missing protocol
    if url is None:
        return
    elif url.startswith('//'):
        return f'{scheme}:{url}'
    # Fix some common typos seen so far
    COMMON_TYPOS = (
        # https://github.com/ytdl-org/youtube-dl/issues/15649
        (r'^httpss://', r'https://'),
        # https://bx1.be/lives/direct-tv/
        (r'^rmtp([es]?)://', r'rtmp\1://'),
    )
    for mistake, fixup in COMMON_TYPOS:
        if re.match(mistake, url):
            return re.sub(mistake, fixup, url)
    return url
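# Illustrative usage (not part of the original module); URLs are made-up examples:
#   sanitize_url('//example.com/watch')      -> 'http://example.com/watch'
#   sanitize_url('httpss://example.com/x')   -> 'https://example.com/x'
#   sanitize_url('rmtp://example.com/live')  -> 'rtmp://example.com/live'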
def extract_basic_auth(url):
    parts = urllib.parse.urlsplit(url)
    if parts.username is None:
        return url, None
    url = urllib.parse.urlunsplit(parts._replace(netloc=(
        parts.hostname if parts.port is None
        else '%s:%d' % (parts.hostname, parts.port))))
    auth_payload = base64.b64encode(
        ('%s:%s' % (parts.username, parts.password or '')).encode())
    return url, f'Basic {auth_payload.decode()}'


def sanitized_Request(url, *args, **kwargs):
    url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
    if auth_header is not None:
        headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
        headers['Authorization'] = auth_header
    return urllib.request.Request(url, *args, **kwargs)
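# Illustrative usage (not part of the original module); the credentials below are
# made-up:
#   extract_basic_auth('http://user:pass@example.com/feed')
#   == ('http://example.com/feed', 'Basic dXNlcjpwYXNz')   # base64 of 'user:pass'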
803 """Expand shell variables and ~"""
804 return os
.path
.expandvars(compat_expanduser(s
))
807 def orderedSet(iterable
, *, lazy
=False):
808 """Remove all duplicates from the input iterable"""
810 seen
= [] # Do not use set since the items can be unhashable
816 return _iter() if lazy
else list(_iter())
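# Illustrative usage (not part of the original module):
#   orderedSet([1, 2, 1, 3, 2])     == [1, 2, 3]
#   orderedSet('abcab', lazy=True)  -> generator yielding 'a', 'b', 'c'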
def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in html.entities.name2codepoint:
        return chr(html.entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon.
    # E.g. '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in html.entities.html5:
        return html.entities.html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/ytdl-org/youtube-dl/issues/7518
        with contextlib.suppress(ValueError):
            return chr(int(numstr, base))

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert isinstance(s, str)

    return re.sub(
        r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
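# Illustrative usage (not part of the original module):
#   unescapeHTML('&amp;')   == '&'
#   unescapeHTML('&#39;')   == "'"
#   unescapeHTML('&#x2F;')  == '/'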
def escapeHTML(text):
    return (
        text
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;')
        .replace('"', '&quot;')
        .replace("'", '&#39;')
    )
class netrc_from_content(netrc.netrc):
    def __init__(self, content):
        self.hosts, self.macros = {}, {}
        with io.StringIO(content) as stream:
            self._parse('-', stream, False)
class Popen(subprocess.Popen):
    if sys.platform == 'win32':
        _startupinfo = subprocess.STARTUPINFO()
        _startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    else:
        _startupinfo = None

    @staticmethod
    def _fix_pyinstaller_ld_path(env):
        """Restore LD_LIBRARY_PATH when using PyInstaller
            Ref: https://github.com/pyinstaller/pyinstaller/blob/develop/doc/runtime-information.rst#ld_library_path--libpath-considerations
                 https://github.com/yt-dlp/yt-dlp/issues/4573
        """
        if not hasattr(sys, '_MEIPASS'):
            return

        def _fix(key):
            orig = env.get(f'{key}_ORIG')
            if orig is None:
                env.pop(key, None)
            else:
                env[key] = orig

        _fix('LD_LIBRARY_PATH')  # Linux
        _fix('DYLD_LIBRARY_PATH')  # macOS

    def __init__(self, *args, env=None, text=False, **kwargs):
        if env is None:
            env = os.environ.copy()
        self._fix_pyinstaller_ld_path(env)

        self.__text_mode = kwargs.get('encoding') or kwargs.get('errors') or text or kwargs.get('universal_newlines')
        if text is True:
            kwargs['universal_newlines'] = True  # For 3.6 compatibility
            kwargs.setdefault('encoding', 'utf-8')
            kwargs.setdefault('errors', 'replace')
        super().__init__(*args, env=env, **kwargs, startupinfo=self._startupinfo)

    def communicate_or_kill(self, *args, **kwargs):
        try:
            return self.communicate(*args, **kwargs)
        except BaseException:  # Including KeyboardInterrupt
            self.kill(timeout=None)
            raise

    def kill(self, *, timeout=0):
        super().kill()
        if timeout != 0:
            self.wait(timeout=timeout)

    @classmethod
    def run(cls, *args, timeout=None, **kwargs):
        with cls(*args, **kwargs) as proc:
            default = '' if proc.__text_mode else b''
            stdout, stderr = proc.communicate_or_kill(timeout=timeout)
            return stdout or default, stderr or default, proc.returncode
def encodeArgument(s):
    # Legacy code that uses byte strings
    # Uncomment the following line after fixing all post processors
    # assert isinstance(s, str), 'Internal error: %r should be of type %r, is %r' % (s, str, type(s))
    return s if isinstance(s, str) else s.decode('ascii')


_timetuple = collections.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))
def timetuple_from_msec(msec):
    secs, msec = divmod(msec, 1000)
    mins, secs = divmod(secs, 60)
    hrs, mins = divmod(mins, 60)
    return _timetuple(hrs, mins, secs, msec)


def formatSeconds(secs, delim=':', msec=False):
    time = timetuple_from_msec(secs * 1000)
    if time.hours:
        ret = '%d%s%02d%s%02d' % (time.hours, delim, time.minutes, delim, time.seconds)
    elif time.minutes:
        ret = '%d%s%02d' % (time.minutes, delim, time.seconds)
    else:
        ret = '%d' % time.seconds
    return '%s.%03d' % (ret, time.milliseconds) if msec else ret
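# Illustrative usage (not part of the original module):
#   formatSeconds(3661)             == '1:01:01'
#   formatSeconds(61.5, msec=True)  == '1:01.500'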
def _ssl_load_windows_store_certs(ssl_context, storename):
    # Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
    try:
        certs = [cert for cert, encoding, trust in ssl.enum_certificates(storename)
                 if encoding == 'x509_asn' and (
                     trust is True or ssl.Purpose.SERVER_AUTH.oid in trust)]
    except PermissionError:
        return
    for cert in certs:
        with contextlib.suppress(ssl.SSLError):
            ssl_context.load_verify_locations(cadata=cert)
def make_HTTPS_handler(params, **kwargs):
    opts_check_certificate = not params.get('nocheckcertificate')
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = opts_check_certificate
    if params.get('legacyserverconnect'):
        context.options |= 4  # SSL_OP_LEGACY_SERVER_CONNECT
        # Allow use of weaker ciphers in Python 3.10+. See https://bugs.python.org/issue43998
        context.set_ciphers('DEFAULT')
    elif (
        sys.version_info < (3, 10)
        and ssl.OPENSSL_VERSION_INFO >= (1, 1, 1)
        and not ssl.OPENSSL_VERSION.startswith('LibreSSL')
    ):
        # Backport the default SSL ciphers and minimum TLS version settings from Python 3.10 [1].
        # This is to ensure consistent behavior across Python versions, and help avoid fingerprinting
        # in some situations [2][3].
        # Python 3.10 only supports OpenSSL 1.1.1+ [4]. Because this change is likely
        # untested on older versions, we only apply this to OpenSSL 1.1.1+ to be safe.
        # LibreSSL is excluded until further investigation due to cipher support issues [5][6].
        # 1. https://github.com/python/cpython/commit/e983252b516edb15d4338b0a47631b59ef1e2536
        # 2. https://github.com/yt-dlp/yt-dlp/issues/4627
        # 3. https://github.com/yt-dlp/yt-dlp/pull/5294
        # 4. https://peps.python.org/pep-0644/
        # 5. https://peps.python.org/pep-0644/#libressl-support
        # 6. https://github.com/yt-dlp/yt-dlp/commit/5b9f253fa0aee996cf1ed30185d4b502e00609c4#commitcomment-89054368
        context.set_ciphers('@SECLEVEL=2:ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES:DHE+AES:!aNULL:!eNULL:!aDSS:!SHA1:!AESCCM')
        context.minimum_version = ssl.TLSVersion.TLSv1_2

    context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
    if opts_check_certificate:
        if certifi and 'no-certifi' not in params.get('compat_opts', []):
            context.load_verify_locations(cafile=certifi.where())
        else:
            try:
                context.load_default_certs()
            # Work around the issue in load_default_certs when there are bad certificates. See:
            # https://github.com/yt-dlp/yt-dlp/issues/1060,
            # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
            except ssl.SSLError:
                # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
                if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
                    for storename in ('CA', 'ROOT'):
                        _ssl_load_windows_store_certs(context, storename)
                context.set_default_verify_paths()

    client_certfile = params.get('client_certificate')
    if client_certfile:
        try:
            context.load_cert_chain(
                client_certfile, keyfile=params.get('client_certificate_key'),
                password=params.get('client_certificate_password'))
        except ssl.SSLError:
            raise YoutubeDLError('Unable to load client certificate')

    # Some servers may reject requests if ALPN extension is not sent. See:
    # https://github.com/python/cpython/issues/85140
    # https://github.com/yt-dlp/yt-dlp/issues/3878
    with contextlib.suppress(NotImplementedError):
        context.set_alpn_protocols(['http/1.1'])

    return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
def bug_reports_message(before=';'):
    from ..update import REPOSITORY

    msg = (f'please report this issue on https://github.com/{REPOSITORY}/issues?q= , '
           'filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U')

    before = before.rstrip()
    if not before or before.endswith(('.', '!', '?')):
        msg = msg[0].title() + msg[1:]

    return (before + ' ' if before else '') + msg
class YoutubeDLError(Exception):
    """Base exception for YoutubeDL errors."""
    msg = None

    def __init__(self, msg=None):
        if msg is not None:
            self.msg = msg
        elif self.msg is None:
            self.msg = type(self).__name__
        super().__init__(self.msg)


network_exceptions = [urllib.error.URLError, http.client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
    network_exceptions.append(ssl.CertificateError)
network_exceptions = tuple(network_exceptions)
class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
        """
        if sys.exc_info()[0] in network_exceptions:
            expected = True

        self.orig_msg = str(msg)
        self.traceback = tb
        self.expected = expected
        self.cause = cause
        self.video_id = video_id
        self.ie = ie
        self.exc_info = sys.exc_info()  # preserve original exception
        if isinstance(self.exc_info[1], ExtractorError):
            self.exc_info = self.exc_info[1].exc_info
        super().__init__(self.__msg)

    @property
    def __msg(self):
        return ''.join((
            format_field(self.ie, None, '[%s] '),
            format_field(self.video_id, None, '%s: '),
            self.orig_msg,
            format_field(self.cause, None, ' (caused by %r)'),
            '' if self.expected else bug_reports_message()))

    def format_traceback(self):
        return join_nonempty(
            self.traceback and ''.join(traceback.format_tb(self.traceback)),
            self.cause and ''.join(traceback.format_exception(None, self.cause, self.cause.__traceback__)[1:]),
            delim='\n') or None

    def __setattr__(self, name, value):
        super().__setattr__(name, value)
        if getattr(self, 'msg', None) and name not in ('msg', 'args'):
            self.msg = self.__msg or type(self).__name__
            self.args = (self.msg, )  # Cannot be property
class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super().__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class GeoRestrictedError(ExtractorError):
    """Geographic restriction Error exception.

    This exception may be thrown when a video is not available from your
    geographic location due to geographic restrictions imposed by a website.
    """

    def __init__(self, msg, countries=None, **kwargs):
        kwargs['expected'] = True
        super().__init__(msg, **kwargs)
        self.countries = countries


class UserNotLive(ExtractorError):
    """Error when a channel/user is not live"""

    def __init__(self, msg=None, **kwargs):
        kwargs['expected'] = True
        super().__init__(msg or 'The channel is not currently live', **kwargs)
class DownloadError(YoutubeDLError):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super().__init__(msg)
        self.exc_info = exc_info


class EntryNotInPlaylist(YoutubeDLError):
    """Entry not in playlist exception.

    This exception will be thrown by YoutubeDL when a requested entry
    is not found in the playlist info_dict
    """
    msg = 'Entry not found in info'


class SameFileError(YoutubeDLError):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    msg = 'Fixed output name but more than one file to download'

    def __init__(self, filename=None):
        if filename is not None:
            self.msg += f': {filename}'
        super().__init__(self.msg)


class PostProcessingError(YoutubeDLError):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """


class DownloadCancelled(YoutubeDLError):
    """ Exception raised when the download queue should be interrupted """
    msg = 'The download was cancelled'


class ExistingVideoReached(DownloadCancelled):
    """ --break-on-existing triggered """
    msg = 'Encountered a video that is already in the archive, stopping due to --break-on-existing'


class RejectedVideoReached(DownloadCancelled):
    """ --break-match-filter triggered """
    msg = 'Encountered a video that did not match filter, stopping due to --break-match-filter'


class MaxDownloadsReached(DownloadCancelled):
    """ --max-downloads limit has been reached. """
    msg = 'Maximum number of downloads reached, stopping due to --max-downloads'


class ReExtractInfo(YoutubeDLError):
    """ Video info needs to be re-extracted. """

    def __init__(self, msg, expected=False):
        super().__init__(msg)
        self.expected = expected


class ThrottledDownload(ReExtractInfo):
    """ Download speed below --throttled-rate. """
    msg = 'The download speed is below throttle limit'

    def __init__(self):
        super().__init__(self.msg, expected=False)
class UnavailableVideoError(YoutubeDLError):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    msg = 'Unable to download video'

    def __init__(self, err=None):
        if err is not None:
            self.msg += f': {err}'
        super().__init__(self.msg)


class ContentTooShortError(YoutubeDLError):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        super().__init__(f'Downloaded {downloaded} bytes, expected {expected} bytes')
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected


class XAttrMetadataError(YoutubeDLError):
    def __init__(self, code=None, msg='Unknown error'):
        super().__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        if (self.code in (errno.ENOSPC, errno.EDQUOT)
                or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'


class XAttrUnavailableError(YoutubeDLError):
    pass
def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    hc = http_class(*args, **kwargs)
    source_address = ydl_handler._params.get('source_address')

    if source_address is not None:
        # This is to workaround _create_connection() from socket where it will try all
        # address data from getaddrinfo() including IPv6. This filters the result from
        # getaddrinfo() based on the source_address value.
        # This is based on the cpython socket.create_connection() function.
        # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
        def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
            host, port = address
            err = None
            addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
            af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
            ip_addrs = [addr for addr in addrs if addr[0] == af]
            if addrs and not ip_addrs:
                ip_version = 'v4' if af == socket.AF_INET else 'v6'
                raise OSError(
                    "No remote IP%s addresses available for connect, can't use '%s' as source address"
                    % (ip_version, source_address[0]))
            for res in ip_addrs:
                af, socktype, proto, canonname, sa = res
                sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                        sock.settimeout(timeout)
                    sock.bind(source_address)
                    sock.connect(sa)
                    err = None  # Explicitly break reference cycle
                    return sock
                except OSError as _:
                    err = _
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            else:
                raise OSError('getaddrinfo returns an empty list')
        if hasattr(hc, '_create_connection'):
            hc._create_connection = _create_connection
        hc.source_address = (source_address, 0)

    return hc
class YoutubeDLHandler(urllib.request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped, deflated and
    brotli responses from web servers.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        urllib.request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        conn_class = http.client.HTTPConnection

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        if not data:
            return data
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def brotli(data):
        if not data:
            return data
        return brotli.decompress(data)

    @staticmethod
    def gz(data):
        gz = gzip.GzipFile(fileobj=io.BytesIO(data), mode='rb')
        try:
            return gz.read()
        except OSError as original_oserror:
            # There may be junk at the end of the file
            # See http://stackoverflow.com/q/4928560/35070 for details
            for i in range(1, 1024):
                try:
                    gz = gzip.GzipFile(fileobj=io.BytesIO(data[:-i]), mode='rb')
                    return gz.read()
                except OSError:
                    continue
            else:
                raise original_oserror

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in self._params.get('http_headers', std_headers).items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        if 'Youtubedl-no-compression' in req.headers:  # deprecated
            req.headers.pop('Youtubedl-no-compression', None)
            req.add_header('Accept-encoding', 'identity')

        if 'Accept-encoding' not in req.headers:
            req.add_header('Accept-encoding', ', '.join(SUPPORTED_ENCODINGS))

        return super().do_request_(req)

    def http_response(self, req, resp):
        old_resp = resp

        # Content-Encoding header lists the encodings in order that they were applied [1].
        # To decompress, we simply do the reverse.
        # [1]: https://datatracker.ietf.org/doc/html/rfc9110#name-content-encoding
        decoded_response = None
        for encoding in (e.strip() for e in reversed(resp.headers.get('Content-encoding', '').split(','))):
            if encoding == 'gzip':
                decoded_response = self.gz(decoded_response or resp.read())
            elif encoding == 'deflate':
                decoded_response = self.deflate(decoded_response or resp.read())
            elif encoding == 'br' and brotli:
                decoded_response = self.brotli(decoded_response or resp.read())

        if decoded_response is not None:
            resp = urllib.request.addinfourl(io.BytesIO(decoded_response), old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/ytdl-org/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                location = location.encode('iso-8859-1').decode()
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response
def make_socks_conn_class(base_class, socks_proxy):
    assert issubclass(base_class, (
        http.client.HTTPConnection, http.client.HTTPSConnection))

    url_components = urllib.parse.urlparse(socks_proxy)
    if url_components.scheme.lower() == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif url_components.scheme.lower() in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif url_components.scheme.lower() == 'socks4a':
        socks_type = ProxyType.SOCKS4A

    def unquote_if_non_empty(s):
        if not s:
            return s
        return urllib.parse.unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if isinstance(self.timeout, (int, float)):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            if isinstance(self, http.client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection
class YoutubeDLHTTPSHandler(urllib.request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        urllib.request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or http.client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        conn_class = self._https_conn_class

        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        try:
            return self.do_open(
                functools.partial(_create_http_connection, self, conn_class, True), req, **kwargs)
        except urllib.error.URLError as e:
            if (isinstance(e.reason, ssl.SSLError)
                    and getattr(e.reason, 'reason', None) == 'SSLV3_ALERT_HANDSHAKE_FAILURE'):
                raise YoutubeDLError('SSLV3_ALERT_HANDSHAKE_FAILURE: Try using --legacy-server-connect')
            raise
def is_path_like(f):
    return isinstance(f, (str, bytes, os.PathLike))


class YoutubeDLCookieProcessor(urllib.request.HTTPCookieProcessor):
    def __init__(self, cookiejar=None):
        urllib.request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        return urllib.request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = urllib.request.HTTPCookieProcessor.http_request
    https_response = http_response
class YoutubeDLRedirectHandler(urllib.request.HTTPRedirectHandler):
    """YoutubeDL redirect handler

    The code is based on HTTPRedirectHandler implementation from CPython [1].

    This redirect handler fixes and improves the logic to better align with RFC 7231
    and what browsers tend to do [2][3]

    1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
    2. https://datatracker.ietf.org/doc/html/rfc7231
    3. https://github.com/python/cpython/issues/91306
    """

    http_error_301 = http_error_303 = http_error_307 = http_error_308 = urllib.request.HTTPRedirectHandler.http_error_302

    def redirect_request(self, req, fp, code, msg, headers, newurl):
        if code not in (301, 302, 303, 307, 308):
            raise urllib.error.HTTPError(req.full_url, code, msg, headers, fp)

        new_method = req.get_method()
        new_data = req.data
        remove_headers = []
        # A 303 must either use GET or HEAD for subsequent request
        # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.4
        if code == 303 and req.get_method() != 'HEAD':
            new_method = 'GET'
        # 301 and 302 redirects are commonly turned into a GET from a POST
        # for subsequent requests by browsers, so we'll do the same.
        # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.2
        # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.3
        elif code in (301, 302) and req.get_method() == 'POST':
            new_method = 'GET'

        # only remove payload if method changed (e.g. POST to GET)
        if new_method != req.get_method():
            new_data = None
            remove_headers.extend(['Content-Length', 'Content-Type'])

        new_headers = {k: v for k, v in req.headers.items() if k.title() not in remove_headers}

        return urllib.request.Request(
            newurl, headers=new_headers, origin_req_host=req.origin_req_host,
            unverifiable=True, method=new_method, data=new_data)
def extract_timezone(date_str):
    m = re.search(
        r'''(?x)
            ^.{8,}?                                              # >=8 char non-TZ prefix, if present
            (?P<tz>Z|                                            # just the UTC Z, or
               (?:(?<=.\b\d{4}|\b\d{2}:\d\d)|                    # preceded by 4 digits or hh:mm or
                  (?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d))      # not preceded by 3 alpha word or >= 4 alpha or 2 digits
                   [ ]?                                          # optional space
                (?P<sign>\+|-)                                   # +/-
                (?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})       # hh[:]mm
            $)
        ''', date_str)
    if not m:
        m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
        timezone = TIMEZONE_NAMES.get(m and m.group('tz').strip())
        if timezone is not None:
            date_str = date_str[:-len(m.group('tz'))]
        timezone = datetime.timedelta(hours=timezone or 0)
    else:
        date_str = date_str[:-len(m.group('tz'))]
        if not m.group('sign'):
            timezone = datetime.timedelta()
        else:
            sign = 1 if m.group('sign') == '+' else -1
            timezone = datetime.timedelta(
                hours=sign * int(m.group('hours')),
                minutes=sign * int(m.group('minutes')))
    return timezone, date_str
def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        timezone, date_str = extract_timezone(date_str)

    with contextlib.suppress(ValueError):
        date_format = f'%Y-%m-%d{delimiter}%H:%M:%S'
        dt = datetime.datetime.strptime(date_str, date_format) - timezone
        return calendar.timegm(dt.timetuple())


def date_formats(day_first=True):
    return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST
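# Illustrative usage of parse_iso8601 (not part of the original module):
#   parse_iso8601('2023-01-01T00:00:00Z')       == 1672531200
#   parse_iso8601('2023-01-01T02:00:00+02:00')  == 1672531200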
def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None
    upload_date = None
    # Replace commas
    date_str = date_str.replace(',', ' ')
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
    _, date_str = extract_timezone(date_str)

    for expression in date_formats(day_first):
        with contextlib.suppress(ValueError):
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
    if upload_date is None:
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            with contextlib.suppress(ValueError):
                upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
    if upload_date is not None:
        return str(upload_date)
def unified_timestamp(date_str, day_first=True):
    if not isinstance(date_str, str):
        return None

    date_str = re.sub(r'\s+', ' ', re.sub(
        r'(?i)[,|]|(mon|tues?|wed(nes)?|thu(rs)?|fri|sat(ur)?)(day)?', '', date_str))

    pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
    timezone, date_str = extract_timezone(date_str)

    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    # Remove unrecognized timezones from ISO 8601 alike timestamps
    m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
    if m:
        date_str = date_str[:-len(m.group('tz'))]

    # Python only supports microseconds, so remove nanoseconds
    m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
    if m:
        date_str = m.group(1)

    for expression in date_formats(day_first):
        with contextlib.suppress(ValueError):
            dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
            return calendar.timegm(dt.timetuple())

    timetuple = email.utils.parsedate_tz(date_str)
    if timetuple:
        return calendar.timegm(timetuple) + pm_delta * 3600 - timezone.total_seconds()
def determine_ext(url, default_ext='unknown_video'):
    if url is None or '.' not in url:
        return default_ext
    guess = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', guess):
        return guess
    # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
    elif guess.rstrip('/') in KNOWN_EXTENSIONS:
        return guess.rstrip('/')
    else:
        return default_ext


def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
    return replace_extension(filename, sub_lang + '.' + sub_format, expected_real_ext)
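# Illustrative usage of determine_ext (not part of the original module); URLs are
# made-up examples:
#   determine_ext('http://example.com/video.mp4')             == 'mp4'
#   determine_ext('http://example.com/foo/bar.mp4/?download') == 'mp4'
#   determine_ext('http://example.com/page')                  == 'unknown_video'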
def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
    R"""
    Return a datetime object from a string.
    Supported format:
        (now|today|yesterday|DATE)([+-]\d+(microsecond|second|minute|hour|day|week|month|year)s?)?

    @param format       strftime format of DATE
    @param precision    Round the datetime object: auto|microsecond|second|minute|hour|day
                        auto: round to the unit provided in date_str (if applicable).
    """
    auto_precision = False
    if precision == 'auto':
        auto_precision = True
        precision = 'microsecond'
    today = datetime_round(datetime.datetime.utcnow(), precision)
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match(
        r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?',
        date_str)
    if match is not None:
        start_time = datetime_from_str(match.group('start'), precision, format)
        time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1)
        unit = match.group('unit')
        if unit == 'month' or unit == 'year':
            new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time)
            unit = 'day'
        else:
            if unit == 'week':
                unit = 'day'
                time *= 7
            delta = datetime.timedelta(**{unit + 's': time})
            new_date = start_time + delta
        if auto_precision:
            return datetime_round(new_date, unit)
        return new_date

    return datetime_round(datetime.datetime.strptime(date_str, format), precision)
def date_from_str(date_str, format='%Y%m%d', strict=False):
    R"""
    Return a date object from a string using datetime_from_str

    @param strict  Restrict allowed patterns to "YYYYMMDD" and
                   (now|today|yesterday)(-\d+(day|week|month|year)s?)?
    """
    if strict and not re.fullmatch(r'\d{8}|(now|today|yesterday)(-\d+(day|week|month|year)s?)?', date_str):
        raise ValueError(f'Invalid date format "{date_str}"')
    return datetime_from_str(date_str, precision='microsecond', format=format).date()
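# Illustrative usage (not part of the original module):
#   date_from_str('today-2weeks')  -> the date two weeks before today
#   date_from_str('20240229')      == datetime.date(2024, 2, 29)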
def datetime_add_months(dt, months):
    """Increment/Decrement a datetime object by months."""
    month = dt.month + months - 1
    year = dt.year + month // 12
    month = month % 12 + 1
    day = min(dt.day, calendar.monthrange(year, month)[1])
    return dt.replace(year, month, day)
def datetime_round(dt, precision='day'):
    """
    Round a datetime object's time to a specific precision
    """
    if precision == 'microsecond':
        return dt

    unit_seconds = {'day': 86400, 'hour': 3600, 'minute': 60, 'second': 1}
    roundto = lambda x, n: ((x + n / 2) // n) * n
    timestamp = calendar.timegm(dt.timetuple())
    return datetime.datetime.utcfromtimestamp(roundto(timestamp, unit_seconds[precision]))
def hyphenate_date(date_str):
    """
    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is not None:
        return '-'.join(match.groups())
    else:
        return date_str
1800 """Represents a time interval between two dates"""
1802 def __init__(self
, start
=None, end
=None):
1803 """start and end must be strings in the format accepted by date"""
1804 if start
is not None:
1805 self
.start
= date_from_str(start
, strict
=True)
1807 self
.start
= datetime
.datetime
.min.date()
1809 self
.end
= date_from_str(end
, strict
=True)
1811 self
.end
= datetime
.datetime
.max.date()
1812 if self
.start
> self
.end
:
1813 raise ValueError('Date range: "%s" , the start date must be before the end date' % self
)
1817 """Returns a range that only contains the given day"""
1818 return cls(day
, day
)
1820 def __contains__(self
, date
):
1821 """Check if the date is in the range"""
1822 if not isinstance(date
, datetime
.date
):
1823 date
= date_from_str(date
)
1824 return self
.start
<= date
<= self
.end
1827 return f
'{__name__}.{type(self).__name__}({self.start.isoformat()!r}, {self.end.isoformat()!r})'
1829 def __eq__(self
, other
):
1830 return (isinstance(other
, DateRange
)
1831 and self
.start
== other
.start
and self
.end
== other
.end
)
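# Illustrative usage (not part of the original module):
#   DateRange('20240101', '20241231')                 # the whole of 2024
#   '20240615' in DateRange('20240101', '20241231')   == True
#   DateRange.day('20240101') == DateRange('20240101', '20240101')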
def system_identifier():
    python_implementation = platform.python_implementation()
    if python_implementation == 'PyPy' and hasattr(sys, 'pypy_version_info'):
        python_implementation += ' version %d.%d.%d' % sys.pypy_version_info[:3]
    libc_ver = []
    with contextlib.suppress(OSError):  # We may not have access to the executable
        libc_ver = platform.libc_ver()

    return 'Python %s (%s %s %s) - %s (%s%s)' % (
        platform.python_version(),
        python_implementation,
        platform.machine(),
        platform.architecture()[0],
        platform.platform(),
        ssl.OPENSSL_VERSION,
        format_field(join_nonempty(*libc_ver, delim=' '), None, ', %s'),
    )
def get_windows_version():
    ''' Get Windows version. returns () if it's not running on Windows '''
    if compat_os_name == 'nt':
        return version_tuple(platform.win32_ver()[1])
    else:
        return ()
def write_string(s, out=None, encoding=None):
    assert isinstance(s, str)
    out = out or sys.stderr
    # `sys.stderr` might be `None` (Ref: https://github.com/pyinstaller/pyinstaller/pull/7217)
    if not out:
        return

    if compat_os_name == 'nt' and supports_terminal_sequences(out):
        s = re.sub(r'([\r\n]+)', r' \1', s)

    enc, buffer = None, out
    if 'b' in getattr(out, 'mode', ''):
        enc = encoding or preferredencoding()
    elif hasattr(out, 'buffer'):
        buffer = out.buffer
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()

    buffer.write(s.encode(enc, 'ignore') if enc else s)
    buffer.flush()
def deprecation_warning(msg, *, printer=None, stacklevel=0, **kwargs):
    from .. import _IN_CLI
    if _IN_CLI:
        if msg in deprecation_warning._cache:
            return
        deprecation_warning._cache.add(msg)
        if printer:
            return printer(f'{msg}{bug_reports_message()}', **kwargs)
        return write_string(f'ERROR: {msg}{bug_reports_message()}\n', **kwargs)
    else:
        import warnings
        warnings.warn(DeprecationWarning(msg), stacklevel=stacklevel + 3)


deprecation_warning._cache = set()
def bytes_to_intlist(bs):
    if not bs:
        return []
    if isinstance(bs[0], int):  # Python 3
        return list(bs)
    return [ord(c) for c in bs]


def intlist_to_bytes(xs):
    if not xs:
        return b''
    return struct.pack('%dB' % len(xs), *xs)
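# Illustrative usage (not part of the original module):
#   bytes_to_intlist(b'abc')        == [97, 98, 99]
#   intlist_to_bytes([97, 98, 99])  == b'abc'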
class LockingUnsupportedError(OSError):
    msg = 'File locking is not supported'

    def __init__(self):
        super().__init__(self.msg)
# Cross-platform file locking
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.WinDLL('kernel32')
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive, block):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)

        if not LockFileEx(msvcrt.get_osfhandle(f.fileno()),
                          (0x2 if exclusive else 0x0) | (0x0 if block else 0x1),
                          0, whole_low, whole_high, f._lock_file_overlapped_p):
            # NB: No argument form of "ctypes.FormatError" does not work on PyPy
            raise BlockingIOError(f'Locking file failed: {ctypes.FormatError(ctypes.GetLastError())!r}')

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())

else:
    try:
        import fcntl

        def _lock_file(f, exclusive, block):
            flags = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
            if not block:
                flags |= fcntl.LOCK_NB
            try:
                fcntl.flock(f, flags)
            except BlockingIOError:
                raise
            except OSError:  # AOSP does not have flock()
                fcntl.lockf(f, flags)

        def _unlock_file(f):
            with contextlib.suppress(OSError):
                return fcntl.flock(f, fcntl.LOCK_UN)
            with contextlib.suppress(OSError):
                return fcntl.lockf(f, fcntl.LOCK_UN)  # AOSP does not have flock()
            return fcntl.flock(f, fcntl.LOCK_UN | fcntl.LOCK_NB)  # virtiofs needs LOCK_NB on unlocking

    except ImportError:

        def _lock_file(f, exclusive, block):
            raise LockingUnsupportedError()

        def _unlock_file(f):
            raise LockingUnsupportedError()
class locked_file:
    locked = False

    def __init__(self, filename, mode, block=True, encoding=None):
        if mode not in {'r', 'rb', 'a', 'ab', 'w', 'wb'}:
            raise NotImplementedError(mode)
        self.mode, self.block = mode, block

        writable = any(f in mode for f in 'wax+')
        readable = any(f in mode for f in 'r+')
        flags = functools.reduce(operator.ior, (
            getattr(os, 'O_CLOEXEC', 0),  # UNIX only
            getattr(os, 'O_BINARY', 0),  # Windows only
            getattr(os, 'O_NOINHERIT', 0),  # Windows only
            os.O_CREAT if writable else 0,  # O_TRUNC only after locking
            os.O_APPEND if 'a' in mode else 0,
            os.O_EXCL if 'x' in mode else 0,
            os.O_RDONLY if not writable else os.O_RDWR if readable else os.O_WRONLY,
        ))

        self.f = os.fdopen(os.open(filename, flags, 0o666), mode, encoding=encoding)

    def __enter__(self):
        exclusive = 'r' not in self.mode
        try:
            _lock_file(self.f, exclusive, self.block)
            self.locked = True
        except OSError:
            self.f.close()
            raise
        if 'w' in self.mode:
            try:
                self.f.truncate()
            except OSError as e:
                if e.errno not in (
                    errno.ESPIPE,  # Illegal seek - expected for FIFO
                    errno.EINVAL,  # Invalid argument - expected for /dev/null
                ):
                    raise
        return self

    def unlock(self):
        if not self.locked:
            return
        try:
            _unlock_file(self.f)
        finally:
            self.locked = False

    def __exit__(self, *_):
        try:
            self.unlock()
        finally:
            self.f.close()

    def __getattr__(self, attr):
        return getattr(self.f, attr)
def get_filesystem_encoding():
    encoding = sys.getfilesystemencoding()
    return encoding if encoding is not None else 'utf-8'


def shell_quote(args):
    quoted_args = []
    encoding = get_filesystem_encoding()
    for a in args:
        if isinstance(a, bytes):
            # We may get a filename encoded with 'encodeFilename'
            a = a.decode(encoding)
        quoted_args.append(compat_shlex_quote(a))
    return ' '.join(quoted_args)
def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """

    url, idata = unsmuggle_url(url, {})
    data.update(idata)
    sdata = urllib.parse.urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return url + '#' + sdata


def unsmuggle_url(smug_url, default=None):
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, sdata = smug_url.rpartition('#')
    jsond = urllib.parse.parse_qs(sdata)['__youtubedl_smuggle'][0]
    data = json.loads(jsond)
    return url, data
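# Illustrative round-trip for the two helpers above (example values, not taken from the
# module itself): the payload travels in a '#__youtubedl_smuggle=<json>' fragment and is
# recovered verbatim by unsmuggle_url().
#   smuggled = smuggle_url('https://example.com/watch?v=abc', {'referer': 'https://example.com/'})
#   url, data = unsmuggle_url(smuggled)
#   # url == 'https://example.com/watch?v=abc'; data == {'referer': 'https://example.com/'}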
def format_decimal_suffix(num, fmt='%d%s', *, factor=1000):
    """ Formats numbers with decimal suffixes like K, M, etc """
    num, factor = float_or_none(num), float(factor)
    if num is None or num < 0:
        return None
    POSSIBLE_SUFFIXES = 'kMGTPEZY'
    exponent = 0 if num == 0 else min(int(math.log(num, factor)), len(POSSIBLE_SUFFIXES))
    suffix = ['', *POSSIBLE_SUFFIXES][exponent]
    if factor == 1024:
        suffix = {'k': 'Ki', '': ''}.get(suffix, f'{suffix}i')
    converted = num / (factor ** exponent)
    return fmt % (converted, suffix)


def format_bytes(bytes):
    return format_decimal_suffix(bytes, '%.2f%sB', factor=1024) or 'N/A'
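# Example values (illustrative, not from the source): format_decimal_suffix(1234, '%.1f%s')
# gives '1.2k', and format_bytes(1536) gives '1.50KiB', since the binary factor rewrites
# the 'k' suffix to 'Ki'.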
def lookup_unit_table(unit_table, s, strict=False):
    num_re = NUMBER_RE if strict else NUMBER_RE.replace(R'\.', '[,.]')
    units_re = '|'.join(re.escape(u) for u in unit_table)
    m = (re.fullmatch if strict else re.match)(
        rf'(?P<num>{num_re})\s*(?P<unit>{units_re})\b', s)
    if not m:
        return None

    num = float(m.group('num').replace(',', '.'))
    mult = unit_table[m.group('unit')]
    return round(num * mult)


def parse_bytes(s):
    """Parse a string indicating a byte quantity into an integer"""
    return lookup_unit_table(
        {u: 1024**i for i, u in enumerate(['', *'KMGTPEZY'])},
        s.upper(), strict=True)
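# Illustrative usage (assumed inputs): parse_bytes('1.5G') == round(1.5 * 1024**3) == 1610612736.
# The non-strict lookup_unit_table() additionally tolerates a comma as the decimal separator,
# e.g. lookup_unit_table({'G': 1024**3}, '1,5G') == 1610612736.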
def parse_filesize(s):
    if s is None:
        return None

    # The lower-case forms are of course incorrect and unofficial,
    # but we support those too
    _UNIT_TABLE = {
        'megabytes': 1000 ** 2,
        'mebibytes': 1024 ** 2,
        'gigabytes': 1000 ** 3,
        'gibibytes': 1024 ** 3,
        'terabytes': 1000 ** 4,
        'tebibytes': 1024 ** 4,
        'petabytes': 1000 ** 5,
        'pebibytes': 1024 ** 5,
        'exabytes': 1000 ** 6,
        'exbibytes': 1024 ** 6,
        'zettabytes': 1000 ** 7,
        'zebibytes': 1024 ** 7,
        'yottabytes': 1000 ** 8,
        'yobibytes': 1024 ** 8,
    }

    return lookup_unit_table(_UNIT_TABLE, s)


def parse_count(s):
    if s is None:
        return None

    s = re.sub(r'^[^\d]+\s', '', s).strip()

    if re.match(r'^[\d,.]+$', s):
        return str_to_int(s)

    _UNIT_TABLE = {
        'k': 1000,
        'K': 1000,
        'm': 1000 ** 2,
        'M': 1000 ** 2,
        'kk': 1000 ** 2,
        'KK': 1000 ** 2,
        'b': 1000 ** 3,
        'B': 1000 ** 3,
        'kkk': 1000 ** 3,
        'KKK': 1000 ** 3,
    }

    ret = lookup_unit_table(_UNIT_TABLE, s)
    if ret is not None:
        return ret

    mobj = re.match(r'([\d,.]+)(?:$|\s)', s)
    if mobj:
        return str_to_int(mobj.group(1))
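# Illustrative (assumed inputs): parse_filesize('2 mebibytes') == 2 * 1024**2 == 2097152,
# while parse_count('11,700') matches the plain-number branch and returns 11700.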
def parse_resolution(s, *, lenient=False):
    if s is None:
        return {}

    if lenient:
        mobj = re.search(r'(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)', s)
    else:
        mobj = re.search(r'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s)
    if mobj:
        return {
            'width': int(mobj.group('w')),
            'height': int(mobj.group('h')),
        }

    mobj = re.search(r'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s)
    if mobj:
        return {'height': int(mobj.group(1))}

    mobj = re.search(r'\b([48])[kK]\b', s)
    if mobj:
        return {'height': int(mobj.group(1)) * 540}

    return {}


def parse_bitrate(s):
    if not isinstance(s, str):
        return None
    mobj = re.search(r'\b(\d+)\s*kbps', s)
    if mobj:
        return int(mobj.group(1))
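# Illustrative (assumed inputs): parse_resolution('1920x1080') == {'width': 1920, 'height': 1080},
# parse_resolution('720p') == {'height': 720}, and parse_bitrate('128 kbps') == 128.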
def month_by_name(name, lang='en'):
    """ Return the number of a month by (locale-independently) English name """

    month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])

    try:
        return month_names.index(name) + 1
    except ValueError:
        return None


def month_by_abbreviation(abbrev):
    """ Return the number of a month by (locale-independently) English
        abbreviations """

    try:
        return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
    except ValueError:
        return None


def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML"""
    return re.sub(
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;',
        xml_str)
def setproctitle(title):
    assert isinstance(title, str)

    # Workaround for https://github.com/yt-dlp/yt-dlp/issues/4541
    try:
        import ctypes
    except ImportError:
        return

    try:
        libc = ctypes.cdll.LoadLibrary('libc.so.6')
    except OSError:
        return
    except TypeError:
        # LoadLibrary in Windows Python 2.7.13 only expects
        # a bytestring, but since unicode_literals turns
        # every string into a unicode string, it fails.
        return
    title_bytes = title.encode()
    buf = ctypes.create_string_buffer(len(title_bytes))
    buf.value = title_bytes
    try:
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this
def remove_start(s, start):
    return s[len(start):] if s is not None and s.startswith(start) else s


def remove_end(s, end):
    return s[:-len(end)] if s is not None and s.endswith(end) else s


def remove_quotes(s):
    if s is None or len(s) < 2:
        return s
    for quote in ('"', "'", ):
        if s[0] == quote and s[-1] == quote:
            return s[1:-1]
    return s
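# Illustrative (assumed inputs): remove_start('www.example.com', 'www.') == 'example.com',
# remove_end('video.mp4', '.mp4') == 'video', and remove_quotes('"title"') == 'title'.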
def get_domain(url):
    """
    This implementation is inconsistent, but is kept for compatibility.
    Use this only for "webpage_url_domain"
    """
    return remove_start(urllib.parse.urlparse(url).netloc, 'www.') or None


def url_basename(url):
    path = urllib.parse.urlparse(url).path
    return path.strip('/').split('/')[-1]


def base_url(url):
    return re.match(r'https?://[^?#]+/', url).group()
def urljoin(base, path):
    if isinstance(path, bytes):
        path = path.decode()
    if not isinstance(path, str) or not path:
        return None
    if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
        return path
    if isinstance(base, bytes):
        base = base.decode()
    if not isinstance(base, str) or not re.match(
            r'^(?:https?:)?//', base):
        return None
    return urllib.parse.urljoin(base, path)


class HEADRequest(urllib.request.Request):
    def get_method(self):
        return 'HEAD'


class PUTRequest(urllib.request.Request):
    def get_method(self):
        return 'PUT'
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    if get_attr and v is not None:
        v = getattr(v, get_attr, None)
    try:
        return int(v) * invscale // scale
    except (ValueError, TypeError, OverflowError):
        return default


def str_or_none(v, default=None):
    return default if v is None else str(v)


def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    if isinstance(int_str, int):
        return int_str
    elif isinstance(int_str, str):
        int_str = re.sub(r'[,\.\+]', '', int_str)
        return int_or_none(int_str)
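# Illustrative (assumed inputs): int_or_none('42') == 42, int_or_none(None) is None, and
# str_to_int('123,456') == 123456 (separators are stripped before conversion).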
def float_or_none(v, scale=1, invscale=1, default=None):
    if v is None:
        return default
    try:
        return float(v) * invscale / scale
    except (ValueError, TypeError):
        return default


def bool_or_none(v, default=None):
    return v if isinstance(v, bool) else default


def strip_or_none(v, default=None):
    return v.strip() if isinstance(v, str) else default


def url_or_none(url):
    if not url or not isinstance(url, str):
        return None
    url = url.strip()
    return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None
def request_to_url(req):
    if isinstance(req, urllib.request.Request):
        return req.get_full_url()
    else:
        return req


def strftime_or_none(timestamp, date_format='%Y%m%d', default=None):
    datetime_object = None
    try:
        if isinstance(timestamp, (int, float)):  # unix timestamp
            # Using naive datetime here can break timestamp() in Windows
            # Ref: https://github.com/yt-dlp/yt-dlp/issues/5185, https://github.com/python/cpython/issues/94414
            # Also, datetime.datetime.fromtimestamp breaks for negative timestamps
            # Ref: https://github.com/yt-dlp/yt-dlp/issues/6706#issuecomment-1496842642
            datetime_object = (datetime.datetime.fromtimestamp(0, datetime.timezone.utc)
                               + datetime.timedelta(seconds=timestamp))
        elif isinstance(timestamp, str):  # assume YYYYMMDD
            datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
        date_format = re.sub(  # Support %s on windows
            r'(?<!%)(%%)*%s', rf'\g<1>{int(datetime_object.timestamp())}', date_format)
        return datetime_object.strftime(date_format)
    except (ValueError, TypeError, AttributeError):
        return default
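# Illustrative (assumed inputs): strftime_or_none(0) == '19700101' (epoch with the default
# format), and strftime_or_none('20230115', '%Y-%m-%d') == '2023-01-15' (string input is
# parsed as YYYYMMDD before reformatting).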
def parse_duration(s):
    if not isinstance(s, str):
        return None

    s = s.strip()
    if not s:
        return None

    days, hours, mins, secs, ms = [None] * 5
    m = re.match(r'''(?x)
            (?P<before_secs>
                (?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?
            (?P<secs>(?(before_secs)[0-9]{1,2}|[0-9]+))
            (?P<ms>[.:][0-9]+)?Z?$
        ''', s)
    if m:
        days, hours, mins, secs, ms = m.group('days', 'hours', 'mins', 'secs', 'ms')
    else:
        m = re.match(
            r'''(?ix)(?:P?
                (?:
                    [0-9]+\s*y(?:ears?)?,?\s*
                )?
                (?:
                    [0-9]+\s*m(?:onths?)?,?\s*
                )?
                (?:
                    [0-9]+\s*w(?:eeks?)?,?\s*
                )?
                (?:
                    (?P<days>[0-9]+)\s*d(?:ays?)?,?\s*
                )?
                T)?
                (?:
                    (?P<hours>[0-9]+)\s*h(?:ours?)?,?\s*
                )?
                (?:
                    (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?,?\s*
                )?
                (?:
                    (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
                )?Z?$''', s)
        if m:
            days, hours, mins, secs, ms = m.groups()
        else:
            m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
            if m:
                hours, mins = m.groups()
            else:
                return None

    if ms:
        ms = ms.replace(':', '.')
    return sum(float(part or 0) * mult for part, mult in (
        (days, 86400), (hours, 3600), (mins, 60), (secs, 1), (ms, 1)))
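# Illustrative (assumed inputs): parse_duration('1:02:03') == 3723 (H:M:S form) and
# parse_duration('2h 30m') == 9000 (unit-suffixed form).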
def prepend_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return (
        f'{name}.{ext}{real_ext}'
        if not expected_real_ext or real_ext[1:] == expected_real_ext
        else f'{filename}.{ext}')


def replace_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return '{}.{}'.format(
        name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
        ext)
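# Illustrative (assumed inputs): prepend_extension('clip.mp4', 'temp') == 'clip.temp.mp4',
# replace_extension('clip.mp4', 'mkv') == 'clip.mkv', and a mismatched expected_real_ext
# falls back to appending, e.g. prepend_extension('clip.mp4', 'temp', 'mkv') == 'clip.mp4.temp'.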
def check_executable(exe, args=[]):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
    try:
        Popen.run([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError:
        return False
    return exe


def _get_exe_version_output(exe, args):
    try:
        # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
        # SIGTTOU if yt-dlp is run in the background.
        # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
        stdout, _, ret = Popen.run([encodeArgument(exe)] + args, text=True,
                                   stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        if ret:
            return None
    except OSError:
        return False
    return stdout


def detect_exe_version(output, version_re=None, unrecognized='present'):
    assert isinstance(output, str)
    if version_re is None:
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
    if m:
        return m.group(1)
    else:
        return unrecognized


def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized=('present', 'broken')):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    unrecognized = variadic(unrecognized)
    assert len(unrecognized) in (1, 2)
    out = _get_exe_version_output(exe, args)
    if out is None:
        return unrecognized[-1]
    return out and detect_exe_version(out, version_re, unrecognized[0])
def frange(start=0, stop=None, step=1):
    """Float range"""
    if stop is None:
        start, stop = 0, start
    sign = [-1, 1][step > 0] if step else 0
    while sign * start < sign * stop:
        yield start
        start += step
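# Illustrative (assumed inputs): list(frange(3)) == [0, 1, 2] and
# list(frange(0, 1, 0.25)) == [0, 0.25, 0.5, 0.75] (a float-friendly counterpart of range()).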
2598 class LazyList(collections
.abc
.Sequence
):
2599 """Lazy immutable list from an iterable
2600 Note that slices of a LazyList are lists and not LazyList"""
2602 class IndexError(IndexError):
2605 def __init__(self
, iterable
, *, reverse
=False, _cache
=None):
2606 self
._iterable
= iter(iterable
)
2607 self
._cache
= [] if _cache
is None else _cache
2608 self
._reversed
= reverse
2612 # We need to consume the entire iterable to iterate in reverse
2613 yield from self
.exhaust()
2615 yield from self
._cache
2616 for item
in self
._iterable
:
2617 self
._cache
.append(item
)
2621 self
._cache
.extend(self
._iterable
)
2622 self
._iterable
= [] # Discard the emptied iterable to make it pickle-able
2626 """Evaluate the entire iterable"""
2627 return self
._exhaust
()[::-1 if self
._reversed
else 1]
2630 def _reverse_index(x
):
2631 return None if x
is None else ~x
2633 def __getitem__(self
, idx
):
2634 if isinstance(idx
, slice):
2636 idx
= slice(self
._reverse
_index
(idx
.start
), self
._reverse
_index
(idx
.stop
), -(idx
.step
or 1))
2637 start
, stop
, step
= idx
.start
, idx
.stop
, idx
.step
or 1
2638 elif isinstance(idx
, int):
2640 idx
= self
._reverse
_index
(idx
)
2641 start
, stop
, step
= idx
, idx
, 0
2643 raise TypeError('indices must be integers or slices')
2644 if ((start
or 0) < 0 or (stop
or 0) < 0
2645 or (start
is None and step
< 0)
2646 or (stop
is None and step
> 0)):
2647 # We need to consume the entire iterable to be able to slice from the end
2648 # Obviously, never use this with infinite iterables
2651 return self
._cache
[idx
]
2652 except IndexError as e
:
2653 raise self
.IndexError(e
) from e
2654 n
= max(start
or 0, stop
or 0) - len(self
._cache
) + 1
2656 self
._cache
.extend(itertools
.islice(self
._iterable
, n
))
2658 return self
._cache
[idx
]
2659 except IndexError as e
:
2660 raise self
.IndexError(e
) from e
2664 self
[-1] if self
._reversed
else self
[0]
2665 except self
.IndexError:
2671 return len(self
._cache
)
2673 def __reversed__(self
):
2674 return type(self
)(self
._iterable
, reverse
=not self
._reversed
, _cache
=self
._cache
)
2677 return type(self
)(self
._iterable
, reverse
=self
._reversed
, _cache
=self
._cache
)
2680 # repr and str should mimic a list. So we exhaust the iterable
2681 return repr(self
.exhaust())
2684 return repr(self
.exhaust())
2689 class IndexError(IndexError):
2693 # This is only useful for tests
2694 return len(self
.getslice())
2696 def __init__(self
, pagefunc
, pagesize
, use_cache
=True):
2697 self
._pagefunc
= pagefunc
2698 self
._pagesize
= pagesize
2699 self
._pagecount
= float('inf')
2700 self
._use
_cache
= use_cache
2703 def getpage(self
, pagenum
):
2704 page_results
= self
._cache
.get(pagenum
)
2705 if page_results
is None:
2706 page_results
= [] if pagenum
> self
._pagecount
else list(self
._pagefunc
(pagenum
))
2708 self
._cache
[pagenum
] = page_results
2711 def getslice(self
, start
=0, end
=None):
2712 return list(self
._getslice
(start
, end
))
2714 def _getslice(self
, start
, end
):
2715 raise NotImplementedError('This method must be implemented by subclasses')
2717 def __getitem__(self
, idx
):
2718 assert self
._use
_cache
, 'Indexing PagedList requires cache'
2719 if not isinstance(idx
, int) or idx
< 0:
2720 raise TypeError('indices must be non-negative integers')
2721 entries
= self
.getslice(idx
, idx
+ 1)
2723 raise self
.IndexError()
2727 class OnDemandPagedList(PagedList
):
2728 """Download pages until a page with less than maximum results"""
2730 def _getslice(self
, start
, end
):
2731 for pagenum
in itertools
.count(start
// self
._pagesize
):
2732 firstid
= pagenum
* self
._pagesize
2733 nextfirstid
= pagenum
* self
._pagesize
+ self
._pagesize
2734 if start
>= nextfirstid
:
2738 start
% self
._pagesize
2739 if firstid
<= start
< nextfirstid
2742 ((end
- 1) % self
._pagesize
) + 1
2743 if (end
is not None and firstid
<= end
<= nextfirstid
)
2747 page_results
= self
.getpage(pagenum
)
2749 self
._pagecount
= pagenum
- 1
2751 if startv
!= 0 or endv
is not None:
2752 page_results
= page_results
[startv
:endv
]
2753 yield from page_results
2755 # A little optimization - if current page is not "full", ie. does
2756 # not contain page_size videos then we can assume that this page
2757 # is the last one - there are no more ids on further pages -
2758 # i.e. no need to query again.
2759 if len(page_results
) + startv
< self
._pagesize
:
2762 # If we got the whole page, but the next page is not interesting,
2763 # break out early as well
2764 if end
== nextfirstid
:
2768 class InAdvancePagedList(PagedList
):
2769 """PagedList with total number of pages known in advance"""
2771 def __init__(self
, pagefunc
, pagecount
, pagesize
):
2772 PagedList
.__init
__(self
, pagefunc
, pagesize
, True)
2773 self
._pagecount
= pagecount
2775 def _getslice(self
, start
, end
):
2776 start_page
= start
// self
._pagesize
2777 end_page
= self
._pagecount
if end
is None else min(self
._pagecount
, end
// self
._pagesize
+ 1)
2778 skip_elems
= start
- start_page
* self
._pagesize
2779 only_more
= None if end
is None else end
- start
2780 for pagenum
in range(start_page
, end_page
):
2781 page_results
= self
.getpage(pagenum
)
2783 page_results
= page_results
[skip_elems
:]
2785 if only_more
is not None:
2786 if len(page_results
) < only_more
:
2787 only_more
-= len(page_results
)
2789 yield from page_results
[:only_more
]
2791 yield from page_results
2794 class PlaylistEntries
:
2795 MissingEntry
= object()
2796 is_exhausted
= False
2798 def __init__(self
, ydl
, info_dict
):
2801 # _entries must be assigned now since infodict can change during iteration
2802 entries
= info_dict
.get('entries')
2804 raise EntryNotInPlaylist('There are no entries')
2805 elif isinstance(entries
, list):
2806 self
.is_exhausted
= True
2808 requested_entries
= info_dict
.get('requested_entries')
2809 self
.is_incomplete
= requested_entries
is not None
2810 if self
.is_incomplete
:
2811 assert self
.is_exhausted
2812 self
._entries
= [self
.MissingEntry
] * max(requested_entries
or [0])
2813 for i
, entry
in zip(requested_entries
, entries
):
2814 self
._entries
[i
- 1] = entry
2815 elif isinstance(entries
, (list, PagedList
, LazyList
)):
2816 self
._entries
= entries
2818 self
._entries
= LazyList(entries
)
2820 PLAYLIST_ITEMS_RE
= re
.compile(r
'''(?x)
2821 (?P<start>[+-]?\d+)?
2823 (?P<end>[+-]?\d+|inf(?:inite)?)?
2824 (?::(?P<step>[+-]?\d+))?
2828 def parse_playlist_items(cls
, string
):
2829 for segment
in string
.split(','):
2831 raise ValueError('There is two or more consecutive commas')
2832 mobj
= cls
.PLAYLIST_ITEMS_RE
.fullmatch(segment
)
2834 raise ValueError(f
'{segment!r} is not a valid specification')
2835 start
, end
, step
, has_range
= mobj
.group('start', 'end', 'step', 'range')
2836 if int_or_none(step
) == 0:
2837 raise ValueError(f
'Step in {segment!r} cannot be zero')
2838 yield slice(int_or_none(start
), float_or_none(end
), int_or_none(step
)) if has_range
else int(start
)
2840 def get_requested_items(self
):
2841 playlist_items
= self
.ydl
.params
.get('playlist_items')
2842 playlist_start
= self
.ydl
.params
.get('playliststart', 1)
2843 playlist_end
= self
.ydl
.params
.get('playlistend')
2844 # For backwards compatibility, interpret -1 as whole list
2845 if playlist_end
in (-1, None):
2847 if not playlist_items
:
2848 playlist_items
= f
'{playlist_start}:{playlist_end}'
2849 elif playlist_start
!= 1 or playlist_end
:
2850 self
.ydl
.report_warning('Ignoring playliststart and playlistend because playlistitems was given', only_once
=True)
2852 for index
in self
.parse_playlist_items(playlist_items
):
2853 for i
, entry
in self
[index
]:
2858 # The item may have just been added to archive. Don't break due to it
2859 if not self
.ydl
.params
.get('lazy_playlist'):
2860 # TODO: Add auto-generated fields
2861 self
.ydl
._match
_entry
(entry
, incomplete
=True, silent
=True)
2862 except (ExistingVideoReached
, RejectedVideoReached
):
2865 def get_full_count(self
):
2866 if self
.is_exhausted
and not self
.is_incomplete
:
2868 elif isinstance(self
._entries
, InAdvancePagedList
):
2869 if self
._entries
._pagesize
== 1:
2870 return self
._entries
._pagecount
2872 @functools.cached_property
2874 if isinstance(self
._entries
, list):
2877 entry
= self
._entries
[i
]
2879 entry
= self
.MissingEntry
2880 if not self
.is_incomplete
:
2881 raise self
.IndexError()
2882 if entry
is self
.MissingEntry
:
2883 raise EntryNotInPlaylist(f
'Entry {i + 1} cannot be found')
2888 return type(self
.ydl
)._handle
_extraction
_exceptions
(lambda _
, i
: self
._entries
[i
])(self
.ydl
, i
)
2889 except (LazyList
.IndexError, PagedList
.IndexError):
2890 raise self
.IndexError()
2893 def __getitem__(self
, idx
):
2894 if isinstance(idx
, int):
2895 idx
= slice(idx
, idx
)
2897 # NB: PlaylistEntries[1:10] => (0, 1, ... 9)
2898 step
= 1 if idx
.step
is None else idx
.step
2899 if idx
.start
is None:
2900 start
= 0 if step
> 0 else len(self
) - 1
2902 start
= idx
.start
- 1 if idx
.start
>= 0 else len(self
) + idx
.start
2904 # NB: Do not call len(self) when idx == [:]
2905 if idx
.stop
is None:
2906 stop
= 0 if step
< 0 else float('inf')
2908 stop
= idx
.stop
- 1 if idx
.stop
>= 0 else len(self
) + idx
.stop
2909 stop
+= [-1, 1][step
> 0]
2911 for i
in frange(start
, stop
, step
):
2915 entry
= self
._getter
(i
)
2916 except self
.IndexError:
2917 self
.is_exhausted
= True
2924 return len(tuple(self
[:]))
2926 class IndexError(IndexError):
def uppercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def lowercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    return urllib.parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    url_parsed = urllib.parse.urlparse(url)
    return url_parsed._replace(
        netloc=url_parsed.netloc.encode('idna').decode('ascii'),
        path=escape_rfc3986(url_parsed.path),
        params=escape_rfc3986(url_parsed.params),
        query=escape_rfc3986(url_parsed.query),
        fragment=escape_rfc3986(url_parsed.fragment)
    ).geturl()


def parse_qs(url, **kwargs):
    return urllib.parse.parse_qs(urllib.parse.urlparse(url).query, **kwargs)
2967 def read_batch_urls(batch_fd
):
2969 if not isinstance(url
, str):
2970 url
= url
.decode('utf-8', 'replace')
2971 BOM_UTF8
= ('\xef\xbb\xbf', '\ufeff')
2972 for bom
in BOM_UTF8
:
2973 if url
.startswith(bom
):
2974 url
= url
[len(bom
):]
2976 if not url
or url
.startswith(('#', ';', ']')):
2978 # "#" cannot be stripped out since it is part of the URI
2979 # However, it can be safely stripped out if following a whitespace
2980 return re
.split(r
'\s#', url
, 1)[0].rstrip()
2982 with contextlib
.closing(batch_fd
) as fd
:
2983 return [url
for url
in map(fixup
, fd
) if url
]
2986 def urlencode_postdata(*args
, **kargs
):
2987 return urllib
.parse
.urlencode(*args
, **kargs
).encode('ascii')
2990 def update_url(url
, *, query_update
=None, **kwargs
):
2991 """Replace URL components specified by kwargs
2992 @param url str or parse url tuple
2993 @param query_update update query
2996 if isinstance(url
, str):
2997 if not kwargs
and not query_update
:
3000 url
= urllib
.parse
.urlparse(url
)
3002 assert 'query' not in kwargs
, 'query_update and query cannot be specified at the same time'
3003 kwargs
['query'] = urllib
.parse
.urlencode({
3004 **urllib
.parse
.parse_qs(url
.query
),
3007 return urllib
.parse
.urlunparse(url
._replace
(**kwargs
))
3010 def update_url_query(url
, query
):
3011 return update_url(url
, query_update
=query
)
3014 def update_Request(req
, url
=None, data
=None, headers
=None, query
=None):
3015 req_headers
= req
.headers
.copy()
3016 req_headers
.update(headers
or {})
3017 req_data
= data
or req
.data
3018 req_url
= update_url_query(url
or req
.get_full_url(), query
)
3019 req_get_method
= req
.get_method()
3020 if req_get_method
== 'HEAD':
3021 req_type
= HEADRequest
3022 elif req_get_method
== 'PUT':
3023 req_type
= PUTRequest
3025 req_type
= urllib
.request
.Request
3027 req_url
, data
=req_data
, headers
=req_headers
,
3028 origin_req_host
=req
.origin_req_host
, unverifiable
=req
.unverifiable
)
3029 if hasattr(req
, 'timeout'):
3030 new_req
.timeout
= req
.timeout
3034 def _multipart_encode_impl(data
, boundary
):
3035 content_type
= 'multipart/form-data; boundary=%s' % boundary
3038 for k
, v
in data
.items():
3039 out
+= b
'--' + boundary
.encode('ascii') + b
'\r\n'
3040 if isinstance(k
, str):
3042 if isinstance(v
, str):
3044 # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
3045 # suggests sending UTF-8 directly. Firefox sends UTF-8, too
3046 content
= b
'Content-Disposition: form-data; name="' + k
+ b
'"\r\n\r\n' + v
+ b
'\r\n'
3047 if boundary
.encode('ascii') in content
:
3048 raise ValueError('Boundary overlaps with data')
3051 out
+= b
'--' + boundary
.encode('ascii') + b
'--\r\n'
3053 return out
, content_type
3056 def multipart_encode(data
, boundary
=None):
3058 Encode a dict to RFC 7578-compliant form-data
3061 A dict where keys and values can be either Unicode or bytes-like
3064 If specified a Unicode object, it's used as the boundary. Otherwise
3065 a random boundary is generated.
3067 Reference: https://tools.ietf.org/html/rfc7578
3069 has_specified_boundary
= boundary
is not None
3072 if boundary
is None:
3073 boundary
= '---------------' + str(random
.randrange(0x0fffffff, 0xffffffff))
3076 out
, content_type
= _multipart_encode_impl(data
, boundary
)
3079 if has_specified_boundary
:
3083 return out
, content_type
3086 def is_iterable_like(x
, allowed_types
=collections
.abc
.Iterable
, blocked_types
=NO_DEFAULT
):
3087 if blocked_types
is NO_DEFAULT
:
3088 blocked_types
= (str, bytes, collections
.abc
.Mapping
)
3089 return isinstance(x
, allowed_types
) and not isinstance(x
, blocked_types
)
3092 def variadic(x
, allowed_types
=NO_DEFAULT
):
3093 if not isinstance(allowed_types
, (tuple, type)):
3094 deprecation_warning('allowed_types should be a tuple or a type')
3095 allowed_types
= tuple(allowed_types
)
3096 return x
if is_iterable_like(x
, blocked_types
=allowed_types
) else (x
, )
3099 def try_call(*funcs
, expected_type
=None, args
=[], kwargs
={}):
3102 val
= f(*args
, **kwargs
)
3103 except (AttributeError, KeyError, TypeError, IndexError, ValueError, ZeroDivisionError):
3106 if expected_type
is None or isinstance(val
, expected_type
):
3110 def try_get(src
, getter
, expected_type
=None):
3111 return try_call(*variadic(getter
), args
=(src
,), expected_type
=expected_type
)
3114 def filter_dict(dct
, cndn
=lambda _
, v
: v
is not None):
3115 return {k: v for k, v in dct.items() if cndn(k, v)}
3118 def merge_dicts(*dicts
):
3120 for a_dict
in dicts
:
3121 for k
, v
in a_dict
.items():
3122 if (v
is not None and k
not in merged
3123 or isinstance(v
, str) and merged
[k
] == ''):
3128 def encode_compat_str(string
, encoding
=preferredencoding(), errors
='strict'):
3129 return string
if isinstance(string
, str) else str(string
, encoding
, errors
)
3141 TV_PARENTAL_GUIDELINES
= {
3151 def parse_age_limit(s
):
3152 # isinstance(False, int) is True. So type() must be used instead
3153 if type(s
) is int: # noqa: E721
3154 return s
if 0 <= s
<= 21 else None
3155 elif not isinstance(s
, str):
3157 m
= re
.match(r
'^(?P<age>\d{1,2})\+?$', s
)
3159 return int(m
.group('age'))
3162 return US_RATINGS
[s
]
3163 m
= re
.match(r
'^TV[_-]?(%s)$' % '|'.join(k
[3:] for k
in TV_PARENTAL_GUIDELINES
), s
)
3165 return TV_PARENTAL_GUIDELINES
['TV-' + m
.group(1)]
3169 def strip_jsonp(code
):
3172 (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
3173 (?:\s*&&\s*(?P=func_name))?
3174 \s*\(\s*(?P<callback_data>.*)\);?
3175 \s*?(?://[^\n]*)*$''',
3176 r
'\g<callback_data>', code
)
3179 def js_to_json(code
, vars={}, *, strict
=False):
3180 # vars is a dict of var, val pairs to substitute
3181 STRING_QUOTES
= '\'"`'
3182 STRING_RE
= '|'.join(rf
'{q}(?:\\.|[^\\{q}])*{q}' for q
in STRING_QUOTES
)
3183 COMMENT_RE
= r
'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
3184 SKIP_RE
= fr
'\s*(?:{COMMENT_RE})?\s*'
3186 (fr
'(?s)^(0[xX][0-9a-fA-F]+){SKIP_RE}:?$', 16),
3187 (fr
'(?s)^(0+[0-7]+){SKIP_RE}:?$', 8),
3190 def process_escape(match
):
3191 JSON_PASSTHROUGH_ESCAPES
= R
'"\bfnrtu'
3192 escape
= match
.group(1) or match
.group(2)
3194 return (Rf
'\{escape}' if escape
in JSON_PASSTHROUGH_ESCAPES
3195 else R
'\u00' if escape
== 'x'
3196 else '' if escape
== '\n'
3199 def template_substitute(match
):
3200 evaluated
= js_to_json(match
.group(1), vars, strict
=strict
)
3201 if evaluated
[0] == '"':
3202 return json
.loads(evaluated
)
3207 if v
in ('true', 'false', 'null'):
3209 elif v
in ('undefined', 'void 0'):
3211 elif v
.startswith('/*') or v
.startswith('//') or v
.startswith('!') or v
== ',':
3214 if v
[0] in STRING_QUOTES
:
3215 v
= re
.sub(r
'(?s)\${([^}]+)}', template_substitute
, v
[1:-1]) if v
[0] == '`' else v
[1:-1]
3216 escaped
= re
.sub(r
'(?s)(")|\\(.)', process_escape
, v
)
3217 return f
'"{escaped}"'
3219 for regex
, base
in INTEGER_TABLE
:
3220 im
= re
.match(regex
, v
)
3222 i
= int(im
.group(1), base
)
3223 return f
'"{i}":' if v
.endswith(':') else str(i
)
3229 except json
.JSONDecodeError
:
3230 return json
.dumps(vars[v
])
3237 raise ValueError(f
'Unknown value: {v}')
3239 def create_map(mobj
):
3240 return json
.dumps(dict(json
.loads(js_to_json(mobj
.group(1) or '[]', vars=vars))))
3242 code
= re
.sub(r
'new Map\((\[.*?\])?\)', create_map
, code
)
3244 code
= re
.sub(r
'new Date\((".+")\)', r
'\g<1>', code
)
3245 code
= re
.sub(r
'new \w+\((.*?)\)', lambda m
: json
.dumps(m
.group(0)), code
)
3246 code
= re
.sub(r
'parseInt\([^\d]+(\d+)[^\d]+\)', r
'\1', code
)
3247 code
= re
.sub(r
'\(function\([^)]*\)\s*\{[^}]*\}\s*\)\s*\(\s*(["\'][^
)]*["\'])\s*\)', r'\1', code)
3249 return re.sub(rf'''(?sx)
3251 {COMMENT_RE}|,(?={SKIP_RE}[\]}}])|
3252 void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
3253 \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{SKIP_RE}:)?|
3254 [0-9]+(?={SKIP_RE}:)|
3259 def qualities(quality_ids):
3260 """ Get a numeric quality value out of a list of possible values """
3263 return quality_ids.index(qid)
3269 POSTPROCESS_WHEN = ('pre_process', 'after_filter', 'video', 'before_dl', 'post_process', 'after_move', 'after_video', 'playlist')
3273 'default': '%(title)s [%(id)s].%(ext)s',
3274 'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
3280 'description': 'description',
3281 'annotation': 'annotations.xml',
3282 'infojson': 'info.json',
3285 'pl_thumbnail': None,
3286 'pl_description': 'description',
3287 'pl_infojson': 'info.json',
3290 # As of [1] format syntax is:
3291 # %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
3292 # 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
3293 STR_FORMAT_RE_TMPL = r'''(?x)
3294 (?<!%)(?P<prefix>(?:%%)*)
3296 (?P<has_key>\((?P<key>{0})\))?
3298 (?P<conversion>[#0\-+ ]+)?
3300 (?P<precision>\.\d+)?
3301 (?P<len_mod>[hlL])? # unused in python
3302 {1} # conversion type
3307 STR_FORMAT_TYPES = 'diouxXeEfFgGcrsa'
3310 def limit_length(s, length):
3311 """ Add ellipses to overly long strings """
3316 return s[:length - len(ELLIPSES)] + ELLIPSES
3320 def version_tuple(v):
3321 return tuple(int(e) for e in re.split(r'[-.]', v))
3324 def is_outdated_version(version, limit, assume_new=True):
3326 return not assume_new
3328 return version_tuple(version) < version_tuple(limit)
3330 return not assume_new
3333 def ytdl_is_updateable():
3334 """ Returns if yt-dlp can be updated with -U """
3336 from ..update import is_non_updateable
3338 return not is_non_updateable()
3341 def args_to_str(args):
3342 # Get a short string representation for a subprocess command
3343 return ' '.join(compat_shlex_quote(a) for a in args)
3346 def error_to_str(err):
3347 return f'{type(err).__name__}: {err}'
3350 def mimetype2ext(mt, default=NO_DEFAULT):
3351 if not isinstance(mt, str):
3352 if default is not NO_DEFAULT:
3368 'x-matroska': 'mkv',
3370 'x-mp4-fragmented': 'mp4',
3375 # application (streaming playlists)
3379 'vnd.apple.mpegurl': 'm3u8',
3380 'vnd.ms-sstr+xml': 'ism',
3381 'x-mpegurl': 'm3u8',
3385 # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3.
3386 # Using .mp3 as it's the most popular one
3387 'audio/mpeg': 'mp3',
3388 'audio/webm': 'webm',
3389 'audio/x-matroska': 'mka',
3390 'audio/x-mpegurl': 'm3u',
3398 'x-realaudio': 'ra',
3409 'vnd.wap.wbmp': 'wbmp',
3416 'filmstrip+json': 'fs',
3417 'smptett+xml': 'tt',
3420 'x-ms-sami': 'sami',
3429 mimetype = mt.partition(';')[0].strip().lower()
3430 _, _, subtype = mimetype.rpartition('/')
3432 ext = traversal.traverse_obj(MAP, mimetype, subtype, subtype.rsplit('+')[-1])
3435 elif default is not NO_DEFAULT:
3437 return subtype.replace('+', '.')
3440 def ext2mimetype(ext_or_url):
3443 if '.' not in ext_or_url:
3444 ext_or_url = f'file.{ext_or_url}'
3445 return mimetypes.guess_type(ext_or_url)[0]
3448 def parse_codecs(codecs_str):
3449 # http://tools.ietf.org/html/rfc6381
3452 split_codecs = list(filter(None, map(
3453 str.strip, codecs_str.strip().strip(',').split(','))))
3454 vcodec, acodec, scodec, hdr = None, None, None, None
3455 for full_codec in split_codecs:
3456 parts = re.sub(r'0+(?=\d)', '', full_codec).split('.')
3457 if parts[0] in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
3458 'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
3462 if parts[0] in ('dvh1', 'dvhe'):
3464 elif parts[0] == 'av1' and traversal.traverse_obj(parts, 3) == '10':
3466 elif parts[:2] == ['vp9', '2']:
3468 elif parts[0] in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-4',
3469 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
3470 acodec = acodec or full_codec
3471 elif parts[0] in ('stpp', 'wvtt'):
3472 scodec = scodec or full_codec
3474 write_string(f'WARNING: Unknown codec {full_codec}\n')
3475 if vcodec or acodec or scodec:
3477 'vcodec': vcodec or 'none',
3478 'acodec': acodec or 'none',
3479 'dynamic_range': hdr,
3480 **({'scodec': scodec} if scodec is not None else {}),
3482 elif len(split_codecs) == 2:
3484 'vcodec': split_codecs[0],
3485 'acodec': split_codecs[1],
3490 def get_compatible_ext(*, vcodecs, acodecs, vexts, aexts, preferences=None):
3491 assert len(vcodecs) == len(vexts) and len(acodecs) == len(aexts)
3493 allow_mkv = not preferences or 'mkv' in preferences
3495 if allow_mkv and max(len(acodecs), len(vcodecs)) > 1:
3496 return 'mkv' # TODO: any other format allows this?
3498 # TODO: All codecs supported by parse_codecs isn't handled here
3499 COMPATIBLE_CODECS = {
3501 'av1', 'hevc', 'avc1', 'mp4a', 'ac-4', # fourcc (m3u8, mpd)
3502 'h264', 'aacl', 'ec-3', # Set in ISM
3505 'av1', 'vp9', 'vp8', 'opus', 'vrbs',
3506 'vp9x', 'vp8x', # in the webm spec
3510 sanitize_codec = functools.partial(try_get, getter=lambda x: x[0].split('.')[0].replace('0', ''))
3511 vcodec, acodec = sanitize_codec(vcodecs), sanitize_codec(acodecs)
3513 for ext in preferences or COMPATIBLE_CODECS.keys():
3514 codec_set = COMPATIBLE_CODECS.get(ext, set())
3515 if ext == 'mkv' or codec_set.issuperset((vcodec, acodec)):
3519 {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma', 'mov'},
3522 for ext in preferences or vexts:
3523 current_exts = {ext, *vexts, *aexts}
3524 if ext == 'mkv' or current_exts == {ext} or any(
3525 ext_sets.issuperset(current_exts) for ext_sets in COMPATIBLE_EXTS):
3527 return 'mkv' if allow_mkv else preferences[-1]
3530 def urlhandle_detect_ext(url_handle, default=NO_DEFAULT):
3531 getheader = url_handle.headers.get
3533 cd = getheader('Content-Disposition')
3535 m = re.match(r'attachment;\s*filename="(?P
<filename
>[^
"]+)"', cd)
3537 e = determine_ext(m.group('filename
'), default_ext=None)
3541 meta_ext = getheader('x
-amz
-meta
-name
')
3543 e = meta_ext.rpartition('.')[2]
3547 return mimetype2ext(getheader('Content
-Type
'), default=default)
3550 def encode_data_uri(data, mime_type):
3551 return 'data
:%s;base64
,%s' % (mime_type, base64.b64encode(data).decode('ascii
'))
3554 def age_restricted(content_limit, age_limit):
3555 """ Returns True iff the content should be blocked """
3557 if age_limit is None: # No limit set
3559 if content_limit is None:
3560 return False # Content available for everyone
3561 return age_limit < content_limit
3564 # List of known byte-order-marks (BOM)
3566 (b'\xef\xbb\xbf', 'utf
-8'),
3567 (b'\x00\x00\xfe\xff', 'utf
-32-be
'),
3568 (b'\xff\xfe\x00\x00', 'utf
-32-le
'),
3569 (b'\xff\xfe', 'utf
-16-le
'),
3570 (b'\xfe\xff', 'utf
-16-be
'),
3574 def is_html(first_bytes):
3575 """ Detect whether a file contains HTML by examining its first bytes. """
3578 for bom, enc in BOMS:
3579 while first_bytes.startswith(bom):
3580 encoding, first_bytes = enc, first_bytes[len(bom):]
3582 return re.match(r'^\s
*<', first_bytes.decode(encoding, 'replace
'))
3585 def determine_protocol(info_dict):
3586 protocol = info_dict.get('protocol
')
3587 if protocol is not None:
3590 url = sanitize_url(info_dict['url
'])
3591 if url.startswith('rtmp
'):
3593 elif url.startswith('mms
'):
3595 elif url.startswith('rtsp
'):
3598 ext = determine_ext(url)
3600 return 'm3u8
' if info_dict.get('is_live
') else 'm3u8_native
'
3604 return urllib.parse.urlparse(url).scheme
3607 def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False):
3608 """ Render a list of rows, each as a list of values.
3609 Text after a \t will be right aligned """
3611 return len(remove_terminal_sequences(string).replace('\t', ''))
3613 def get_max_lens(table):
3614 return [max(width(str(v)) for v in col) for col in zip(*table)]
3616 def filter_using_list(row, filterArray):
3617 return [col for take, col in itertools.zip_longest(filterArray, row, fillvalue=True) if take]
3619 max_lens = get_max_lens(data) if hide_empty else []
3620 header_row = filter_using_list(header_row, max_lens)
3621 data = [filter_using_list(row, max_lens) for row in data]
3623 table = [header_row] + data
3624 max_lens = get_max_lens(table)
3627 table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data
3628 table[1][-1] = table[1][-1][:-extra_gap * len(delim)] # Remove extra_gap from end of delimiter
3630 for pos, text in enumerate(map(str, row)):
3632 row[pos] = text.replace('\t', ' ' * (max_lens[pos] - width(text))) + ' ' * extra_gap
3634 row[pos] = text + ' ' * (max_lens[pos] - width(text) + extra_gap)
3635 ret = '\n'.join(''.join(row).rstrip() for row in table)
3639 def _match_one(filter_part, dct, incomplete):
3640 # TODO: Generalize code with YoutubeDL._build_format_filter
3641 STRING_OPERATORS = {
3642 '*=': operator.contains,
3643 '^
=': lambda attr, value: attr.startswith(value),
3644 '$
=': lambda attr, value: attr.endswith(value),
3645 '~
=': lambda attr, value: re.search(value, attr),
3647 COMPARISON_OPERATORS = {
3649 '<=': operator.le, # "<=" must be defined above "<"
3656 if isinstance(incomplete, bool):
3657 is_incomplete = lambda _: incomplete
3659 is_incomplete = lambda k: k in incomplete
3661 operator_rex = re.compile(r'''(?x)
3663 \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
3665 (?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
3668 ''' % '|
'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
3669 m = operator_rex.fullmatch(filter_part.strip())
3672 unnegated_op = COMPARISON_OPERATORS[m['op
']]
3674 op = lambda attr, value: not unnegated_op(attr, value)
3677 comparison_value = m['quotedstrval
'] or m['strval
'] or m['intval
']
3679 comparison_value = comparison_value.replace(r'\
%s' % m['quote
'], m['quote
'])
3680 actual_value = dct.get(m['key
'])
3681 numeric_comparison = None
3682 if isinstance(actual_value, (int, float)):
3683 # If the original field is a string and matching comparisonvalue is
3684 # a number we should respect the origin of the original field
3685 # and process comparison value as a string (see
3686 # https://github.com/ytdl-org/youtube-dl/issues/11082)
3688 numeric_comparison = int(comparison_value)
3690 numeric_comparison = parse_filesize(comparison_value)
3691 if numeric_comparison is None:
3692 numeric_comparison = parse_filesize(f'{comparison_value}B
')
3693 if numeric_comparison is None:
3694 numeric_comparison = parse_duration(comparison_value)
3695 if numeric_comparison is not None and m['op
'] in STRING_OPERATORS:
3696 raise ValueError('Operator
%s only supports string values
!' % m['op
'])
3697 if actual_value is None:
3698 return is_incomplete(m['key
']) or m['none_inclusive
']
3699 return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)
3702 '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
3703 '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
3705 operator_rex = re.compile(r'''(?x)
3706 (?P<op>%s)\s*(?P<key>[a-z_]+)
3707 ''' % '|
'.join(map(re.escape, UNARY_OPERATORS.keys())))
3708 m = operator_rex.fullmatch(filter_part.strip())
3710 op = UNARY_OPERATORS[m.group('op
')]
3711 actual_value = dct.get(m.group('key
'))
3712 if is_incomplete(m.group('key
')) and actual_value is None:
3714 return op(actual_value)
3716 raise ValueError('Invalid
filter part
%r' % filter_part)
3719 def match_str(filter_str, dct, incomplete=False):
3720 """ Filter a dictionary with a simple string syntax.
3721 @returns Whether the filter passes
3722 @param incomplete Set of keys that is expected to be missing from dct.
3723 Can be True/False to indicate all/none of the keys may be missing.
3724 All conditions on incomplete keys pass if the key is missing
3727 _match_one(filter_part.replace(r'\
&', '&'), dct, incomplete)
3728 for filter_part in re.split(r'(?
<!\\)&', filter_str))
3731 def match_filter_func(filters, breaking_filters=None):
3732 if not filters and not breaking_filters:
3734 breaking_filters = match_filter_func(breaking_filters) or (lambda _, __: None)
3735 filters = set(variadic(filters or []))
3737 interactive = '-' in filters
3741 def _match_func(info_dict, incomplete=False):
3742 ret = breaking_filters(info_dict, incomplete)
3744 raise RejectedVideoReached(ret)
3746 if not filters or any(match_str(f, info_dict, incomplete) for f in filters):
3747 return NO_DEFAULT if interactive and not incomplete else None
3749 video_title = info_dict.get('title
') or info_dict.get('id') or 'entry
'
3750 filter_str = ') |
('.join(map(str.strip, filters))
3751 return f'{video_title} does
not pass filter ({filter_str}
), skipping
..'
3755 class download_range_func:
3756 def __init__(self, chapters, ranges):
3757 self.chapters, self.ranges = chapters, ranges
3759 def __call__(self, info_dict, ydl):
3760 if not self.ranges and not self.chapters:
3763 warning = ('There are no chapters matching the regex
' if info_dict.get('chapters
')
3764 else 'Cannot match chapters since chapter information
is unavailable
')
3765 for regex in self.chapters or []:
3766 for i, chapter in enumerate(info_dict.get('chapters
') or []):
3767 if re.search(regex, chapter['title
']):
3769 yield {**chapter, 'index': i}
3770 if self.chapters and warning:
3771 ydl.to_screen(f'[info
] {info_dict["id"]}
: {warning}
')
3773 yield from ({'start_time': start, 'end_time': end} for start, end in self.ranges or [])
3775 def __eq__(self, other):
3776 return (isinstance(other, download_range_func)
3777 and self.chapters == other.chapters and self.ranges == other.ranges)
3780 return f'{__name__}
.{type(self).__name__}
({self.chapters}
, {self.ranges}
)'
3783 def parse_dfxp_time_expr(time_expr):
3787 mobj = re.match(rf'^
(?P
<time_offset
>{NUMBER_RE}
)s?$
', time_expr)
3789 return float(mobj.group('time_offset
'))
3791 mobj = re.match(r'^
(\d
+):(\d\d
):(\d\
d(?
:(?
:\
.|
:)\d
+)?
)$
', time_expr)
3793 return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
def srt_subtitles_timecode(seconds):
    return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)


def ass_subtitles_timecode(seconds):
    time = timetuple_from_msec(seconds * 1000)
    return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)
3805 def dfxp2srt(dfxp_data):
3807 @param dfxp_data A bytes-like object containing DFXP data
3808 @returns A unicode object containing converted SRT data
3810 LEGACY_NAMESPACES = (
3811 (b'http
://www
.w3
.org
/ns
/ttml
', [
3812 b'http
://www
.w3
.org
/2004/11/ttaf1
',
3813 b'http
://www
.w3
.org
/2006/04/ttaf1
',
3814 b'http
://www
.w3
.org
/2006/10/ttaf1
',
3816 (b'http
://www
.w3
.org
/ns
/ttml
#styling', [
3817 b
'http://www.w3.org/ns/ttml#style',
3821 SUPPORTED_STYLING
= [
3830 _x
= functools
.partial(xpath_with_ns
, ns_map
={
3831 'xml': 'http://www.w3.org/XML/1998/namespace',
3832 'ttml': 'http://www.w3.org/ns/ttml',
3833 'tts': 'http://www.w3.org/ns/ttml#styling',
3839 class TTMLPElementParser
:
3841 _unclosed_elements
= []
3842 _applied_styles
= []
3844 def start(self
, tag
, attrib
):
3845 if tag
in (_x('ttml:br'), 'br'):
3848 unclosed_elements
= []
3850 element_style_id
= attrib
.get('style')
3852 style
.update(default_style
)
3853 if element_style_id
:
3854 style
.update(styles
.get(element_style_id
, {}))
3855 for prop
in SUPPORTED_STYLING
:
3856 prop_val
= attrib
.get(_x('tts:' + prop
))
3858 style
[prop
] = prop_val
3861 for k
, v
in sorted(style
.items()):
3862 if self
._applied
_styles
and self
._applied
_styles
[-1].get(k
) == v
:
3865 font
+= ' color="%s"' % v
3866 elif k
== 'fontSize':
3867 font
+= ' size="%s"' % v
3868 elif k
== 'fontFamily':
3869 font
+= ' face="%s"' % v
3870 elif k
== 'fontWeight' and v
== 'bold':
3872 unclosed_elements
.append('b')
3873 elif k
== 'fontStyle' and v
== 'italic':
3875 unclosed_elements
.append('i')
3876 elif k
== 'textDecoration' and v
== 'underline':
3878 unclosed_elements
.append('u')
3880 self
._out
+= '<font' + font
+ '>'
3881 unclosed_elements
.append('font')
3883 if self
._applied
_styles
:
3884 applied_style
.update(self
._applied
_styles
[-1])
3885 applied_style
.update(style
)
3886 self
._applied
_styles
.append(applied_style
)
3887 self
._unclosed
_elements
.append(unclosed_elements
)
3890 if tag
not in (_x('ttml:br'), 'br'):
3891 unclosed_elements
= self
._unclosed
_elements
.pop()
3892 for element
in reversed(unclosed_elements
):
3893 self
._out
+= '</%s>' % element
3894 if unclosed_elements
and self
._applied
_styles
:
3895 self
._applied
_styles
.pop()
3897 def data(self
, data
):
3901 return self
._out
.strip()
3903 # Fix UTF-8 encoded file wrongly marked as UTF-16. See https://github.com/yt-dlp/yt-dlp/issues/6543#issuecomment-1477169870
3904 # This will not trigger false positives since only UTF-8 text is being replaced
3905 dfxp_data
= dfxp_data
.replace(b
'encoding=\'UTF-16\'', b
'encoding=\'UTF-8\'')
3907 def parse_node(node
):
3908 target
= TTMLPElementParser()
3909 parser
= xml
.etree
.ElementTree
.XMLParser(target
=target
)
3910 parser
.feed(xml
.etree
.ElementTree
.tostring(node
))
3911 return parser
.close()
3913 for k
, v
in LEGACY_NAMESPACES
:
3915 dfxp_data
= dfxp_data
.replace(ns
, k
)
3917 dfxp
= compat_etree_fromstring(dfxp_data
)
3919 paras
= dfxp
.findall(_x('.//ttml:p')) or dfxp
.findall('.//p')
3922 raise ValueError('Invalid dfxp/TTML subtitle')
3926 for style
in dfxp
.findall(_x('.//ttml:style')):
3927 style_id
= style
.get('id') or style
.get(_x('xml:id'))
3930 parent_style_id
= style
.get('style')
3932 if parent_style_id
not in styles
:
3935 styles
[style_id
] = styles
[parent_style_id
].copy()
3936 for prop
in SUPPORTED_STYLING
:
3937 prop_val
= style
.get(_x('tts:' + prop
))
3939 styles
.setdefault(style_id
, {})[prop
] = prop_val
3945 for p
in ('body', 'div'):
3946 ele
= xpath_element(dfxp
, [_x('.//ttml:' + p
), './/' + p
])
3949 style
= styles
.get(ele
.get('style'))
3952 default_style
.update(style
)
3954 for para
, index
in zip(paras
, itertools
.count(1)):
3955 begin_time
= parse_dfxp_time_expr(para
.attrib
.get('begin'))
3956 end_time
= parse_dfxp_time_expr(para
.attrib
.get('end'))
3957 dur
= parse_dfxp_time_expr(para
.attrib
.get('dur'))
3958 if begin_time
is None:
3963 end_time
= begin_time
+ dur
3964 out
.append('%d\n%s --> %s\n%s\n\n' % (
3966 srt_subtitles_timecode(begin_time
),
3967 srt_subtitles_timecode(end_time
),
def cli_option(params, command_option, param, separator=None):
    param = params.get(param)
    return ([] if param is None
            else [command_option, str(param)] if separator is None
            else [f'{command_option}{separator}{param}'])


def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
    param = params.get(param)
    assert param in (True, False, None)
    return cli_option({True: true_value, False: false_value}, command_option, param, separator)


def cli_valueless_option(params, command_option, param, expected_value=True):
    return [command_option] if params.get(param) == expected_value else []
3990 def cli_configuration_args(argdict
, keys
, default
=[], use_compat
=True):
3991 if isinstance(argdict
, (list, tuple)): # for backward compatibility
3998 assert isinstance(argdict
, dict)
4000 assert isinstance(keys
, (list, tuple))
4001 for key_list
in keys
:
4002 arg_list
= list(filter(
4003 lambda x
: x
is not None,
4004 [argdict
.get(key
.lower()) for key
in variadic(key_list
)]))
4006 return [arg
for args
in arg_list
for arg
in args
]
4010 def _configuration_args(main_key
, argdict
, exe
, keys
=None, default
=[], use_compat
=True):
4011 main_key
, exe
= main_key
.lower(), exe
.lower()
4012 root_key
= exe
if main_key
== exe
else f
'{main_key}+{exe}'
4013 keys
= [f
'{root_key}{k}' for k
in (keys
or [''])]
4014 if root_key
in keys
:
4016 keys
.append((main_key
, exe
))
4017 keys
.append('default')
4020 return cli_configuration_args(argdict
, keys
, default
, use_compat
)
4024 # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
4083 'iw': 'heb', # Replaced by he in 1989 revision
4093 'in': 'ind', # Replaced by id in 1989 revision
4209 'ji': 'yid', # Replaced by yi in 1989 revision
4217 def short2long(cls
, code
):
4218 """Convert language code from ISO 639-1 to ISO 639-2/T"""
4219 return cls
._lang
_map
.get(code
[:2])
4222 def long2short(cls
, code
):
4223 """Convert language code from ISO 639-2/T to ISO 639-1"""
4224 for short_name
, long_name
in cls
._lang
_map
.items():
4225 if long_name
== code
:
4230 # From http://data.okfn.org/data/core/country-list
4232 'AF': 'Afghanistan',
4233 'AX': 'Åland Islands',
4236 'AS': 'American Samoa',
4241 'AG': 'Antigua and Barbuda',
4258 'BO': 'Bolivia, Plurinational State of',
4259 'BQ': 'Bonaire, Sint Eustatius and Saba',
4260 'BA': 'Bosnia and Herzegovina',
4262 'BV': 'Bouvet Island',
4264 'IO': 'British Indian Ocean Territory',
4265 'BN': 'Brunei Darussalam',
4267 'BF': 'Burkina Faso',
4273 'KY': 'Cayman Islands',
4274 'CF': 'Central African Republic',
4278 'CX': 'Christmas Island',
4279 'CC': 'Cocos (Keeling) Islands',
4283 'CD': 'Congo, the Democratic Republic of the',
4284 'CK': 'Cook Islands',
4286 'CI': 'Côte d\'Ivoire',
4291 'CZ': 'Czech Republic',
4295 'DO': 'Dominican Republic',
4298 'SV': 'El Salvador',
4299 'GQ': 'Equatorial Guinea',
4303 'FK': 'Falkland Islands (Malvinas)',
4304 'FO': 'Faroe Islands',
4308 'GF': 'French Guiana',
4309 'PF': 'French Polynesia',
4310 'TF': 'French Southern Territories',
4325 'GW': 'Guinea-Bissau',
4328 'HM': 'Heard Island and McDonald Islands',
4329 'VA': 'Holy See (Vatican City State)',
4336 'IR': 'Iran, Islamic Republic of',
4339 'IM': 'Isle of Man',
4349 'KP': 'Korea, Democratic People\'s Republic of',
4350 'KR': 'Korea, Republic of',
4353 'LA': 'Lao People\'s Democratic Republic',
4359 'LI': 'Liechtenstein',
4363 'MK': 'Macedonia, the Former Yugoslav Republic of',
4370 'MH': 'Marshall Islands',
4376 'FM': 'Micronesia, Federated States of',
4377 'MD': 'Moldova, Republic of',
4388 'NL': 'Netherlands',
4389 'NC': 'New Caledonia',
4390 'NZ': 'New Zealand',
4395 'NF': 'Norfolk Island',
4396 'MP': 'Northern Mariana Islands',
4401 'PS': 'Palestine, State of',
4403 'PG': 'Papua New Guinea',
4406 'PH': 'Philippines',
4410 'PR': 'Puerto Rico',
4414 'RU': 'Russian Federation',
4416 'BL': 'Saint Barthélemy',
4417 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
4418 'KN': 'Saint Kitts and Nevis',
4419 'LC': 'Saint Lucia',
4420 'MF': 'Saint Martin (French part)',
4421 'PM': 'Saint Pierre and Miquelon',
4422 'VC': 'Saint Vincent and the Grenadines',
4425 'ST': 'Sao Tome and Principe',
4426 'SA': 'Saudi Arabia',
4430 'SL': 'Sierra Leone',
4432 'SX': 'Sint Maarten (Dutch part)',
4435 'SB': 'Solomon Islands',
4437 'ZA': 'South Africa',
4438 'GS': 'South Georgia and the South Sandwich Islands',
4439 'SS': 'South Sudan',
4444 'SJ': 'Svalbard and Jan Mayen',
4447 'CH': 'Switzerland',
4448 'SY': 'Syrian Arab Republic',
4449 'TW': 'Taiwan, Province of China',
4451 'TZ': 'Tanzania, United Republic of',
4453 'TL': 'Timor-Leste',
4457 'TT': 'Trinidad and Tobago',
4460 'TM': 'Turkmenistan',
4461 'TC': 'Turks and Caicos Islands',
4465 'AE': 'United Arab Emirates',
4466 'GB': 'United Kingdom',
4467 'US': 'United States',
4468 'UM': 'United States Minor Outlying Islands',
4472 'VE': 'Venezuela, Bolivarian Republic of',
4474 'VG': 'Virgin Islands, British',
4475 'VI': 'Virgin Islands, U.S.',
4476 'WF': 'Wallis and Futuna',
4477 'EH': 'Western Sahara',
4481 # Not ISO 3166 codes, but used for IP blocks
4482 'AP': 'Asia/Pacific Region',
4487 def short2full(cls
, code
):
4488 """Convert an ISO 3166-2 country code to the corresponding full name"""
4489 return cls
._country
_map
.get(code
.upper())
4493 # Major IPv4 address blocks per country
4495 'AD': '46.172.224.0/19',
4496 'AE': '94.200.0.0/13',
4497 'AF': '149.54.0.0/17',
4498 'AG': '209.59.64.0/18',
4499 'AI': '204.14.248.0/21',
4500 'AL': '46.99.0.0/16',
4501 'AM': '46.70.0.0/15',
4502 'AO': '105.168.0.0/13',
4503 'AP': '182.50.184.0/21',
4504 'AQ': '23.154.160.0/24',
4505 'AR': '181.0.0.0/12',
4506 'AS': '202.70.112.0/20',
4507 'AT': '77.116.0.0/14',
4508 'AU': '1.128.0.0/11',
4509 'AW': '181.41.0.0/18',
4510 'AX': '185.217.4.0/22',
4511 'AZ': '5.197.0.0/16',
4512 'BA': '31.176.128.0/17',
4513 'BB': '65.48.128.0/17',
4514 'BD': '114.130.0.0/16',
4516 'BF': '102.178.0.0/15',
4517 'BG': '95.42.0.0/15',
4518 'BH': '37.131.0.0/17',
4519 'BI': '154.117.192.0/18',
4520 'BJ': '137.255.0.0/16',
4521 'BL': '185.212.72.0/23',
4522 'BM': '196.12.64.0/18',
4523 'BN': '156.31.0.0/16',
4524 'BO': '161.56.0.0/16',
4525 'BQ': '161.0.80.0/20',
4526 'BR': '191.128.0.0/12',
4527 'BS': '24.51.64.0/18',
4528 'BT': '119.2.96.0/19',
4529 'BW': '168.167.0.0/16',
4530 'BY': '178.120.0.0/13',
4531 'BZ': '179.42.192.0/18',
4532 'CA': '99.224.0.0/11',
4533 'CD': '41.243.0.0/16',
4534 'CF': '197.242.176.0/21',
4535 'CG': '160.113.0.0/16',
4536 'CH': '85.0.0.0/13',
4537 'CI': '102.136.0.0/14',
4538 'CK': '202.65.32.0/19',
4539 'CL': '152.172.0.0/14',
4540 'CM': '102.244.0.0/14',
4541 'CN': '36.128.0.0/10',
4542 'CO': '181.240.0.0/12',
4543 'CR': '201.192.0.0/12',
4544 'CU': '152.206.0.0/15',
4545 'CV': '165.90.96.0/19',
4546 'CW': '190.88.128.0/17',
4547 'CY': '31.153.0.0/16',
4548 'CZ': '88.100.0.0/14',
4550 'DJ': '197.241.0.0/17',
4551 'DK': '87.48.0.0/12',
4552 'DM': '192.243.48.0/20',
4553 'DO': '152.166.0.0/15',
4554 'DZ': '41.96.0.0/12',
4555 'EC': '186.68.0.0/15',
4556 'EE': '90.190.0.0/15',
4557 'EG': '156.160.0.0/11',
4558 'ER': '196.200.96.0/20',
4559 'ES': '88.0.0.0/11',
4560 'ET': '196.188.0.0/14',
4561 'EU': '2.16.0.0/13',
4562 'FI': '91.152.0.0/13',
4563 'FJ': '144.120.0.0/16',
4564 'FK': '80.73.208.0/21',
4565 'FM': '119.252.112.0/20',
4566 'FO': '88.85.32.0/19',
4568 'GA': '41.158.0.0/15',
4570 'GD': '74.122.88.0/21',
4571 'GE': '31.146.0.0/16',
4572 'GF': '161.22.64.0/18',
4573 'GG': '62.68.160.0/19',
4574 'GH': '154.160.0.0/12',
4575 'GI': '95.164.0.0/16',
4576 'GL': '88.83.0.0/19',
4577 'GM': '160.182.0.0/15',
4578 'GN': '197.149.192.0/18',
4579 'GP': '104.250.0.0/19',
4580 'GQ': '105.235.224.0/20',
4581 'GR': '94.64.0.0/13',
4582 'GT': '168.234.0.0/16',
4583 'GU': '168.123.0.0/16',
4584 'GW': '197.214.80.0/20',
4585 'GY': '181.41.64.0/18',
4586 'HK': '113.252.0.0/14',
4587 'HN': '181.210.0.0/16',
4588 'HR': '93.136.0.0/13',
4589 'HT': '148.102.128.0/17',
4590 'HU': '84.0.0.0/14',
4591 'ID': '39.192.0.0/10',
4592 'IE': '87.32.0.0/12',
4593 'IL': '79.176.0.0/13',
4594 'IM': '5.62.80.0/20',
4595 'IN': '117.192.0.0/10',
4596 'IO': '203.83.48.0/21',
4597 'IQ': '37.236.0.0/14',
4598 'IR': '2.176.0.0/12',
4599 'IS': '82.221.0.0/16',
4600 'IT': '79.0.0.0/10',
4601 'JE': '87.244.64.0/18',
4602 'JM': '72.27.0.0/17',
4603 'JO': '176.29.0.0/16',
4604 'JP': '133.0.0.0/8',
4605 'KE': '105.48.0.0/12',
4606 'KG': '158.181.128.0/17',
4607 'KH': '36.37.128.0/17',
4608 'KI': '103.25.140.0/22',
4609 'KM': '197.255.224.0/20',
4610 'KN': '198.167.192.0/19',
4611 'KP': '175.45.176.0/22',
4612 'KR': '175.192.0.0/10',
4613 'KW': '37.36.0.0/14',
4614 'KY': '64.96.0.0/15',
4615 'KZ': '2.72.0.0/13',
4616 'LA': '115.84.64.0/18',
4617 'LB': '178.135.0.0/16',
4618 'LC': '24.92.144.0/20',
4619 'LI': '82.117.0.0/19',
4620 'LK': '112.134.0.0/15',
4621 'LR': '102.183.0.0/16',
4622 'LS': '129.232.0.0/17',
4623 'LT': '78.56.0.0/13',
4624 'LU': '188.42.0.0/16',
4625 'LV': '46.109.0.0/16',
4626 'LY': '41.252.0.0/14',
4627 'MA': '105.128.0.0/11',
4628 'MC': '88.209.64.0/18',
4629 'MD': '37.246.0.0/16',
4630 'ME': '178.175.0.0/17',
4631 'MF': '74.112.232.0/21',
4632 'MG': '154.126.0.0/17',
4633 'MH': '117.103.88.0/21',
4634 'MK': '77.28.0.0/15',
4635 'ML': '154.118.128.0/18',
4636 'MM': '37.111.0.0/17',
4637 'MN': '49.0.128.0/17',
4638 'MO': '60.246.0.0/16',
4639 'MP': '202.88.64.0/20',
4640 'MQ': '109.203.224.0/19',
4641 'MR': '41.188.64.0/18',
4642 'MS': '208.90.112.0/22',
4643 'MT': '46.11.0.0/16',
4644 'MU': '105.16.0.0/12',
4645 'MV': '27.114.128.0/18',
4646 'MW': '102.70.0.0/15',
4647 'MX': '187.192.0.0/11',
4648 'MY': '175.136.0.0/13',
4649 'MZ': '197.218.0.0/15',
4650 'NA': '41.182.0.0/16',
4651 'NC': '101.101.0.0/18',
4652 'NE': '197.214.0.0/18',
4653 'NF': '203.17.240.0/22',
4654 'NG': '105.112.0.0/12',
4655 'NI': '186.76.0.0/15',
4656 'NL': '145.96.0.0/11',
4657 'NO': '84.208.0.0/13',
4658 'NP': '36.252.0.0/15',
4659 'NR': '203.98.224.0/19',
4660 'NU': '49.156.48.0/22',
4661 'NZ': '49.224.0.0/14',
4662 'OM': '5.36.0.0/15',
4663 'PA': '186.72.0.0/15',
4664 'PE': '186.160.0.0/14',
4665 'PF': '123.50.64.0/18',
4666 'PG': '124.240.192.0/19',
4667 'PH': '49.144.0.0/13',
4668 'PK': '39.32.0.0/11',
4669 'PL': '83.0.0.0/11',
4670 'PM': '70.36.0.0/20',
4671 'PR': '66.50.0.0/16',
4672 'PS': '188.161.0.0/16',
4673 'PT': '85.240.0.0/13',
4674 'PW': '202.124.224.0/20',
4675 'PY': '181.120.0.0/14',
4676 'QA': '37.210.0.0/15',
4677 'RE': '102.35.0.0/16',
4678 'RO': '79.112.0.0/13',
4679 'RS': '93.86.0.0/15',
4680 'RU': '5.136.0.0/13',
4681 'RW': '41.186.0.0/16',
4682 'SA': '188.48.0.0/13',
4683 'SB': '202.1.160.0/19',
4684 'SC': '154.192.0.0/11',
4685 'SD': '102.120.0.0/13',
4686 'SE': '78.64.0.0/12',
4687 'SG': '8.128.0.0/10',
4688 'SI': '188.196.0.0/14',
4689 'SK': '78.98.0.0/15',
4690 'SL': '102.143.0.0/17',
4691 'SM': '89.186.32.0/19',
4692 'SN': '41.82.0.0/15',
4693 'SO': '154.115.192.0/18',
4694 'SR': '186.179.128.0/17',
4695 'SS': '105.235.208.0/21',
4696 'ST': '197.159.160.0/19',
4697 'SV': '168.243.0.0/16',
4698 'SX': '190.102.0.0/20',
4700 'SZ': '41.84.224.0/19',
4701 'TC': '65.255.48.0/20',
4702 'TD': '154.68.128.0/19',
4703 'TG': '196.168.0.0/14',
4704 'TH': '171.96.0.0/13',
4705 'TJ': '85.9.128.0/18',
4706 'TK': '27.96.24.0/21',
4707 'TL': '180.189.160.0/20',
4708 'TM': '95.85.96.0/19',
4709 'TN': '197.0.0.0/11',
4710 'TO': '175.176.144.0/21',
4711 'TR': '78.160.0.0/11',
4712 'TT': '186.44.0.0/15',
4713 'TV': '202.2.96.0/19',
4714 'TW': '120.96.0.0/11',
4715 'TZ': '156.156.0.0/14',
4716 'UA': '37.52.0.0/14',
4717 'UG': '102.80.0.0/13',
4719 'UY': '167.56.0.0/13',
4720 'UZ': '84.54.64.0/18',
4721 'VA': '212.77.0.0/19',
4722 'VC': '207.191.240.0/21',
4723 'VE': '186.88.0.0/13',
4724 'VG': '66.81.192.0/20',
4725 'VI': '146.226.0.0/16',
4726 'VN': '14.160.0.0/11',
4727 'VU': '202.80.32.0/20',
4728 'WF': '117.20.32.0/21',
4729 'WS': '202.4.32.0/19',
4730 'YE': '134.35.0.0/16',
4731 'YT': '41.242.116.0/22',
4732 'ZA': '41.0.0.0/11',
4733 'ZM': '102.144.0.0/13',
4734 'ZW': '102.177.192.0/18',
4738 def random_ipv4(cls, code_or_block):
4739 if len(code_or_block) == 2:
4740 block = cls._country_ip_map.get(code_or_block.upper())
4744 block = code_or_block
4745 addr, preflen = block.split('/')
4746 addr_min = struct.unpack('!L', socket.inet_aton(addr))[0]
4747 addr_max = addr_min | (0xffffffff >> int(preflen))
4748 return str(socket.inet_ntoa(
4749 struct.pack('!L', random.randint(addr_min, addr_max))))
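# Usage sketch (illustrative only): a two-letter country code or an explicit CIDR
# block are both accepted; assuming the enclosing class is yt-dlp's GeoUtils:
#   GeoUtils.random_ipv4('DE')            # some address from the DE block above
#   GeoUtils.random_ipv4('192.0.2.0/24')  # some address from the given block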
4752 class PerRequestProxyHandler(urllib.request.ProxyHandler):
4753 def __init__(self, proxies=None):
4754 # Set default handlers
4755 for type in ('http', 'https'):
4756 setattr(self, '%s_open' % type,
4757 lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
4758 meth(r, proxy, type))
4759 urllib.request.ProxyHandler.__init__(self, proxies)
4761 def proxy_open(self, req, proxy, type):
4762 req_proxy = req.headers.get('Ytdl-request-proxy')
4763 if req_proxy is not None:
4765 del req.headers['Ytdl-request-proxy']
4767 if proxy == '__noproxy__':
4768 return None  # No Proxy
4769 if urllib.parse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
4770 req.add_header('Ytdl-socks-proxy', proxy)
4771 # yt-dlp's http/https handlers wrap the socket with SOCKS
4773 return urllib.request.ProxyHandler.proxy_open(
4774 self, req, proxy, type)
4777 # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
4778 # released into Public Domain
4779 # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
4781 def long_to_bytes(n, blocksize=0):
4782 """long_to_bytes(n:long, blocksize:int) : string
4783 Convert a long integer to a byte string.
4785 If optional blocksize is given and greater than zero, pad the front of the
4786 byte string with binary zeros so that the length is a multiple of blocksize.
4789 # after much testing, this algorithm was deemed to be the fastest
4793 s = struct.pack('>I', n & 0xffffffff) + s
4795 # strip off leading zeros
4796 for i in range(len(s)):
4797 if s[i] != b'\000'[0]:
4800 # only happens when n == 0
4804 # add back some pad bytes. this could be done more efficiently w.r.t. the
4805 # de-padding being done above, but sigh...
4806 if blocksize > 0 and len(s) % blocksize:
4807 s = (blocksize - len(s) % blocksize) * b'\000' + s
4811 def bytes_to_long(s):
4812 """bytes_to_long(string) : long
4813 Convert a byte string to a long integer.
4815 This is (essentially) the inverse of long_to_bytes().
4820 extra = (4 - length % 4)
4821 s = b'\000' * extra + s
4822 length = length + extra
4823 for i in range(0, length, 4):
4824 acc = (acc << 32) + struct.unpack('>I', s[i:i + 4])[0]
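# Usage sketch (illustrative only): the two helpers above are inverses of one
# another for non-negative integers, e.g.
#   long_to_bytes(65537) == b'\x01\x00\x01'
#   bytes_to_long(b'\x01\x00\x01') == 65537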
4828 def ohdave_rsa_encrypt(data, exponent, modulus):
4830 Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
4833 data: data to encrypt, bytes-like object
4834 exponent, modulus: parameter e and N of RSA algorithm, both integer
4835 Output: hex string of encrypted data
4837 Limitation: supports one block encryption only
4840 payload = int(binascii.hexlify(data[::-1]), 16)
4841 encrypted = pow(payload, exponent, modulus)
4842 return '%x' % encrypted
4845 def pkcs1pad(data, length):
4847 Padding input data with PKCS#1 scheme
4849 @param {int[]} data input data
4850 @param {int} length target length
4851 @returns {int[]} padded data
4853 if len(data) > length - 11:
4854 raise ValueError('Input data too long for PKCS#1 padding')
4856 pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
4857 return [0, 2] + pseudo_random + [0] + data
4860 def _base_n_table(n, table):
4861 if not table and not n:
4862 raise ValueError('Either table or n must be specified')
4863 table = (table or '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')[:n]
4865 if n and n != len(table):
4866 raise ValueError(f'base {n} exceeds table length {len(table)}')
4870 def encode_base_n(num, n=None, table=None):
4871 """Convert given int to a base-n string"""
4872 table = _base_n_table(n, table)
4876 result, base = '', len(table)
4878 result = table[num % base] + result
4883 def decode_base_n(string, n=None, table=None):
4884 """Convert given base-n string to int"""
4885 table = {char: index for index, char in enumerate(_base_n_table(n, table))}
4886 result, base = 0, len(table)
4888 result = result * base + table[char]
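# Usage sketch (illustrative only): with the default table the two converters
# round-trip for any base up to 62, e.g.
#   encode_base_n(255, 16) == 'ff'
#   decode_base_n('ff', 16) == 255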
4892 def decode_packed_codes(code):
4893 mobj = re.search(PACKED_CODES_RE, code)
4894 obfuscated_code, base, count, symbols = mobj.groups()
4897 symbols = symbols.split('|')
4902 base_n_count = encode_base_n(count, base)
4903 symbol_table[base_n_count] = symbols[count] or base_n_count
4906 r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
4910 def caesar(s, alphabet, shift):
4915 alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
4920 return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)
4923 def parse_m3u8_attributes(attrib):
4925 for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
4926 if val.startswith('"'):
4932 def urshift(val, n):
4933 return val >> n if val >= 0 else (val + 0x100000000) >> n
4936 def write_xattr(path, key, value):
4937 # Windows: Write xattrs to NTFS Alternate Data Streams:
4938 # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
4939 if compat_os_name == 'nt':
4940 assert ':' not in key
4941 assert os.path.exists(path)
4944 with open(f'{path}:{key}', 'wb') as f:
4946 except OSError as e:
4947 raise XAttrMetadataError(e.errno, e.strerror)
4950 # UNIX Method 1. Use xattrs/pyxattrs modules
4953 if getattr(xattr, '_yt_dlp__identifier', None) == 'pyxattr':
4954 # Unicode arguments are not supported in pyxattr until version 0.5.0
4955 # See https://github.com/ytdl-org/youtube-dl/issues/5498
4956 if version_tuple(xattr.__version__) >= (0, 5, 0):
4957 setxattr = xattr.set
4959 setxattr = xattr.setxattr
4963 setxattr(path, key, value)
4964 except OSError as e:
4965 raise XAttrMetadataError(e.errno, e.strerror)
4968 # UNIX Method 2. Use setfattr/xattr executables
4969 exe = ('setfattr' if check_executable('setfattr', ['--version'])
4970 else 'xattr' if check_executable('xattr', ['-h']) else None)
4972 raise XAttrUnavailableError(
4973 'Couldn\'t find a tool to set the xattrs. Install either the python "xattr" or "pyxattr" modules or the '
4974 + ('"xattr" binary' if sys.platform != 'linux' else 'GNU "attr" package (which contains the "setfattr" tool)'))
4976 value = value.decode()
4978 _, stderr, returncode = Popen.run(
4979 [exe, '-w', key, value, path] if exe == 'xattr' else [exe, '-n', key, '-v', value, path],
4980 text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
4981 except OSError as e:
4982 raise XAttrMetadataError(e.errno, e.strerror)
4984 raise XAttrMetadataError(returncode, stderr)
4987 def random_birthday(year_field, month_field, day_field):
4988 start_date = datetime.date(1950, 1, 1)
4989 end_date = datetime.date(1995, 12, 31)
4990 offset = random.randint(0, (end_date - start_date).days)
4991 random_date = start_date + datetime.timedelta(offset)
4993 year_field: str(random_date.year),
4994 month_field: str(random_date.month),
4995 day_field: str(random_date.day),
4999 def find_available_port(interface=''):
5001 with socket.socket() as sock:
5002 sock.bind((interface, 0))
5003 return sock.getsockname()[1]
5008 # Templates for internet shortcut files, which are plain text files.
5009 DOT_URL_LINK_TEMPLATE = '''\
5014 DOT_WEBLOC_LINK_TEMPLATE = '''\
5015 <?xml version="1.0" encoding="UTF-8"?>
5016 <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
5017 <plist version="1.0">
5020 \t<string>%(url)s</string>
5025 DOT_DESKTOP_LINK_TEMPLATE = '''\
5035 'url': DOT_URL_LINK_TEMPLATE,
5036 'desktop': DOT_DESKTOP_LINK_TEMPLATE,
5037 'webloc': DOT_WEBLOC_LINK_TEMPLATE,
5041 def iri_to_uri(iri):
5043 Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).
5045 The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact.
5048 iri_parts = urllib.parse.urlparse(iri)
5050 if '[' in iri_parts.netloc:
5051 raise ValueError('IPv6 URIs are not yet supported.')
5052 # Querying `.netloc`, when there's only one bracket, also raises a ValueError.
5054 # The `safe` argument values used by the following code contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.
5057 if iri_parts.username:
5058 net_location += urllib.parse.quote(iri_parts.username, safe=r"!$%&'()*+,~")
5059 if iri_parts.password is not None:
5060 net_location += ':' + urllib.parse.quote(iri_parts.password, safe=r"!$%&'()*+,~")
5063 net_location += iri_parts.hostname.encode('idna').decode()  # Punycode for Unicode hostnames.
5064 # The 'idna' encoding produces ASCII text.
5065 if iri_parts.port is not None and iri_parts.port != 80:
5066 net_location += ':' + str(iri_parts.port)
5068 return urllib.parse.urlunparse(
5072 urllib.parse.quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),
5074 # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
5075 urllib.parse.quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),
5077 # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
5078 urllib.parse.quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),
5080 urllib.parse.quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))
5082 # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
5085 def to_high_limit_path(path):
5086 if sys.platform in ['win32', 'cygwin']:
5087 # Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited.
5088 return '\\\\?\\' + os.path.abspath(path)
5093 def format_field(obj, field=None, template='%s', ignore=NO_DEFAULT, default='', func=IDENTITY):
5094 val = traversal.traverse_obj(obj, *variadic(field))
5095 if not val if ignore is NO_DEFAULT else val in variadic(ignore):
5097 return template % func(val)
5100 def clean_podcast_url(url):
5101 return re.sub(r'''(?x)
5105 media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
5108 (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
5111 cn\.co| # https://podcorn.com/analytics-prefix/
5112 st\.fm # https://podsights.com/docs/
5117 _HEX_TABLE = '0123456789abcdef'
5120 def random_uuidv4():
5121 return re.sub(r'[xy]', lambda x: _HEX_TABLE[random.randint(0, 15)], 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')
5124 def make_dir(path, to_screen=None):
5126 dn = os.path.dirname(path)
5128 os.makedirs(dn, exist_ok=True)
5130 except OSError as err:
5131 if callable(to_screen):
5132 to_screen(f'unable to create directory {err}')
5136 def get_executable_path():
5137 from ..update import _get_variant_and_executable_path
5139 return os.path.dirname(os.path.abspath(_get_variant_and_executable_path()[1]))
5142 def get_user_config_dirs(package_name):
5143 # .config (e.g. ~/.config/package_name)
5144 xdg_config_home = os.getenv('XDG_CONFIG_HOME') or compat_expanduser('~/.config')
5145 yield os.path.join(xdg_config_home, package_name)
5147 # appdata (%APPDATA%/package_name)
5148 appdata_dir = os.getenv('appdata')
5150 yield os.path.join(appdata_dir, package_name)
5152 # home (~/.package_name)
5153 yield os.path.join(compat_expanduser('~'), f'.{package_name}')
5156 def get_system_config_dirs(package_name):
5158 yield os.path.join('/etc', package_name)
5161 def time_seconds(**kwargs):
5163 Returns TZ-aware time in seconds since the epoch (1970-01-01T00:00:00Z)
5165 return time.time() + datetime.timedelta(**kwargs).total_seconds()
5168 # Create a JSON Web Signature (JWS) with the HS256 algorithm.
5169 # The resulting format is JWS Compact Serialization,
5170 # implemented following JWT https://www.rfc-editor.org/rfc/rfc7519.html
5171 # and JWS https://www.rfc-editor.org/rfc/rfc7515.html
5172 def jwt_encode_hs256(payload_data, key, headers={}):
5178 header_data.update(headers)
5179 header_b64 = base64.b64encode(json.dumps(header_data).encode())
5180 payload_b64 = base64.b64encode(json.dumps(payload_data).encode())
5181 h = hmac.new(key.encode(), header_b64 + b'.' + payload_b64, hashlib.sha256)
5182 signature_b64 = base64.b64encode(h.digest())
5183 token = header_b64 + b'.' + payload_b64 + b'.' + signature_b64
5187 # Can be extended in the future to verify the signature, parse the header and return the algorithm used if it's not HS256
5188 def jwt_decode_hs256(jwt):
5189 header_b64, payload_b64, signature_b64 = jwt.split('.')
5190 # add trailing ='s that may have been stripped, superfluous ='s are ignored
5191 payload_data = json.loads(base64.urlsafe_b64decode(f'{payload_b64}==='))
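# Usage sketch (illustrative only): encoding and decoding round-trip the payload
# (the signature is not verified on decode), e.g.
#   token = jwt_encode_hs256({'uid': 123}, 'secret')   # bytes
#   jwt_decode_hs256(token.decode()) == {'uid': 123}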
5195 WINDOWS_VT_MODE = False if compat_os_name == 'nt' else None
5199 def supports_terminal_sequences(stream):
5200 if compat_os_name == 'nt':
5201 if not WINDOWS_VT_MODE:
5203 elif not os.getenv('TERM'):
5206 return stream.isatty()
5207 except BaseException:
5211 def windows_enable_vt_mode():
5212 """Ref: https://bugs.python.org/issue30075 """
5213 if get_windows_version() < (10, 0, 10586):
5217 import ctypes.wintypes
5220 ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
5222 dll = ctypes.WinDLL('kernel32', use_last_error=False)
5223 handle = os.open('CONOUT$', os.O_RDWR)
5225 h_out = ctypes.wintypes.HANDLE(msvcrt.get_osfhandle(handle))
5226 dw_original_mode = ctypes.wintypes.DWORD()
5227 success = dll.GetConsoleMode(h_out, ctypes.byref(dw_original_mode))
5229 raise Exception('GetConsoleMode failed')
5231 success = dll.SetConsoleMode(h_out, ctypes.wintypes.DWORD(
5232 dw_original_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING))
5234 raise Exception('SetConsoleMode failed')
5238 global WINDOWS_VT_MODE
5239 WINDOWS_VT_MODE = True
5240 supports_terminal_sequences.cache_clear()
5243 _terminal_sequences_re = re.compile('\033\\[[^m]+m')
5246 def remove_terminal_sequences(string):
5247 return _terminal_sequences_re.sub('', string)
5250 def number_of_digits(number):
5251 return len('%d' % number)
5254 def join_nonempty(*values, delim='-', from_dict=None):
5255 if from_dict is not None:
5256 values = (traversal.traverse_obj(from_dict, variadic(v)) for v in values)
5257 return delim.join(map(str, filter(None, values)))
5260 def scale_thumbnails_to_max_format_width(formats, thumbnails, url_width_re):
5262 Find the largest format dimensions in terms of video width and, for each thumbnail:
5263 * Modify the URL: Match the width with the provided regex and replace with the former width
5266 This function is useful with video services that scale the provided thumbnails on demand
5268 _keys = ('width', 'height')
5269 max_dimensions = max(
5270 (tuple(format.get(k) or 0 for k in _keys) for format in formats),
5272 if not max_dimensions[0]:
5276 {'url': re.sub(url_width_re, str(max_dimensions[0]), thumbnail['url'])},
5277 dict(zip(_keys, max_dimensions)), thumbnail)
5278 for thumbnail in thumbnails
5282 def parse_http_range(range):
5283 """ Parse value of "Range" or "Content-Range" HTTP header into tuple. """
5285 return None, None, None
5286 crg = re.search(r'bytes[ =](\d+)-(\d+)?(?:/(\d+))?', range)
5288 return None, None, None
5289 return int(crg.group(1)), int_or_none(crg.group(2)), int_or_none(crg.group(3))
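# Usage sketch (illustrative only):
#   parse_http_range('bytes=500-999') == (500, 999, None)
#   parse_http_range('bytes 500-999/1234') == (500, 999, 1234)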
5292 def read_stdin(what):
5293 eof = 'Ctrl+Z' if compat_os_name == 'nt' else 'Ctrl+D'
5294 write_string(f'Reading {what} from STDIN - EOF ({eof}) to end:\n')
5298 def determine_file_encoding(data):
5300 Detect the text encoding used
5301 @returns (encoding, bytes to skip)
5304 # BOM marks are given priority over declarations
5305 for bom, enc in BOMS:
5306 if data.startswith(bom):
5307 return enc, len(bom)
5309 # Strip off all null bytes to match even when UTF-16 or UTF-32 is used.
5310 # We ignore the endianness to get a good enough match
5311 data = data.replace(b'\0', b'')
5312 mobj = re.match(rb'(?m)^#\s*coding\s*:\s*(\S+)\s*$', data)
5313 return mobj.group(1).decode() if mobj else None, 0
5320 __initialized = False
5322 def __init__(self, parser, label=None):
5323 self.parser, self.label = parser, label
5324 self._loaded_paths, self.configs = set(), []
5326 def init(self, args=None, filename=None):
5327 assert not self.__initialized
5328 self.own_args, self.filename = args, filename
5329 return self.load_configs()
5331 def load_configs(self):
5334 location = os.path.realpath(self.filename)
5335 directory = os.path.dirname(location)
5336 if location in self._loaded_paths:
5338 self._loaded_paths.add(location)
5340 self.__initialized = True
5341 opts, _ = self.parser.parse_known_args(self.own_args)
5342 self.parsed_args = self.own_args
5343 for location in opts.config_locations or []:
5345 if location in self._loaded_paths:
5347 self._loaded_paths.add(location)
5348 self.append_config(shlex.split(read_stdin('options'), comments=True), label='stdin')
5350 location = os.path.join(directory, expand_path(location))
5351 if os.path.isdir(location):
5352 location = os.path.join(location, 'yt-dlp.conf')
5353 if not os.path.exists(location):
5354 self.parser.error(f'config location {location} does not exist')
5355 self.append_config(self.read_file(location), location)
5359 label = join_nonempty(
5360 self.label, 'config', f'"{self.filename}"' if self.filename else '',
5362 return join_nonempty(
5363 self.own_args is not None and f'{label[0].upper()}{label[1:]}: {self.hide_login_info(self.own_args)}',
5364 *(f'\n{c}'.replace('\n', '\n| ')[1:] for c in self.configs),
5368 def read_file(filename, default=[]):
5370 optionf = open(filename, 'rb')
5372 return default  # silently skip if file is not present
5374 enc, skip = determine_file_encoding(optionf.read(512))
5375 optionf.seek(skip, io.SEEK_SET)
5377 enc = None  # silently skip read errors
5379 # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
5380 contents = optionf.read().decode(enc or preferredencoding())
5381 res = shlex.split(contents, comments=True)
5382 except Exception as err:
5383 raise ValueError(f'Unable to parse "{filename}": {err}')
5389 def hide_login_info(opts):
5390 PRIVATE_OPTS = {'-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'}
5391 eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')
5396 return m.group('key') + '=PRIVATE'
5400 opts = list(map(_scrub_eq, opts))
5401 for idx, opt in enumerate(opts):
5402 if opt in PRIVATE_OPTS and idx + 1 < len(opts):
5403 opts[idx + 1] = 'PRIVATE'
5406 def append_config(self, *args, label=None):
5407 config = type(self)(self.parser, label)
5408 config._loaded_paths = self._loaded_paths
5409 if config.init(*args):
5410 self.configs.append(config)
5414 for config in reversed(self.configs):
5415 yield from config.all_args
5416 yield from self.parsed_args or []
5418 def parse_known_args(self, **kwargs):
5419 return self.parser.parse_known_args(self.all_args, **kwargs)
5421 def parse_args(self):
5422 return self.parser.parse_args(self.all_args)
5425 class WebSocketsWrapper:
5426 """Wraps websockets module to use in non-async scopes"""
5429 def __init__(self, url, headers=None, connect=True):
5430 self.loop = asyncio.new_event_loop()
5431 # XXX: "loop" is deprecated
5432 self.conn = websockets.connect(
5433 url, extra_headers=headers, ping_interval=None,
5434 close_timeout=float('inf'), loop=self.loop, ping_timeout=float('inf'))
5437 atexit.register(self.__exit__, None, None, None)
5439 def __enter__(self):
5441 self.pool = self.run_with_loop(self.conn.__aenter__(), self.loop)
5444 def send(self, *args):
5445 self.run_with_loop(self.pool.send(*args), self.loop)
5447 def recv(self, *args):
5448 return self.run_with_loop(self.pool.recv(*args), self.loop)
5450 def __exit__(self, type, value, traceback):
5452 return self.run_with_loop(self.conn.__aexit__(type, value, traceback), self.loop)
5455 self._cancel_all_tasks(self.loop)
5457 # taken from https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py with modifications
5458 # for contributors: if any new asyncio-based library needs to be run in non-async code, move these functions out of this class
5460 def run_with_loop(main, loop):
5461 if not asyncio.iscoroutine(main):
5462 raise ValueError(f'a coroutine was expected, got {main!r}')
5465 return loop.run_until_complete(main)
5467 loop.run_until_complete(loop.shutdown_asyncgens())
5468 if hasattr(loop, 'shutdown_default_executor'):
5469 loop.run_until_complete(loop.shutdown_default_executor())
5472 def _cancel_all_tasks(loop):
5473 to_cancel = asyncio.all_tasks(loop)
5478 for task in to_cancel:
5481 # XXX: "loop" is removed in python 3.10+
5482 loop.run_until_complete(
5483 asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))
5485 for task in to_cancel:
5486 if task.cancelled():
5488 if task.exception() is not None:
5489 loop.call_exception_handler({
5490 'message': 'unhandled exception during asyncio.run() shutdown',
5491 'exception': task.exception(),
5496 def merge_headers(*dicts):
5497 """Merge dicts of http headers case insensitively, prioritizing the latter ones"""
5498 return {k.title(): v for k, v in itertools.chain.from_iterable(map(dict.items, dicts))}
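# Usage sketch (illustrative only): keys are title-cased, so headers differing
# only in case collapse into one entry, with later dicts winning, e.g.
#   merge_headers({'user-agent': 'A'}, {'User-Agent': 'B'}) == {'User-Agent': 'B'}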
5501 def cached_method(f):
5502 """Cache a method"""
5503 signature = inspect.signature(f)
5506 def wrapper(self, *args, **kwargs):
5507 bound_args = signature.bind(self, *args, **kwargs)
5508 bound_args.apply_defaults()
5509 key = tuple(bound_args.arguments.values())[1:]
5511 cache = vars(self).setdefault('_cached_method__cache', {}).setdefault(f.__name__, {})
5512 if key not in cache:
5513 cache[key] = f(self, *args, **kwargs)
5518 class classproperty:
5519 """property access for class methods with optional caching"""
5520 def __new__(cls, func=None, *args, **kwargs):
5522 return functools.partial(cls, *args, **kwargs)
5523 return super().__new__(cls)
5525 def __init__(self, func, *, cache=False):
5526 functools.update_wrapper(self, func)
5528 self._cache = {} if cache else None
5530 def __get__(self, _, cls):
5531 if self._cache is None:
5532 return self.func(cls)
5533 elif cls not in self._cache:
5534 self._cache[cls] = self.func(cls)
5535 return self._cache[cls]
5538 class function_with_repr:
5539 def __init__(self, func, repr_=None):
5540 functools.update_wrapper(self, func)
5541 self.func, self.__repr = func, repr_
5543 def __call__(self, *args, **kwargs):
5544 return self.func(*args, **kwargs)
5549 return f'{self.func.__module__}.{self.func.__qualname__}'
5552 class Namespace(types.SimpleNamespace):
5553 """Immutable namespace"""
5556 return iter(self.__dict__.values())
5560 return self.__dict__.items()
5563 MEDIA_EXTENSIONS = Namespace(
5564 common_video=('avi', 'flv', 'mkv', 'mov', 'mp4', 'webm'),
5565 video=('3g2', '3gp', 'f4v', 'mk3d', 'divx', 'mpg', 'ogv', 'm4v', 'wmv'),
5566 common_audio=('aiff', 'alac', 'flac', 'm4a', 'mka', 'mp3', 'ogg', 'opus', 'wav'),
5567 audio=('aac', 'ape', 'asf', 'f4a', 'f4b', 'm4b', 'm4p', 'm4r', 'oga', 'ogx', 'spx', 'vorbis', 'wma', 'weba'),
5568 thumbnails=('jpg', 'png', 'webp'),
5569 storyboards=('mhtml', ),
5570 subtitles=('srt', 'vtt', 'ass', 'lrc'),
5571 manifests=('f4f', 'f4m', 'm3u8', 'smil', 'mpd'),
5573 MEDIA_EXTENSIONS.video += MEDIA_EXTENSIONS.common_video
5574 MEDIA_EXTENSIONS.audio += MEDIA_EXTENSIONS.common_audio
5576 KNOWN_EXTENSIONS = (*MEDIA_EXTENSIONS.video, *MEDIA_EXTENSIONS.audio, *MEDIA_EXTENSIONS.manifests)
5581 for retry in RetryManager(...):
5584 except SomeException as err:
5588 attempt, _error = 0, None
5590 def __init__(self, _retries, _error_callback, **kwargs):
5591 self.retries = _retries or 0
5592 self.error_callback = functools.partial(_error_callback, **kwargs)
5594 def _should_retry(self):
5595 return self._error is not NO_DEFAULT and self.attempt <= self.retries
5599 if self._error is NO_DEFAULT:
5604 def error(self, value):
5608 while self._should_retry():
5609 self.error = NO_DEFAULT
5613 self.error_callback(self.error, self.attempt, self.retries)
5616 def report_retry(e, count, retries, *, sleep_func, info, warn, error=None, suffix=None):
5617 """Utility function for reporting retries"""
5620 return error(f'{e}. Giving up after {count - 1} retries') if count > 1 else error(str(e))
5625 elif isinstance(e, ExtractorError):
5626 e = remove_end(str_or_none(e.cause) or e.orig_msg, '.')
5627 warn(f'{e}. Retrying{format_field(suffix, None, " %s")} ({count}/{retries})...')
5629 delay = float_or_none(sleep_func(n=count - 1)) if callable(sleep_func) else sleep_func
5631 info(f'Sleeping {delay:.2f} seconds ...')
5635 def make_archive_id(ie, video_id):
5636 ie_key = ie if isinstance(ie, str) else ie.ie_key()
5637 return f'{ie_key.lower()} {video_id}'
5640 def truncate_string(s, left, right=0):
5641 assert left > 3 and right >= 0
5642 if s is None or len(s) <= left + right:
5644 return f'{s[:left-3]}...{s[-right:] if right else ""}'
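# Usage sketch (illustrative only): at most `left + right` characters are kept and
# '...' marks the cut, e.g.
#   truncate_string('abcdefghij', 6) == 'abc...'
#   truncate_string('abcdefghij', 6, 2) == 'abc...ij'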
5647 def orderedSet_from_options(options, alias_dict, *, use_regex=False, start=None):
5648 assert 'all' in alias_dict, '"all" alias is required'
5649 requested = list(start or [])
5651 discard = val.startswith('-')
5655 if val in alias_dict:
5656 val = alias_dict[val] if not discard else [
5657 i[1:] if i.startswith('-') else f'-{i}' for i in alias_dict[val]]
5658 # NB: Do not allow regex in aliases for performance
5659 requested = orderedSet_from_options(val, alias_dict, start=requested)
5662 current = (filter(re.compile(val, re.I).fullmatch, alias_dict['all']) if use_regex
5663 else [val] if val in alias_dict['all'] else None)
5665 raise ValueError(val)
5668 for item in current:
5669 while item in requested:
5670 requested.remove(item)
5672 requested.extend(current)
5674 return orderedSet(requested)
5679 regex = r' *((?P<reverse>\+)?(?P<field>[a-zA-Z0-9_]+)((?P<separator>[~:])(?P<limit>.*?))?)? *$'
5681 default = ('hidden', 'aud_or_vid', 'hasvid', 'ie_pref', 'lang', 'quality',
5682 'res', 'fps', 'hdr:12', 'vcodec:vp9.2', 'channels', 'acodec',
5683 'size', 'br', 'asr', 'proto', 'ext', 'hasaud', 'source', 'id')  # These must not be aliases
5684 ytdl_default = ('hasaud', 'lang', 'quality', 'tbr', 'filesize', 'vbr',
5685 'height', 'width', 'proto', 'vext', 'abr', 'aext',
5686 'fps', 'fs_approx', 'source', 'id')
5689 'vcodec': {'type': 'ordered', 'regex': True,
5690 'order': ['av0?1', 'vp0?9.2', 'vp0?9', '[hx]265|he?vc?', '[hx]264|avc', 'vp0?8', 'mp4v|h263', 'theora', '', None, 'none']},
5691 'acodec': {'type': 'ordered', 'regex': True,
5692 'order': ['[af]lac', 'wav|aiff', 'opus', 'vorbis|ogg', 'aac', 'mp?4a?', 'mp3', 'ac-?4', 'e-?a?c-?3', 'ac-?3', 'dts', '', None, 'none']},
5693 'hdr': {'type': 'ordered', 'regex': True, 'field': 'dynamic_range',
5694 'order': ['dv', '(hdr)?12', r'(hdr)?10\+', '(hdr)?10', 'hlg', '', 'sdr', None]},
5695 'proto': {'type': 'ordered', 'regex': True, 'field': 'protocol',
5696 'order': ['(ht|f)tps', '(ht|f)tp$', 'm3u8.*', '.*dash', 'websocket_frag', 'rtmpe?', '', 'mms|rtsp', 'ws|websocket', 'f4']},
5697 'vext': {'type': 'ordered', 'field': 'video_ext',
5698 'order': ('mp4', 'mov', 'webm', 'flv', '', 'none'),
5699 'order_free': ('webm', 'mp4', 'mov', 'flv', '', 'none')},
5700 'aext': {'type': 'ordered', 'regex': True, 'field': 'audio_ext',
5701 'order': ('m4a', 'aac', 'mp3', 'ogg', 'opus', 'web[am]', '', 'none'),
5702 'order_free': ('ogg', 'opus', 'web[am]', 'mp3', 'm4a', 'aac', '', 'none')},
5703 'hidden': {'visible': False, 'forced': True, 'type': 'extractor', 'max': -1000},
5704 'aud_or_vid': {'visible': False, 'forced': True, 'type': 'multiple',
5705 'field': ('vcodec', 'acodec'),
5706 'function': lambda it: int(any(v != 'none' for v in it))},
5707 'ie_pref': {'priority': True, 'type': 'extractor'},
5708 'hasvid': {'priority': True, 'field': 'vcodec', 'type': 'boolean', 'not_in_list': ('none',)},
5709 'hasaud': {'field': 'acodec', 'type': 'boolean', 'not_in_list': ('none',)},
5710 'lang': {'convert': 'float', 'field': 'language_preference', 'default': -1},
5711 'quality': {'convert': 'float', 'default': -1},
5712 'filesize': {'convert': 'bytes'},
5713 'fs_approx': {'convert': 'bytes', 'field': 'filesize_approx'},
5714 'id': {'convert': 'string', 'field': 'format_id'},
5715 'height': {'convert': 'float_none'},
5716 'width': {'convert': 'float_none'},
5717 'fps': {'convert': 'float_none'},
5718 'channels': {'convert': 'float_none', 'field': 'audio_channels'},
5719 'tbr': {'convert': 'float_none'},
5720 'vbr': {'convert': 'float_none'},
5721 'abr': {'convert': 'float_none'},
5722 'asr': {'convert': 'float_none'},
5723 'source': {'convert': 'float', 'field': 'source_preference', 'default': -1},
5725 'codec': {'type': 'combined', 'field': ('vcodec', 'acodec')},
5726 'br': {'type': 'multiple', 'field': ('tbr', 'vbr', 'abr'),
5727 'function': lambda it: next(filter(None, it), None)},
5728 'size': {'type': 'multiple', 'field': ('filesize', 'fs_approx'),
5729 'function': lambda it: next(filter(None, it), None)},
5730 'ext': {'type': 'combined', 'field': ('vext', 'aext')},
5731 'res': {'type': 'multiple', 'field': ('height', 'width'),
5732 'function': lambda it: (lambda l: min(l) if l else 0)(tuple(filter(None, it)))},
5734 # Actual field names
5735 'format_id': {'type': 'alias', 'field': 'id'},
5736 'preference': {'type': 'alias', 'field': 'ie_pref'},
5737 'language_preference': {'type': 'alias', 'field': 'lang'},
5738 'source_preference': {'type': 'alias', 'field': 'source'},
5739 'protocol': {'type': 'alias', 'field': 'proto'},
5740 'filesize_approx': {'type': 'alias', 'field': 'fs_approx'},
5741 'audio_channels': {'type': 'alias', 'field': 'channels'},
5744 'dimension': {'type': 'alias', 'field': 'res', 'deprecated': True},
5745 'resolution': {'type': 'alias', 'field': 'res', 'deprecated': True},
5746 'extension': {'type': 'alias', 'field': 'ext', 'deprecated': True},
5747 'bitrate': {'type': 'alias', 'field': 'br', 'deprecated': True},
5748 'total_bitrate': {'type': 'alias', 'field': 'tbr', 'deprecated': True},
5749 'video_bitrate': {'type': 'alias', 'field': 'vbr', 'deprecated': True},
5750 'audio_bitrate': {'type': 'alias', 'field': 'abr', 'deprecated': True},
5751 'framerate': {'type': 'alias', 'field': 'fps', 'deprecated': True},
5752 'filesize_estimate': {'type': 'alias', 'field': 'size', 'deprecated': True},
5753 'samplerate': {'type': 'alias', 'field': 'asr', 'deprecated': True},
5754 'video_ext': {'type': 'alias', 'field': 'vext', 'deprecated': True},
5755 'audio_ext': {'type': 'alias', 'field': 'aext', 'deprecated': True},
5756 'video_codec': {'type': 'alias', 'field': 'vcodec', 'deprecated': True},
5757 'audio_codec': {'type': 'alias', 'field': 'acodec', 'deprecated': True},
5758 'video': {'type': 'alias', 'field': 'hasvid', 'deprecated': True},
5759 'has_video': {'type': 'alias', 'field': 'hasvid', 'deprecated': True},
5760 'audio': {'type': 'alias', 'field': 'hasaud', 'deprecated': True},
5761 'has_audio': {'type': 'alias', 'field': 'hasaud', 'deprecated': True},
5762 'extractor': {'type': 'alias', 'field': 'ie_pref', 'deprecated': True},
5763 'extractor_preference': {'type': 'alias', 'field': 'ie_pref', 'deprecated': True},
5766 def __init__(self, ydl, field_preference):
5769 self.evaluate_params(self.ydl.params, field_preference)
5770 if ydl.params.get('verbose'):
5771 self.print_verbose_info(self.ydl.write_debug)
5773 def _get_field_setting(self, field, key):
5774 if field not in self.settings:
5775 if key in ('forced', 'priority'):
5777 self.ydl.deprecated_feature(f'Using arbitrary fields ({field}) for format sorting is '
5778 'deprecated and may be removed in a future version')
5779 self.settings[field] = {}
5780 propObj = self.settings[field]
5781 if key not in propObj:
5782 type = propObj.get('type')
5784 default = 'preference' if type == 'extractor' else (field,) if type in ('combined', 'multiple') else field
5785 elif key == 'convert':
5786 default = 'order' if type == 'ordered' else 'float_string' if field else 'ignore'
5788 default = {'type': 'field', 'visible': True, 'order': [], 'not_in_list': (None,)}.get(key, None)
5789 propObj[key] = default
5792 def _resolve_field_value(self, field, value, convertNone=False):
5797 value = value.lower()
5798 conversion = self._get_field_setting(field, 'convert')
5799 if conversion == 'ignore':
5801 if conversion == 'string':
5803 elif conversion == 'float_none':
5804 return float_or_none(value)
5805 elif conversion == 'bytes':
5806 return parse_bytes(value)
5807 elif conversion == 'order':
5808 order_list = (self._use_free_order and self._get_field_setting(field, 'order_free')) or self._get_field_setting(field, 'order')
5809 use_regex = self._get_field_setting(field, 'regex')
5810 list_length = len(order_list)
5811 empty_pos = order_list.index('') if '' in order_list else list_length + 1
5812 if use_regex and value is not None:
5813 for i, regex in enumerate(order_list):
5814 if regex and re.match(regex, value):
5815 return list_length - i
5816 return list_length - empty_pos  # not in list
5817 else:  # not regex or value = None
5818 return list_length - (order_list.index(value) if value in order_list else empty_pos)
5820 if value.isnumeric():
5823 self.settings[field]['convert'] = 'string'
5826 def evaluate_params(self, params, sort_extractor):
5827 self._use_free_order = params.get('prefer_free_formats', False)
5828 self._sort_user = params.get('format_sort', [])
5829 self._sort_extractor = sort_extractor
5831 def add_item(field, reverse, closest, limit_text):
5832 field = field.lower()
5833 if field in self._order:
5835 self._order.append(field)
5836 limit = self._resolve_field_value(field, limit_text)
5839 'closest': False if limit is None else closest,
5840 'limit_text': limit_text,
5842 if field in self.settings:
5843 self.settings[field].update(data)
5845 self.settings[field] = data
5848 tuple(field for field in self.default if self._get_field_setting(field, 'forced'))
5849 + (tuple() if params.get('format_sort_force', False)
5850 else tuple(field for field in self.default if self._get_field_setting(field, 'priority')))
5851 + tuple(self._sort_user) + tuple(sort_extractor) + self.default)
5853 for item in sort_list:
5854 match = re.match(self.regex, item)
5856 raise ExtractorError('Invalid format sort string "%s" given by extractor' % item)
5857 field = match.group('field')
5860 if self._get_field_setting(field, 'type') == 'alias':
5861 alias, field = field, self._get_field_setting(field, 'field')
5862 if self._get_field_setting(alias, 'deprecated'):
5863 self.ydl.deprecated_feature(f'Format sorting alias {alias} is deprecated and may '
5864 f'be removed in a future version. Please use {field} instead')
5865 reverse = match.group('reverse') is not None
5866 closest = match.group('separator') == '~'
5867 limit_text = match.group('limit')
5869 has_limit = limit_text is not None
5870 has_multiple_fields = self._get_field_setting(field, 'type') == 'combined'
5871 has_multiple_limits = has_limit and has_multiple_fields and not self._get_field_setting(field, 'same_limit')
5873 fields = self._get_field_setting(field, 'field') if has_multiple_fields else (field,)
5874 limits = limit_text.split(':') if has_multiple_limits else (limit_text,) if has_limit else tuple()
5875 limit_count = len(limits)
5876 for (i, f) in enumerate(fields):
5877 add_item(f, reverse, closest,
5878 limits[i] if i < limit_count
5879 else limits[0] if has_limit and not has_multiple_limits
5882 def print_verbose_info(self, write_debug):
5884 write_debug('Sort order given by user: %s' % ', '.join(self._sort_user))
5885 if self._sort_extractor:
5886 write_debug('Sort order given by extractor: %s' % ', '.join(self._sort_extractor))
5887 write_debug('Formats sorted by: %s' % ', '.join(['%s%s%s' % (
5888 '+' if self._get_field_setting(field, 'reverse') else '', field,
5889 '%s%s(%s)' % ('~' if self._get_field_setting(field, 'closest') else ':',
5890 self._get_field_setting(field, 'limit_text'),
5891 self._get_field_setting(field, 'limit'))
5892 if self._get_field_setting(field, 'limit_text') is not None else '')
5893 for field in self._order if self._get_field_setting(field, 'visible')]))
5895 def _calculate_field_preference_from_value(self, format, field, type, value):
5896 reverse = self._get_field_setting(field, 'reverse')
5897 closest = self._get_field_setting(field, 'closest')
5898 limit = self._get_field_setting(field, 'limit')
5900 if type == 'extractor':
5901 maximum = self._get_field_setting(field, 'max')
5902 if value is None or (maximum is not None and value >= maximum):
5904 elif type == 'boolean':
5905 in_list = self._get_field_setting(field, 'in_list')
5906 not_in_list = self._get_field_setting(field, 'not_in_list')
5907 value = 0 if ((in_list is None or value in in_list) and (not_in_list is None or value not in not_in_list)) else -1
5908 elif type == 'ordered':
5909 value = self._resolve_field_value(field, value, True)
5911 # try to convert to number
5912 val_num = float_or_none(value, default=self._get_field_setting(field, 'default'))
5913 is_num = self._get_field_setting(field, 'convert') != 'string' and val_num is not None
5917 return ((-10, 0) if value is None
5918 else (1, value, 0) if not is_num  # if a field has mixed strings and numbers, strings are sorted higher
5919 else (0, -abs(value - limit), value - limit if reverse else limit - value) if closest
5920 else (0, value, 0) if not reverse and (limit is None or value <= limit)
5921 else (0, -value, 0) if limit is None or (reverse and value == limit) or value > limit
5922 else (-1, value, 0))
5924 def _calculate_field_preference(self, format, field):
5925 type = self._get_field_setting(field, 'type')  # extractor, boolean, ordered, field, multiple
5926 get_value = lambda f: format.get(self._get_field_setting(f, 'field'))
5927 if type == 'multiple':
5928 type = 'field'  # Only 'field' is allowed in multiple for now
5929 actual_fields = self._get_field_setting(field, 'field')
5931 value = self._get_field_setting(field, 'function')(get_value(f) for f in actual_fields)
5933 value = get_value(field)
5934 return self._calculate_field_preference_from_value(format, field, type, value)
5936 def calculate_preference(self, format):
5937 # Determine missing protocol
5938 if not format.get('protocol'):
5939 format['protocol'] = determine_protocol(format)
5941 # Determine missing ext
5942 if not format.get('ext') and 'url' in format:
5943 format['ext'] = determine_ext(format['url'])
5944 if format.get('vcodec') == 'none':
5945 format['audio_ext'] = format['ext'] if format.get('acodec') != 'none' else 'none'
5946 format['video_ext'] = 'none'
5948 format['video_ext'] = format['ext']
5949 format['audio_ext'] = 'none'
5950 # if format.get('preference') is None and format.get('ext') in ('f4f', 'f4m'):  # Not supported?
5951 #     format['preference'] = -1000
5953 if format.get('preference') is None and format.get('ext') == 'flv' and re.match('[hx]265|he?vc?', format.get('vcodec') or ''):
5954 # HEVC-over-FLV violates FLV's original specification
5955 # ref. https://trac.ffmpeg.org/ticket/6389
5956 # ref. https://github.com/yt-dlp/yt-dlp/pull/5821
5957 format['preference'] = -100
5959 # Determine missing bitrates
5960 if format.get('vcodec') == 'none':
5962 if format.get('acodec') == 'none':
5964 if not format.get('vbr') and format.get('vcodec') != 'none':
5965 format['vbr'] = try_call(lambda: format['tbr'] - format['abr']) or None
5966 if not format.get('abr') and format.get('acodec') != 'none':
5967 format['abr'] = try_call(lambda: format['tbr'] - format['vbr']) or None
5968 if not format.get('tbr'):
5969 format['tbr'] = try_call(lambda: format['vbr'] + format['abr']) or None
5971 return tuple(self._calculate_field_preference(format, field) for field in self._order)