47 import xml
.etree
.ElementTree
50 from .compat
import functools
# isort: split
52 compat_etree_fromstring
,
54 compat_HTMLParseError
,
58 from .dependencies
import brotli
, certifi
, websockets
, xattr
59 from .socks
import ProxyType
, sockssocket
62 def register_socks_protocols():
63 # "Register" SOCKS protocols
64 # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
65 # URLs with protocols not in urlparse.uses_netloc are not handled correctly
66 for scheme
in ('socks', 'socks4', 'socks4a', 'socks5'):
67 if scheme
not in urllib
.parse
.uses_netloc
:
68 urllib
.parse
.uses_netloc
.append(scheme
)
71 # This is not clearly defined otherwise
72 compiled_regex_type
= type(re
.compile(''))
75 def random_user_agent():
76 _USER_AGENT_TPL
= 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
117 return _USER_AGENT_TPL
% random
.choice(_CHROME_VERSIONS
)
120 SUPPORTED_ENCODINGS
= [
124 SUPPORTED_ENCODINGS
.append('br')
127 'User-Agent': random_user_agent(),
128 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
129 'Accept-Language': 'en-us,en;q=0.5',
130 'Sec-Fetch-Mode': 'navigate',
135 'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
139 NO_DEFAULT
= object()
140 IDENTITY
= lambda x
: x
142 ENGLISH_MONTH_NAMES
= [
143 'January', 'February', 'March', 'April', 'May', 'June',
144 'July', 'August', 'September', 'October', 'November', 'December']
147 'en': ENGLISH_MONTH_NAMES
,
149 'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
150 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
151 # these follow the genitive grammatical case (dopełniacz)
152 # some websites might be using nominative, which will require another month list
153 # https://en.wikibooks.org/wiki/Polish/Noun_cases
154 'pl': ['stycznia', 'lutego', 'marca', 'kwietnia', 'maja', 'czerwca',
155 'lipca', 'sierpnia', 'września', 'października', 'listopada', 'grudnia'],
158 # From https://github.com/python/cpython/blob/3.11/Lib/email/_parseaddr.py#L36-L42
160 'UT': 0, 'UTC': 0, 'GMT': 0, 'Z': 0,
161 'AST': -4, 'ADT': -3, # Atlantic (used in Canada)
162 'EST': -5, 'EDT': -4, # Eastern
163 'CST': -6, 'CDT': -5, # Central
164 'MST': -7, 'MDT': -6, # Mountain
165 'PST': -8, 'PDT': -7 # Pacific
168 # needed for sanitizing filenames in restricted mode
169 ACCENT_CHARS
= dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
170 itertools
.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
171 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))
201 '%Y-%m-%d %H:%M:%S.%f',
202 '%Y-%m-%d %H:%M:%S:%f',
205 '%Y-%m-%dT%H:%M:%SZ',
206 '%Y-%m-%dT%H:%M:%S.%fZ',
207 '%Y-%m-%dT%H:%M:%S.%f0Z',
209 '%Y-%m-%dT%H:%M:%S.%f',
212 '%b %d %Y at %H:%M:%S',
214 '%B %d %Y at %H:%M:%S',
218 DATE_FORMATS_DAY_FIRST
= list(DATE_FORMATS
)
219 DATE_FORMATS_DAY_FIRST
.extend([
229 DATE_FORMATS_MONTH_FIRST
= list(DATE_FORMATS
)
230 DATE_FORMATS_MONTH_FIRST
.extend([
238 PACKED_CODES_RE
= r
"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
239 JSON_LD_RE
= r
'(?is)<script[^>]+type=(["\']?
)application
/ld\
+json\
1[^
>]*>\s
*(?P
<json_ld
>{.+?}|\
[.+?\
])\s
*</script
>'
241 NUMBER_RE = r'\d
+(?
:\
.\d
+)?
'
245 def preferredencoding():
246 """Get preferred encoding.
248 Returns the best encoding scheme for the system, based on
249 locale.getpreferredencoding() and some further tweaks.
252 pref = locale.getpreferredencoding()
260 def write_json_file(obj, fn):
261 """ Encode obj as JSON and write it to fn, atomically if possible """
263 tf = tempfile.NamedTemporaryFile(
264 prefix=f'{os.path.basename(fn)}
.', dir=os.path.dirname(fn),
265 suffix='.tmp
', delete=False, mode='w
', encoding='utf
-8')
269 json.dump(obj, tf, ensure_ascii=False)
270 if sys.platform == 'win32
':
271 # Need to remove existing file on Windows, else os.rename raises
272 # WindowsError or FileExistsError.
273 with contextlib.suppress(OSError):
275 with contextlib.suppress(OSError):
278 os.chmod(tf.name, 0o666 & ~mask)
279 os.rename(tf.name, fn)
281 with contextlib.suppress(OSError):
286 def find_xpath_attr(node, xpath, key, val=None):
287 """ Find the xpath xpath[@key=val] """
288 assert re.match(r'^
[a
-zA
-Z_
-]+$
', key)
289 expr = xpath + ('[@%s]' % key if val is None else f"[@{key}='{val}
']")
290 return node.find(expr)
292 # On python2.6 the xml.etree.ElementTree.Element methods don't support
293 # the namespace parameter
296 def xpath_with_ns(path
, ns_map
):
297 components
= [c
.split(':') for c
in path
.split('/')]
301 replaced
.append(c
[0])
304 replaced
.append('{%s}%s' % (ns_map
[ns
], tag
))
305 return '/'.join(replaced
)
308 def xpath_element(node
, xpath
, name
=None, fatal
=False, default
=NO_DEFAULT
):
309 def _find_xpath(xpath
):
310 return node
.find(xpath
)
312 if isinstance(xpath
, str):
313 n
= _find_xpath(xpath
)
321 if default
is not NO_DEFAULT
:
324 name
= xpath
if name
is None else name
325 raise ExtractorError('Could not find XML element %s' % name
)
331 def xpath_text(node
, xpath
, name
=None, fatal
=False, default
=NO_DEFAULT
):
332 n
= xpath_element(node
, xpath
, name
, fatal
=fatal
, default
=default
)
333 if n
is None or n
== default
:
336 if default
is not NO_DEFAULT
:
339 name
= xpath
if name
is None else name
340 raise ExtractorError('Could not find XML element\'s text %s' % name
)
346 def xpath_attr(node
, xpath
, key
, name
=None, fatal
=False, default
=NO_DEFAULT
):
347 n
= find_xpath_attr(node
, xpath
, key
)
349 if default
is not NO_DEFAULT
:
352 name
= f
'{xpath}[@{key}]' if name
is None else name
353 raise ExtractorError('Could not find XML attribute %s' % name
)
359 def get_element_by_id(id, html
, **kwargs
):
360 """Return the content of the tag with the specified ID in the passed HTML document"""
361 return get_element_by_attribute('id', id, html
, **kwargs
)
364 def get_element_html_by_id(id, html
, **kwargs
):
365 """Return the html of the tag with the specified ID in the passed HTML document"""
366 return get_element_html_by_attribute('id', id, html
, **kwargs
)
369 def get_element_by_class(class_name
, html
):
370 """Return the content of the first tag with the specified class in the passed HTML document"""
371 retval
= get_elements_by_class(class_name
, html
)
372 return retval
[0] if retval
else None
375 def get_element_html_by_class(class_name
, html
):
376 """Return the html of the first tag with the specified class in the passed HTML document"""
377 retval
= get_elements_html_by_class(class_name
, html
)
378 return retval
[0] if retval
else None
381 def get_element_by_attribute(attribute
, value
, html
, **kwargs
):
382 retval
= get_elements_by_attribute(attribute
, value
, html
, **kwargs
)
383 return retval
[0] if retval
else None
386 def get_element_html_by_attribute(attribute
, value
, html
, **kargs
):
387 retval
= get_elements_html_by_attribute(attribute
, value
, html
, **kargs
)
388 return retval
[0] if retval
else None
391 def get_elements_by_class(class_name
, html
, **kargs
):
392 """Return the content of all tags with the specified class in the passed HTML document as a list"""
393 return get_elements_by_attribute(
394 'class', r
'[^\'"]*(?<=[\'"\s
])%s(?
=[\'"\s])[^\'"]*' % re.escape(class_name),
395 html, escape_value=False)
398 def get_elements_html_by_class(class_name, html):
399 """Return the html of all tags with the specified class in the passed HTML document as a list"""
400 return get_elements_html_by_attribute(
401 'class', r'[^
\'"]*(?<=[\'"\s
])%s(?
=[\'"\s])[^\'"]*' % re.escape(class_name),
402 html, escape_value=False)
405 def get_elements_by_attribute(*args, **kwargs):
406 """Return the content of the tag with the specified attribute in the passed HTML document"""
407 return [content for content, _ in get_elements_text_and_html_by_attribute(*args, **kwargs)]
410 def get_elements_html_by_attribute(*args, **kwargs):
411 """Return the html of the tag with the specified attribute in the passed HTML document"""
412 return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]
415 def get_elements_text_and_html_by_attribute(attribute, value, html, *, tag=r'[\w
:.-]+', escape_value=True):
417 Return the text (content) and the html (whole) of the tag with the specified
418 attribute in the passed HTML document
423 quote = '' if re.match(r'''[\s"'`
=<>]''', value) else '?'
425 value = re.escape(value) if escape_value else value
427 partial_element_re = rf'''(?x
)
429 (?
:\
s(?
:[^
>"']|"[^
"]*"|
'[^']*')*)?
430 \s{re.escape(attribute)}\s*=\s*(?P<_q>['"]{quote})(?-x:{value})(?P=_q)
433 for m in re.finditer(partial_element_re, html):
434 content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():])
437 unescapeHTML(re.sub(r'^(?P<q>["\'])(?P
<content
>.*)(?P
=q
)$
', r'\g
<content
>', content, flags=re.DOTALL)),
442 class HTMLBreakOnClosingTagParser(html.parser.HTMLParser):
444 HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
445 closing tag for the first opening tag it has encountered, and can be used
449 class HTMLBreakOnClosingTagException(Exception):
453 self.tagstack = collections.deque()
454 html.parser.HTMLParser.__init__(self)
459 def __exit__(self, *_):
463 # handle_endtag does not return upon raising HTMLBreakOnClosingTagException,
464 # so data remains buffered; we no longer have any interest in it, thus
465 # override this method to discard it
468 def handle_starttag(self, tag, _):
469 self.tagstack.append(tag)
471 def handle_endtag(self, tag):
472 if not self.tagstack:
473 raise compat_HTMLParseError('no tags
in the stack
')
475 inner_tag = self.tagstack.pop()
479 raise compat_HTMLParseError(f'matching opening tag
for closing {tag} tag
not found
')
480 if not self.tagstack:
481 raise self.HTMLBreakOnClosingTagException()
484 # XXX: This should be far less strict
485 def get_element_text_and_html_by_tag(tag, html):
487 For the first element with the specified tag in the passed HTML document
488 return its' content (text
) and the whole
element (html
)
490 def find_or_raise(haystack, needle, exc):
492 return haystack.index(needle)
495 closing_tag = f'</{tag}>'
496 whole_start = find_or_raise(
497 html, f'<{tag}', compat_HTMLParseError(f'opening {tag} tag not found'))
498 content_start = find_or_raise(
499 html[whole_start:], '>', compat_HTMLParseError(f'malformed opening {tag} tag'))
500 content_start += whole_start + 1
501 with HTMLBreakOnClosingTagParser() as parser:
502 parser.feed(html[whole_start:content_start])
503 if not parser.tagstack or parser.tagstack[0] != tag:
504 raise compat_HTMLParseError(f'parser did not match opening {tag} tag')
505 offset = content_start
506 while offset < len(html):
507 next_closing_tag_start = find_or_raise(
508 html[offset:], closing_tag,
509 compat_HTMLParseError(f'closing {tag} tag not found'))
510 next_closing_tag_end = next_closing_tag_start + len(closing_tag)
512 parser.feed(html[offset:offset + next_closing_tag_end])
513 offset += next_closing_tag_end
514 except HTMLBreakOnClosingTagParser.HTMLBreakOnClosingTagException:
515 return html[content_start:offset + next_closing_tag_start], \
516 html[whole_start:offset + next_closing_tag_end]
517 raise compat_HTMLParseError('unexpected end of html')
520 class HTMLAttributeParser(html.parser.HTMLParser):
521 """Trivial HTML parser to gather the attributes
for a single element
"""
525 html.parser.HTMLParser.__init__(self)
527 def handle_starttag(self, tag, attrs):
528 self.attrs = dict(attrs)
529 raise compat_HTMLParseError('done')
532 class HTMLListAttrsParser(html.parser.HTMLParser):
533 """HTML parser to gather the attributes
for the elements of a
list"""
536 html.parser.HTMLParser.__init__(self)
540 def handle_starttag(self, tag, attrs):
541 if tag == 'li' and self._level == 0:
542 self.items.append(dict(attrs))
545 def handle_endtag(self, tag):
549 def extract_attributes(html_element):
550 """Given a string
for an HTML element such
as
552 a
="foo" B
="bar" c
="&98;az" d
=boz
553 empty
= noval entity
="&"
556 Decode
and return a dictionary of attributes
.
558 'a': 'foo', 'b': 'bar', c
: 'baz', d
: 'boz',
559 'empty': '', 'noval': None, 'entity': '&',
560 'sq': '"', 'dq': '\''
563 parser = HTMLAttributeParser()
564 with contextlib.suppress(compat_HTMLParseError):
565 parser.feed(html_element)
570 def parse_list(webpage):
571 """Given a string
for an series of HTML
<li
> elements
,
572 return a dictionary of their attributes
"""
573 parser = HTMLListAttrsParser()
579 def clean_html(html):
580 """Clean an HTML snippet into a readable string
"""
582 if html is None: # Convenience for sanitizing descriptions etc.
585 html = re.sub(r'\s+', ' ', html)
586 html = re.sub(r'(?u)\s?<\s?br\s?/?\s?>\s?', '\n', html)
587 html = re.sub(r'(?u)<\s?/\s?p\s?>\s?<\s?p[^>]*>', '\n', html)
589 html = re.sub('<.*?>', '', html)
590 # Replace html entities
591 html = unescapeHTML(html)
595 class LenientJSONDecoder(json.JSONDecoder):
596 def __init__(self, *args, transform_source=None, ignore_extra=False, **kwargs):
597 self.transform_source, self.ignore_extra = transform_source, ignore_extra
598 super().__init__(*args, **kwargs)
601 if self.transform_source:
602 s = self.transform_source(s)
604 if self.ignore_extra:
605 return self.raw_decode(s.lstrip())[0]
606 return super().decode(s)
607 except json.JSONDecodeError as e:
608 if e.pos is not None:
609 raise type(e)(f'{e.msg} in {s[e.pos-10:e.pos+10]!r}', s, e.pos)
613 def sanitize_open(filename, open_mode):
614 """Try to
open the given filename
, and slightly tweak it
if this fails
.
616 Attempts to
open the given filename
. If this fails
, it tries to change
617 the filename slightly
, step by step
, until it
's either able to open it
618 or it fails and raises a final exception, like the standard open()
621 It returns the tuple (stream, definitive_file_name).
624 if sys.platform == 'win32
':
627 # stdout may be any IO stream, e.g. when using contextlib.redirect_stdout
628 with contextlib.suppress(io.UnsupportedOperation):
629 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
630 return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
632 for attempt in range(2):
635 if sys.platform == 'win32
':
636 # FIXME: An exclusive lock also locks the file from being read.
637 # Since windows locks are mandatory, don't lock the
file on
windows (for now
).
638 # Ref: https://github.com/yt-dlp/yt-dlp/issues/3124
639 raise LockingUnsupportedError()
640 stream
= locked_file(filename
, open_mode
, block
=False).__enter
__()
642 stream
= open(filename
, open_mode
)
643 return stream
, filename
644 except OSError as err
:
645 if attempt
or err
.errno
in (errno
.EACCES
,):
647 old_filename
, filename
= filename
, sanitize_path(filename
)
648 if old_filename
== filename
:
652 def timeconvert(timestr
):
653 """Convert RFC 2822 defined time string into system timestamp"""
655 timetuple
= email
.utils
.parsedate_tz(timestr
)
656 if timetuple
is not None:
657 timestamp
= email
.utils
.mktime_tz(timetuple
)
661 def sanitize_filename(s
, restricted
=False, is_id
=NO_DEFAULT
):
662 """Sanitizes a string so it could be used as part of a filename.
663 @param restricted Use a stricter subset of allowed characters
664 @param is_id Whether this is an ID that should be kept unchanged if possible.
665 If unset, yt-dlp's new sanitization rules are in effect
670 def replace_insane(char
):
671 if restricted
and char
in ACCENT_CHARS
:
672 return ACCENT_CHARS
[char
]
673 elif not restricted
and char
== '\n':
675 elif is_id
is NO_DEFAULT
and not restricted
and char
in '"*:<>?|/\\':
676 # Replace with their full-width unicode counterparts
677 return {'/': '\u29F8', '\\': '\u29f9'}
.get(char
, chr(ord(char
) + 0xfee0))
678 elif char
== '?' or ord(char
) < 32 or ord(char
) == 127:
681 return '' if restricted
else '\''
683 return '\0_\0-' if restricted
else '\0 \0-'
684 elif char
in '\\/|*<>':
686 if restricted
and (char
in '!&\'()[]{}$;`^,#' or char
.isspace() or ord(char
) > 127):
690 # Replace look-alike Unicode glyphs
691 if restricted
and (is_id
is NO_DEFAULT
or not is_id
):
692 s
= unicodedata
.normalize('NFKC', s
)
693 s
= re
.sub(r
'[0-9]+(?::[0-9]+)+', lambda m
: m
.group(0).replace(':', '_'), s
) # Handle timestamps
694 result
= ''.join(map(replace_insane
, s
))
695 if is_id
is NO_DEFAULT
:
696 result
= re
.sub(r
'(\0.)(?:(?=\1)..)+', r
'\1', result
) # Remove repeated substitute chars
697 STRIP_RE
= r
'(?:\0.|[ _-])*'
698 result
= re
.sub(f
'^\0.{STRIP_RE}|{STRIP_RE}\0.$', '', result
) # Remove substitute chars from start/end
699 result
= result
.replace('\0', '') or '_'
702 while '__' in result
:
703 result
= result
.replace('__', '_')
704 result
= result
.strip('_')
705 # Common case of "Foreign band name - English song title"
706 if restricted
and result
.startswith('-_'):
708 if result
.startswith('-'):
709 result
= '_' + result
[len('-'):]
710 result
= result
.lstrip('.')
716 def sanitize_path(s
, force
=False):
717 """Sanitizes and normalizes path on Windows"""
718 if sys
.platform
== 'win32':
720 drive_or_unc
, _
= os
.path
.splitdrive(s
)
726 norm_path
= os
.path
.normpath(remove_start(s
, drive_or_unc
)).split(os
.path
.sep
)
730 path_part
if path_part
in ['.', '..'] else re
.sub(r
'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part
)
731 for path_part
in norm_path
]
733 sanitized_path
.insert(0, drive_or_unc
+ os
.path
.sep
)
734 elif force
and s
and s
[0] == os
.path
.sep
:
735 sanitized_path
.insert(0, os
.path
.sep
)
736 return os
.path
.join(*sanitized_path
)
739 def sanitize_url(url
, *, scheme
='http'):
740 # Prepend protocol-less URLs with `http:` scheme in order to mitigate
741 # the number of unwanted failures due to missing protocol
744 elif url
.startswith('//'):
745 return f
'{scheme}:{url}'
746 # Fix some common typos seen so far
748 # https://github.com/ytdl-org/youtube-dl/issues/15649
749 (r
'^httpss://', r
'https://'),
750 # https://bx1.be/lives/direct-tv/
751 (r
'^rmtp([es]?)://', r
'rtmp\1://'),
753 for mistake
, fixup
in COMMON_TYPOS
:
754 if re
.match(mistake
, url
):
755 return re
.sub(mistake
, fixup
, url
)
759 def extract_basic_auth(url
):
760 parts
= urllib
.parse
.urlsplit(url
)
761 if parts
.username
is None:
763 url
= urllib
.parse
.urlunsplit(parts
._replace
(netloc
=(
764 parts
.hostname
if parts
.port
is None
765 else '%s:%d' % (parts
.hostname
, parts
.port
))))
766 auth_payload
= base64
.b64encode(
767 ('%s:%s' % (parts
.username
, parts
.password
or '')).encode())
768 return url
, f
'Basic {auth_payload.decode()}'
771 def sanitized_Request(url
, *args
, **kwargs
):
772 url
, auth_header
= extract_basic_auth(escape_url(sanitize_url(url
)))
773 if auth_header
is not None:
774 headers
= args
[1] if len(args
) >= 2 else kwargs
.setdefault('headers', {})
775 headers
['Authorization'] = auth_header
776 return urllib
.request
.Request(url
, *args
, **kwargs
)
780 """Expand shell variables and ~"""
781 return os
.path
.expandvars(compat_expanduser(s
))
784 def orderedSet(iterable
, *, lazy
=False):
785 """Remove all duplicates from the input iterable"""
787 seen
= [] # Do not use set since the items can be unhashable
793 return _iter() if lazy
else list(_iter())
796 def _htmlentity_transform(entity_with_semicolon
):
797 """Transforms an HTML entity to a character."""
798 entity
= entity_with_semicolon
[:-1]
800 # Known non-numeric HTML entity
801 if entity
in html
.entities
.name2codepoint
:
802 return chr(html
.entities
.name2codepoint
[entity
])
804 # TODO: HTML5 allows entities without a semicolon.
805 # E.g. 'Éric' should be decoded as 'Éric'.
806 if entity_with_semicolon
in html
.entities
.html5
:
807 return html
.entities
.html5
[entity_with_semicolon
]
809 mobj
= re
.match(r
'#(x[0-9a-fA-F]+|[0-9]+)', entity
)
811 numstr
= mobj
.group(1)
812 if numstr
.startswith('x'):
814 numstr
= '0%s' % numstr
817 # See https://github.com/ytdl-org/youtube-dl/issues/7518
818 with contextlib
.suppress(ValueError):
819 return chr(int(numstr
, base
))
821 # Unknown entity in name, return its literal representation
822 return '&%s;' % entity
828 assert isinstance(s
, str)
831 r
'&([^&;]+;)', lambda m
: _htmlentity_transform(m
.group(1)), s
)
834 def escapeHTML(text
):
837 .replace('&', '&')
838 .replace('<', '<')
839 .replace('>', '>')
840 .replace('"', '"')
841 .replace("'", ''')
845 def process_communicate_or_kill(p
, *args
, **kwargs
):
846 deprecation_warning(f
'"{__name__}.process_communicate_or_kill" is deprecated and may be removed '
847 f
'in a future version. Use "{__name__}.Popen.communicate_or_kill" instead')
848 return Popen
.communicate_or_kill(p
, *args
, **kwargs
)
851 class Popen(subprocess
.Popen
):
852 if sys
.platform
== 'win32':
853 _startupinfo
= subprocess
.STARTUPINFO()
854 _startupinfo
.dwFlags |
= subprocess
.STARTF_USESHOWWINDOW
859 def _fix_pyinstaller_ld_path(env
):
860 """Restore LD_LIBRARY_PATH when using PyInstaller
861 Ref: https://github.com/pyinstaller/pyinstaller/blob/develop/doc/runtime-information.rst#ld_library_path--libpath-considerations
862 https://github.com/yt-dlp/yt-dlp/issues/4573
864 if not hasattr(sys
, '_MEIPASS'):
868 orig
= env
.get(f
'{key}_ORIG')
874 _fix('LD_LIBRARY_PATH') # Linux
875 _fix('DYLD_LIBRARY_PATH') # macOS
877 def __init__(self
, *args
, env
=None, text
=False, **kwargs
):
879 env
= os
.environ
.copy()
880 self
._fix
_pyinstaller
_ld
_path
(env
)
883 kwargs
['universal_newlines'] = True # For 3.6 compatibility
884 kwargs
.setdefault('encoding', 'utf-8')
885 kwargs
.setdefault('errors', 'replace')
886 super().__init
__(*args
, env
=env
, **kwargs
, startupinfo
=self
._startupinfo
)
888 def communicate_or_kill(self
, *args
, **kwargs
):
890 return self
.communicate(*args
, **kwargs
)
891 except BaseException
: # Including KeyboardInterrupt
892 self
.kill(timeout
=None)
895 def kill(self
, *, timeout
=0):
898 self
.wait(timeout
=timeout
)
901 def run(cls
, *args
, timeout
=None, **kwargs
):
902 with cls(*args
, **kwargs
) as proc
:
903 default
= '' if proc
.text_mode
else b
''
904 stdout
, stderr
= proc
.communicate_or_kill(timeout
=timeout
)
905 return stdout
or default
, stderr
or default
, proc
.returncode
908 def get_subprocess_encoding():
909 if sys
.platform
== 'win32' and sys
.getwindowsversion()[0] >= 5:
910 # For subprocess calls, encode with locale encoding
911 # Refer to http://stackoverflow.com/a/9951851/35070
912 encoding
= preferredencoding()
914 encoding
= sys
.getfilesystemencoding()
920 def encodeFilename(s
, for_subprocess
=False):
921 assert isinstance(s
, str)
925 def decodeFilename(b
, for_subprocess
=False):
929 def encodeArgument(s
):
930 # Legacy code that uses byte strings
931 # Uncomment the following line after fixing all post processors
932 # assert isinstance(s, str), 'Internal error: %r should be of type %r, is %r' % (s, str, type(s))
933 return s
if isinstance(s
, str) else s
.decode('ascii')
936 def decodeArgument(b
):
940 def decodeOption(optval
):
943 if isinstance(optval
, bytes):
944 optval
= optval
.decode(preferredencoding())
946 assert isinstance(optval
, str)
950 _timetuple
= collections
.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))
953 def timetuple_from_msec(msec
):
954 secs
, msec
= divmod(msec
, 1000)
955 mins
, secs
= divmod(secs
, 60)
956 hrs
, mins
= divmod(mins
, 60)
957 return _timetuple(hrs
, mins
, secs
, msec
)
960 def formatSeconds(secs
, delim
=':', msec
=False):
961 time
= timetuple_from_msec(secs
* 1000)
963 ret
= '%d%s%02d%s%02d' % (time
.hours
, delim
, time
.minutes
, delim
, time
.seconds
)
965 ret
= '%d%s%02d' % (time
.minutes
, delim
, time
.seconds
)
967 ret
= '%d' % time
.seconds
968 return '%s.%03d' % (ret
, time
.milliseconds
) if msec
else ret
971 def _ssl_load_windows_store_certs(ssl_context
, storename
):
972 # Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
974 certs
= [cert
for cert
, encoding
, trust
in ssl
.enum_certificates(storename
)
975 if encoding
== 'x509_asn' and (
976 trust
is True or ssl
.Purpose
.SERVER_AUTH
.oid
in trust
)]
977 except PermissionError
:
980 with contextlib
.suppress(ssl
.SSLError
):
981 ssl_context
.load_verify_locations(cadata
=cert
)
984 def make_HTTPS_handler(params
, **kwargs
):
985 opts_check_certificate
= not params
.get('nocheckcertificate')
986 context
= ssl
.SSLContext(ssl
.PROTOCOL_TLS_CLIENT
)
987 context
.check_hostname
= opts_check_certificate
988 if params
.get('legacyserverconnect'):
989 context
.options |
= 4 # SSL_OP_LEGACY_SERVER_CONNECT
990 # Allow use of weaker ciphers in Python 3.10+. See https://bugs.python.org/issue43998
991 context
.set_ciphers('DEFAULT')
993 sys
.version_info
< (3, 10)
994 and ssl
.OPENSSL_VERSION_INFO
>= (1, 1, 1)
995 and not ssl
.OPENSSL_VERSION
.startswith('LibreSSL')
997 # Backport the default SSL ciphers and minimum TLS version settings from Python 3.10 [1].
998 # This is to ensure consistent behavior across Python versions, and help avoid fingerprinting
999 # in some situations [2][3].
1000 # Python 3.10 only supports OpenSSL 1.1.1+ [4]. Because this change is likely
1001 # untested on older versions, we only apply this to OpenSSL 1.1.1+ to be safe.
1002 # LibreSSL is excluded until further investigation due to cipher support issues [5][6].
1003 # 1. https://github.com/python/cpython/commit/e983252b516edb15d4338b0a47631b59ef1e2536
1004 # 2. https://github.com/yt-dlp/yt-dlp/issues/4627
1005 # 3. https://github.com/yt-dlp/yt-dlp/pull/5294
1006 # 4. https://peps.python.org/pep-0644/
1007 # 5. https://peps.python.org/pep-0644/#libressl-support
1008 # 6. https://github.com/yt-dlp/yt-dlp/commit/5b9f253fa0aee996cf1ed30185d4b502e00609c4#commitcomment-89054368
1009 context
.set_ciphers('@SECLEVEL=2:ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES:DHE+AES:!aNULL:!eNULL:!aDSS:!SHA1:!AESCCM')
1010 context
.minimum_version
= ssl
.TLSVersion
.TLSv1_2
1012 context
.verify_mode
= ssl
.CERT_REQUIRED
if opts_check_certificate
else ssl
.CERT_NONE
1013 if opts_check_certificate
:
1014 if has_certifi
and 'no-certifi' not in params
.get('compat_opts', []):
1015 context
.load_verify_locations(cafile
=certifi
.where())
1018 context
.load_default_certs()
1019 # Work around the issue in load_default_certs when there are bad certificates. See:
1020 # https://github.com/yt-dlp/yt-dlp/issues/1060,
1021 # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
1022 except ssl
.SSLError
:
1023 # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
1024 if sys
.platform
== 'win32' and hasattr(ssl
, 'enum_certificates'):
1025 for storename
in ('CA', 'ROOT'):
1026 _ssl_load_windows_store_certs(context
, storename
)
1027 context
.set_default_verify_paths()
1029 client_certfile
= params
.get('client_certificate')
1032 context
.load_cert_chain(
1033 client_certfile
, keyfile
=params
.get('client_certificate_key'),
1034 password
=params
.get('client_certificate_password'))
1035 except ssl
.SSLError
:
1036 raise YoutubeDLError('Unable to load client certificate')
1038 # Some servers may reject requests if ALPN extension is not sent. See:
1039 # https://github.com/python/cpython/issues/85140
1040 # https://github.com/yt-dlp/yt-dlp/issues/3878
1041 with contextlib
.suppress(NotImplementedError):
1042 context
.set_alpn_protocols(['http/1.1'])
1044 return YoutubeDLHTTPSHandler(params
, context
=context
, **kwargs
)
1047 def bug_reports_message(before
=';'):
1048 from .update
import REPOSITORY
1050 msg
= (f
'please report this issue on https://github.com/{REPOSITORY}/issues?q= , '
1051 'filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U')
1053 before
= before
.rstrip()
1054 if not before
or before
.endswith(('.', '!', '?')):
1055 msg
= msg
[0].title() + msg
[1:]
1057 return (before
+ ' ' if before
else '') + msg
1060 class YoutubeDLError(Exception):
1061 """Base exception for YoutubeDL errors."""
1064 def __init__(self
, msg
=None):
1067 elif self
.msg
is None:
1068 self
.msg
= type(self
).__name
__
1069 super().__init
__(self
.msg
)
1072 network_exceptions
= [urllib
.error
.URLError
, http
.client
.HTTPException
, socket
.error
]
1073 if hasattr(ssl
, 'CertificateError'):
1074 network_exceptions
.append(ssl
.CertificateError
)
1075 network_exceptions
= tuple(network_exceptions
)
1078 class ExtractorError(YoutubeDLError
):
1079 """Error during info extraction."""
1081 def __init__(self
, msg
, tb
=None, expected
=False, cause
=None, video_id
=None, ie
=None):
1082 """ tb, if given, is the original traceback (so that it can be printed out).
1083 If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
1085 if sys
.exc_info()[0] in network_exceptions
:
1088 self
.orig_msg
= str(msg
)
1090 self
.expected
= expected
1092 self
.video_id
= video_id
1094 self
.exc_info
= sys
.exc_info() # preserve original exception
1095 if isinstance(self
.exc_info
[1], ExtractorError
):
1096 self
.exc_info
= self
.exc_info
[1].exc_info
1097 super().__init
__(self
.__msg
)
1102 format_field(self
.ie
, None, '[%s] '),
1103 format_field(self
.video_id
, None, '%s: '),
1105 format_field(self
.cause
, None, ' (caused by %r)'),
1106 '' if self
.expected
else bug_reports_message()))
1108 def format_traceback(self
):
1109 return join_nonempty(
1110 self
.traceback
and ''.join(traceback
.format_tb(self
.traceback
)),
1111 self
.cause
and ''.join(traceback
.format_exception(None, self
.cause
, self
.cause
.__traceback
__)[1:]),
1114 def __setattr__(self
, name
, value
):
1115 super().__setattr
__(name
, value
)
1116 if getattr(self
, 'msg', None) and name
not in ('msg', 'args'):
1117 self
.msg
= self
.__msg
or type(self
).__name
__
1118 self
.args
= (self
.msg
, ) # Cannot be property
1121 class UnsupportedError(ExtractorError
):
1122 def __init__(self
, url
):
1124 'Unsupported URL: %s' % url
, expected
=True)
1128 class RegexNotFoundError(ExtractorError
):
1129 """Error when a regex didn't match"""
1133 class GeoRestrictedError(ExtractorError
):
1134 """Geographic restriction Error exception.
1136 This exception may be thrown when a video is not available from your
1137 geographic location due to geographic restrictions imposed by a website.
1140 def __init__(self
, msg
, countries
=None, **kwargs
):
1141 kwargs
['expected'] = True
1142 super().__init
__(msg
, **kwargs
)
1143 self
.countries
= countries
1146 class UserNotLive(ExtractorError
):
1147 """Error when a channel/user is not live"""
1149 def __init__(self
, msg
=None, **kwargs
):
1150 kwargs
['expected'] = True
1151 super().__init
__(msg
or 'The channel is not currently live', **kwargs
)
1154 class DownloadError(YoutubeDLError
):
1155 """Download Error exception.
1157 This exception may be thrown by FileDownloader objects if they are not
1158 configured to continue on errors. They will contain the appropriate
1162 def __init__(self
, msg
, exc_info
=None):
1163 """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
1164 super().__init
__(msg
)
1165 self
.exc_info
= exc_info
1168 class EntryNotInPlaylist(YoutubeDLError
):
1169 """Entry not in playlist exception.
1171 This exception will be thrown by YoutubeDL when a requested entry
1172 is not found in the playlist info_dict
1174 msg
= 'Entry not found in info'
1177 class SameFileError(YoutubeDLError
):
1178 """Same File exception.
1180 This exception will be thrown by FileDownloader objects if they detect
1181 multiple files would have to be downloaded to the same file on disk.
1183 msg
= 'Fixed output name but more than one file to download'
1185 def __init__(self
, filename
=None):
1186 if filename
is not None:
1187 self
.msg
+= f
': {filename}'
1188 super().__init
__(self
.msg
)
1191 class PostProcessingError(YoutubeDLError
):
1192 """Post Processing exception.
1194 This exception may be raised by PostProcessor's .run() method to
1195 indicate an error in the postprocessing task.
1199 class DownloadCancelled(YoutubeDLError
):
1200 """ Exception raised when the download queue should be interrupted """
1201 msg
= 'The download was cancelled'
1204 class ExistingVideoReached(DownloadCancelled
):
1205 """ --break-on-existing triggered """
1206 msg
= 'Encountered a video that is already in the archive, stopping due to --break-on-existing'
1209 class RejectedVideoReached(DownloadCancelled
):
1210 """ --break-on-reject triggered """
1211 msg
= 'Encountered a video that did not match filter, stopping due to --break-on-reject'
1214 class MaxDownloadsReached(DownloadCancelled
):
1215 """ --max-downloads limit has been reached. """
1216 msg
= 'Maximum number of downloads reached, stopping due to --max-downloads'
1219 class ReExtractInfo(YoutubeDLError
):
1220 """ Video info needs to be re-extracted. """
1222 def __init__(self
, msg
, expected
=False):
1223 super().__init
__(msg
)
1224 self
.expected
= expected
1227 class ThrottledDownload(ReExtractInfo
):
1228 """ Download speed below --throttled-rate. """
1229 msg
= 'The download speed is below throttle limit'
1232 super().__init
__(self
.msg
, expected
=False)
1235 class UnavailableVideoError(YoutubeDLError
):
1236 """Unavailable Format exception.
1238 This exception will be thrown when a video is requested
1239 in a format that is not available for that video.
1241 msg
= 'Unable to download video'
1243 def __init__(self
, err
=None):
1245 self
.msg
+= f
': {err}'
1246 super().__init
__(self
.msg
)
1249 class ContentTooShortError(YoutubeDLError
):
1250 """Content Too Short exception.
1252 This exception may be raised by FileDownloader objects when a file they
1253 download is too small for what the server announced first, indicating
1254 the connection was probably interrupted.
1257 def __init__(self
, downloaded
, expected
):
1258 super().__init
__(f
'Downloaded {downloaded} bytes, expected {expected} bytes')
1260 self
.downloaded
= downloaded
1261 self
.expected
= expected
1264 class XAttrMetadataError(YoutubeDLError
):
1265 def __init__(self
, code
=None, msg
='Unknown error'):
1266 super().__init
__(msg
)
1270 # Parsing code and msg
1271 if (self
.code
in (errno
.ENOSPC
, errno
.EDQUOT
)
1272 or 'No space left' in self
.msg
or 'Disk quota exceeded' in self
.msg
):
1273 self
.reason
= 'NO_SPACE'
1274 elif self
.code
== errno
.E2BIG
or 'Argument list too long' in self
.msg
:
1275 self
.reason
= 'VALUE_TOO_LONG'
1277 self
.reason
= 'NOT_SUPPORTED'
1280 class XAttrUnavailableError(YoutubeDLError
):
1284 def _create_http_connection(ydl_handler
, http_class
, is_https
, *args
, **kwargs
):
1285 hc
= http_class(*args
, **kwargs
)
1286 source_address
= ydl_handler
._params
.get('source_address')
1288 if source_address
is not None:
1289 # This is to workaround _create_connection() from socket where it will try all
1290 # address data from getaddrinfo() including IPv6. This filters the result from
1291 # getaddrinfo() based on the source_address value.
1292 # This is based on the cpython socket.create_connection() function.
1293 # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
1294 def _create_connection(address
, timeout
=socket
._GLOBAL
_DEFAULT
_TIMEOUT
, source_address
=None):
1295 host
, port
= address
1297 addrs
= socket
.getaddrinfo(host
, port
, 0, socket
.SOCK_STREAM
)
1298 af
= socket
.AF_INET
if '.' in source_address
[0] else socket
.AF_INET6
1299 ip_addrs
= [addr
for addr
in addrs
if addr
[0] == af
]
1300 if addrs
and not ip_addrs
:
1301 ip_version
= 'v4' if af
== socket
.AF_INET
else 'v6'
1303 "No remote IP%s addresses available for connect, can't use '%s' as source address"
1304 % (ip_version
, source_address
[0]))
1305 for res
in ip_addrs
:
1306 af
, socktype
, proto
, canonname
, sa
= res
1309 sock
= socket
.socket(af
, socktype
, proto
)
1310 if timeout
is not socket
._GLOBAL
_DEFAULT
_TIMEOUT
:
1311 sock
.settimeout(timeout
)
1312 sock
.bind(source_address
)
1314 err
= None # Explicitly break reference cycle
1316 except OSError as _
:
1318 if sock
is not None:
1323 raise OSError('getaddrinfo returns an empty list')
1324 if hasattr(hc
, '_create_connection'):
1325 hc
._create
_connection
= _create_connection
1326 hc
.source_address
= (source_address
, 0)
1331 def handle_youtubedl_headers(headers
):
1332 filtered_headers
= headers
1334 if 'Youtubedl-no-compression' in filtered_headers
:
1335 filtered_headers
= {k: v for k, v in filtered_headers.items() if k.lower() != 'accept-encoding'}
1336 del filtered_headers
['Youtubedl-no-compression']
1338 return filtered_headers
1341 class YoutubeDLHandler(urllib
.request
.HTTPHandler
):
1342 """Handler for HTTP requests and responses.
1344 This class, when installed with an OpenerDirector, automatically adds
1345 the standard headers to every HTTP request and handles gzipped and
1346 deflated responses from web servers. If compression is to be avoided in
1347 a particular request, the original request in the program code only has
1348 to include the HTTP header "Youtubedl-no-compression", which will be
1349 removed before making the real request.
1351 Part of this code was copied from:
1353 http://techknack.net/python-urllib2-handlers/
1355 Andrew Rowls, the author of that code, agreed to release it to the
1359 def __init__(self
, params
, *args
, **kwargs
):
1360 urllib
.request
.HTTPHandler
.__init
__(self
, *args
, **kwargs
)
1361 self
._params
= params
1363 def http_open(self
, req
):
1364 conn_class
= http
.client
.HTTPConnection
1366 socks_proxy
= req
.headers
.get('Ytdl-socks-proxy')
1368 conn_class
= make_socks_conn_class(conn_class
, socks_proxy
)
1369 del req
.headers
['Ytdl-socks-proxy']
1371 return self
.do_open(functools
.partial(
1372 _create_http_connection
, self
, conn_class
, False),
1380 return zlib
.decompress(data
, -zlib
.MAX_WBITS
)
1382 return zlib
.decompress(data
)
1388 return brotli
.decompress(data
)
1390 def http_request(self
, req
):
1391 # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
1392 # always respected by websites, some tend to give out URLs with non percent-encoded
1393 # non-ASCII characters (see telemb.py, ard.py [#3412])
1394 # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
1395 # To work around aforementioned issue we will replace request's original URL with
1396 # percent-encoded one
1397 # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
1398 # the code of this workaround has been moved here from YoutubeDL.urlopen()
1399 url
= req
.get_full_url()
1400 url_escaped
= escape_url(url
)
1402 # Substitute URL if any change after escaping
1403 if url
!= url_escaped
:
1404 req
= update_Request(req
, url
=url_escaped
)
1406 for h
, v
in self
._params
.get('http_headers', std_headers
).items():
1407 # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
1408 # The dict keys are capitalized because of this bug by urllib
1409 if h
.capitalize() not in req
.headers
:
1410 req
.add_header(h
, v
)
1412 if 'Accept-encoding' not in req
.headers
:
1413 req
.add_header('Accept-encoding', ', '.join(SUPPORTED_ENCODINGS
))
1415 req
.headers
= handle_youtubedl_headers(req
.headers
)
1417 return super().do_request_(req
)
1419 def http_response(self
, req
, resp
):
1422 if resp
.headers
.get('Content-encoding', '') == 'gzip':
1423 content
= resp
.read()
1424 gz
= gzip
.GzipFile(fileobj
=io
.BytesIO(content
), mode
='rb')
1426 uncompressed
= io
.BytesIO(gz
.read())
1427 except OSError as original_ioerror
:
1428 # There may be junk add the end of the file
1429 # See http://stackoverflow.com/q/4928560/35070 for details
1430 for i
in range(1, 1024):
1432 gz
= gzip
.GzipFile(fileobj
=io
.BytesIO(content
[:-i
]), mode
='rb')
1433 uncompressed
= io
.BytesIO(gz
.read())
1438 raise original_ioerror
1439 resp
= urllib
.request
.addinfourl(uncompressed
, old_resp
.headers
, old_resp
.url
, old_resp
.code
)
1440 resp
.msg
= old_resp
.msg
1442 if resp
.headers
.get('Content-encoding', '') == 'deflate':
1443 gz
= io
.BytesIO(self
.deflate(resp
.read()))
1444 resp
= urllib
.request
.addinfourl(gz
, old_resp
.headers
, old_resp
.url
, old_resp
.code
)
1445 resp
.msg
= old_resp
.msg
1447 if resp
.headers
.get('Content-encoding', '') == 'br':
1448 resp
= urllib
.request
.addinfourl(
1449 io
.BytesIO(self
.brotli(resp
.read())), old_resp
.headers
, old_resp
.url
, old_resp
.code
)
1450 resp
.msg
= old_resp
.msg
1451 # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
1452 # https://github.com/ytdl-org/youtube-dl/issues/6457).
1453 if 300 <= resp
.code
< 400:
1454 location
= resp
.headers
.get('Location')
1456 # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
1457 location
= location
.encode('iso-8859-1').decode()
1458 location_escaped
= escape_url(location
)
1459 if location
!= location_escaped
:
1460 del resp
.headers
['Location']
1461 resp
.headers
['Location'] = location_escaped
1464 https_request
= http_request
1465 https_response
= http_response
1468 def make_socks_conn_class(base_class
, socks_proxy
):
1469 assert issubclass(base_class
, (
1470 http
.client
.HTTPConnection
, http
.client
.HTTPSConnection
))
1472 url_components
= urllib
.parse
.urlparse(socks_proxy
)
1473 if url_components
.scheme
.lower() == 'socks5':
1474 socks_type
= ProxyType
.SOCKS5
1475 elif url_components
.scheme
.lower() in ('socks', 'socks4'):
1476 socks_type
= ProxyType
.SOCKS4
1477 elif url_components
.scheme
.lower() == 'socks4a':
1478 socks_type
= ProxyType
.SOCKS4A
1480 def unquote_if_non_empty(s
):
1483 return urllib
.parse
.unquote_plus(s
)
1487 url_components
.hostname
, url_components
.port
or 1080,
1489 unquote_if_non_empty(url_components
.username
),
1490 unquote_if_non_empty(url_components
.password
),
1493 class SocksConnection(base_class
):
1495 self
.sock
= sockssocket()
1496 self
.sock
.setproxy(*proxy_args
)
1497 if isinstance(self
.timeout
, (int, float)):
1498 self
.sock
.settimeout(self
.timeout
)
1499 self
.sock
.connect((self
.host
, self
.port
))
1501 if isinstance(self
, http
.client
.HTTPSConnection
):
1502 if hasattr(self
, '_context'): # Python > 2.6
1503 self
.sock
= self
._context
.wrap_socket(
1504 self
.sock
, server_hostname
=self
.host
)
1506 self
.sock
= ssl
.wrap_socket(self
.sock
)
1508 return SocksConnection
1511 class YoutubeDLHTTPSHandler(urllib
.request
.HTTPSHandler
):
1512 def __init__(self
, params
, https_conn_class
=None, *args
, **kwargs
):
1513 urllib
.request
.HTTPSHandler
.__init
__(self
, *args
, **kwargs
)
1514 self
._https
_conn
_class
= https_conn_class
or http
.client
.HTTPSConnection
1515 self
._params
= params
1517 def https_open(self
, req
):
1519 conn_class
= self
._https
_conn
_class
1521 if hasattr(self
, '_context'): # python > 2.6
1522 kwargs
['context'] = self
._context
1523 if hasattr(self
, '_check_hostname'): # python 3.x
1524 kwargs
['check_hostname'] = self
._check
_hostname
1526 socks_proxy
= req
.headers
.get('Ytdl-socks-proxy')
1528 conn_class
= make_socks_conn_class(conn_class
, socks_proxy
)
1529 del req
.headers
['Ytdl-socks-proxy']
1532 return self
.do_open(
1533 functools
.partial(_create_http_connection
, self
, conn_class
, True), req
, **kwargs
)
1534 except urllib
.error
.URLError
as e
:
1535 if (isinstance(e
.reason
, ssl
.SSLError
)
1536 and getattr(e
.reason
, 'reason', None) == 'SSLV3_ALERT_HANDSHAKE_FAILURE'):
1537 raise YoutubeDLError('SSLV3_ALERT_HANDSHAKE_FAILURE: Try using --legacy-server-connect')
1541 def is_path_like(f
):
1542 return isinstance(f
, (str, bytes, os
.PathLike
))
1545 class YoutubeDLCookieJar(http
.cookiejar
.MozillaCookieJar
):
1547 See [1] for cookie file format.
1549 1. https://curl.haxx.se/docs/http-cookies.html
1551 _HTTPONLY_PREFIX
= '#HttpOnly_'
1553 _HEADER
= '''# Netscape HTTP Cookie File
1554 # This file is generated by yt-dlp. Do not edit.
1557 _CookieFileEntry
= collections
.namedtuple(
1559 ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))
1561 def __init__(self
, filename
=None, *args
, **kwargs
):
1562 super().__init
__(None, *args
, **kwargs
)
1563 if is_path_like(filename
):
1564 filename
= os
.fspath(filename
)
1565 self
.filename
= filename
1568 def _true_or_false(cndn
):
1569 return 'TRUE' if cndn
else 'FALSE'
1571 @contextlib.contextmanager
1572 def open(self
, file, *, write
=False):
1573 if is_path_like(file):
1574 with open(file, 'w' if write
else 'r', encoding
='utf-8') as f
:
1581 def _really_save(self
, f
, ignore_discard
=False, ignore_expires
=False):
1584 if (not ignore_discard
and cookie
.discard
1585 or not ignore_expires
and cookie
.is_expired(now
)):
1587 name
, value
= cookie
.name
, cookie
.value
1589 # cookies.txt regards 'Set-Cookie: foo' as a cookie
1590 # with no name, whereas http.cookiejar regards it as a
1591 # cookie with no value.
1592 name
, value
= '', name
1593 f
.write('%s\n' % '\t'.join((
1595 self
._true
_or
_false
(cookie
.domain
.startswith('.')),
1597 self
._true
_or
_false
(cookie
.secure
),
1598 str_or_none(cookie
.expires
, default
=''),
1602 def save(self
, filename
=None, *args
, **kwargs
):
1604 Save cookies to a file.
1605 Code is taken from CPython 3.6
1606 https://github.com/python/cpython/blob/8d999cbf4adea053be6dbb612b9844635c4dfb8e/Lib/http/cookiejar.py#L2091-L2117 """
1608 if filename
is None:
1609 if self
.filename
is not None:
1610 filename
= self
.filename
1612 raise ValueError(http
.cookiejar
.MISSING_FILENAME_TEXT
)
1614 # Store session cookies with `expires` set to 0 instead of an empty string
1616 if cookie
.expires
is None:
1619 with self
.open(filename
, write
=True) as f
:
1620 f
.write(self
._HEADER
)
1621 self
._really
_save
(f
, *args
, **kwargs
)
1623 def load(self
, filename
=None, ignore_discard
=False, ignore_expires
=False):
1624 """Load cookies from a file."""
1625 if filename
is None:
1626 if self
.filename
is not None:
1627 filename
= self
.filename
1629 raise ValueError(http
.cookiejar
.MISSING_FILENAME_TEXT
)
1631 def prepare_line(line
):
1632 if line
.startswith(self
._HTTPONLY
_PREFIX
):
1633 line
= line
[len(self
._HTTPONLY
_PREFIX
):]
1634 # comments and empty lines are fine
1635 if line
.startswith('#') or not line
.strip():
1637 cookie_list
= line
.split('\t')
1638 if len(cookie_list
) != self
._ENTRY
_LEN
:
1639 raise http
.cookiejar
.LoadError('invalid length %d' % len(cookie_list
))
1640 cookie
= self
._CookieFileEntry
(*cookie_list
)
1641 if cookie
.expires_at
and not cookie
.expires_at
.isdigit():
1642 raise http
.cookiejar
.LoadError('invalid expires at %s' % cookie
.expires_at
)
1646 with self
.open(filename
) as f
:
1649 cf
.write(prepare_line(line
))
1650 except http
.cookiejar
.LoadError
as e
:
1651 if f
'{line.strip()} '[0] in '[{"':
1652 raise http
.cookiejar
.LoadError(
1653 'Cookies file must be Netscape formatted, not JSON. See '
1654 'https://github.com/yt-dlp/yt-dlp/wiki/FAQ#how-do-i-pass-cookies-to-yt-dlp')
1655 write_string(f
'WARNING: skipping cookie file entry due to {e}: {line!r}\n')
1658 self
._really
_load
(cf
, filename
, ignore_discard
, ignore_expires
)
1659 # Session cookies are denoted by either `expires` field set to
1660 # an empty string or 0. MozillaCookieJar only recognizes the former
1661 # (see [1]). So we need force the latter to be recognized as session
1662 # cookies on our own.
1663 # Session cookies may be important for cookies-based authentication,
1664 # e.g. usually, when user does not check 'Remember me' check box while
1665 # logging in on a site, some important cookies are stored as session
1666 # cookies so that not recognizing them will result in failed login.
1667 # 1. https://bugs.python.org/issue17164
1669 # Treat `expires=0` cookies as session cookies
1670 if cookie
.expires
== 0:
1671 cookie
.expires
= None
1672 cookie
.discard
= True
1675 class YoutubeDLCookieProcessor(urllib
.request
.HTTPCookieProcessor
):
1676 def __init__(self
, cookiejar
=None):
1677 urllib
.request
.HTTPCookieProcessor
.__init
__(self
, cookiejar
)
1679 def http_response(self
, request
, response
):
1680 return urllib
.request
.HTTPCookieProcessor
.http_response(self
, request
, response
)
1682 https_request
= urllib
.request
.HTTPCookieProcessor
.http_request
1683 https_response
= http_response
1686 class YoutubeDLRedirectHandler(urllib
.request
.HTTPRedirectHandler
):
1687 """YoutubeDL redirect handler
1689 The code is based on HTTPRedirectHandler implementation from CPython [1].
1691 This redirect handler solves two issues:
1692 - ensures redirect URL is always unicode under python 2
1693 - introduces support for experimental HTTP response status code
1694 308 Permanent Redirect [2] used by some sites [3]
1696 1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
1697 2. https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308
1698 3. https://github.com/ytdl-org/youtube-dl/issues/28768
1701 http_error_301
= http_error_303
= http_error_307
= http_error_308
= urllib
.request
.HTTPRedirectHandler
.http_error_302
1703 def redirect_request(self
, req
, fp
, code
, msg
, headers
, newurl
):
1704 """Return a Request or None in response to a redirect.
1706 This is called by the http_error_30x methods when a
1707 redirection response is received. If a redirection should
1708 take place, return a new Request to allow http_error_30x to
1709 perform the redirect. Otherwise, raise HTTPError if no-one
1710 else should try to handle this url. Return None if you can't
1711 but another Handler might.
1713 m
= req
.get_method()
1714 if (not (code
in (301, 302, 303, 307, 308) and m
in ("GET", "HEAD")
1715 or code
in (301, 302, 303) and m
== "POST")):
1716 raise urllib
.error
.HTTPError(req
.full_url
, code
, msg
, headers
, fp
)
1717 # Strictly (according to RFC 2616), 301 or 302 in response to
1718 # a POST MUST NOT cause a redirection without confirmation
1719 # from the user (of urllib.request, in this case). In practice,
1720 # essentially all clients do redirect in this case, so we do
1723 # Be conciliant with URIs containing a space. This is mainly
1724 # redundant with the more complete encoding done in http_error_302(),
1725 # but it is kept for compatibility with other callers.
1726 newurl
= newurl
.replace(' ', '%20')
1728 CONTENT_HEADERS
= ("content-length", "content-type")
1729 # NB: don't use dict comprehension for python 2.6 compatibility
1730 newheaders
= {k: v for k, v in req.headers.items() if k.lower() not in CONTENT_HEADERS}
1732 # A 303 must either use GET or HEAD for subsequent request
1733 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.4
1734 if code
== 303 and m
!= 'HEAD':
1736 # 301 and 302 redirects are commonly turned into a GET from a POST
1737 # for subsequent requests by browsers, so we'll do the same.
1738 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.2
1739 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.3
1740 if code
in (301, 302) and m
== 'POST':
1743 return urllib
.request
.Request(
1744 newurl
, headers
=newheaders
, origin_req_host
=req
.origin_req_host
,
1745 unverifiable
=True, method
=m
)
1748 def extract_timezone(date_str
):
1751 ^.{8,}? # >=8 char non-TZ prefix, if present
1752 (?P<tz>Z| # just the UTC Z, or
1753 (?:(?<=.\b\d{4}|\b\d{2}:\d\d)| # preceded by 4 digits or hh:mm or
1754 (?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d)) # not preceded by 3 alpha word or >= 4 alpha or 2 digits
1755 [ ]? # optional space
1756 (?P<sign>\+|-) # +/-
1757 (?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2}) # hh[:]mm
1761 m
= re
.search(r
'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str
)
1762 timezone
= TIMEZONE_NAMES
.get(m
and m
.group('tz').strip())
1763 if timezone
is not None:
1764 date_str
= date_str
[:-len(m
.group('tz'))]
1765 timezone
= datetime
.timedelta(hours
=timezone
or 0)
1767 date_str
= date_str
[:-len(m
.group('tz'))]
1768 if not m
.group('sign'):
1769 timezone
= datetime
.timedelta()
1771 sign
= 1 if m
.group('sign') == '+' else -1
1772 timezone
= datetime
.timedelta(
1773 hours
=sign
* int(m
.group('hours')),
1774 minutes
=sign
* int(m
.group('minutes')))
1775 return timezone
, date_str
1778 def parse_iso8601(date_str
, delimiter
='T', timezone
=None):
1779 """ Return a UNIX timestamp from the given date """
1781 if date_str
is None:
1784 date_str
= re
.sub(r
'\.[0-9]+', '', date_str
)
1786 if timezone
is None:
1787 timezone
, date_str
= extract_timezone(date_str
)
1789 with contextlib
.suppress(ValueError):
1790 date_format
= f
'%Y-%m-%d{delimiter}%H:%M:%S'
1791 dt
= datetime
.datetime
.strptime(date_str
, date_format
) - timezone
1792 return calendar
.timegm(dt
.timetuple())
1795 def date_formats(day_first
=True):
1796 return DATE_FORMATS_DAY_FIRST
if day_first
else DATE_FORMATS_MONTH_FIRST
1799 def unified_strdate(date_str
, day_first
=True):
1800 """Return a string with the date in the format YYYYMMDD"""
1802 if date_str
is None:
1806 date_str
= date_str
.replace(',', ' ')
1807 # Remove AM/PM + timezone
1808 date_str
= re
.sub(r
'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str
)
1809 _
, date_str
= extract_timezone(date_str
)
1811 for expression
in date_formats(day_first
):
1812 with contextlib
.suppress(ValueError):
1813 upload_date
= datetime
.datetime
.strptime(date_str
, expression
).strftime('%Y%m%d')
1814 if upload_date
is None:
1815 timetuple
= email
.utils
.parsedate_tz(date_str
)
1817 with contextlib
.suppress(ValueError):
1818 upload_date
= datetime
.datetime(*timetuple
[:6]).strftime('%Y%m%d')
1819 if upload_date
is not None:
1820 return str(upload_date
)
1823 def unified_timestamp(date_str
, day_first
=True):
1824 if date_str
is None:
1827 date_str
= re
.sub(r
'\s+', ' ', re
.sub(
1828 r
'(?i)[,|]|(mon|tues?|wed(nes)?|thu(rs)?|fri|sat(ur)?)(day)?', '', date_str
))
1830 pm_delta
= 12 if re
.search(r
'(?i)PM', date_str
) else 0
1831 timezone
, date_str
= extract_timezone(date_str
)
1833 # Remove AM/PM + timezone
1834 date_str
= re
.sub(r
'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str
)
1836 # Remove unrecognized timezones from ISO 8601 alike timestamps
1837 m
= re
.search(r
'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str
)
1839 date_str
= date_str
[:-len(m
.group('tz'))]
1841 # Python only supports microseconds, so remove nanoseconds
1842 m
= re
.search(r
'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str
)
1844 date_str
= m
.group(1)
1846 for expression
in date_formats(day_first
):
1847 with contextlib
.suppress(ValueError):
1848 dt
= datetime
.datetime
.strptime(date_str
, expression
) - timezone
+ datetime
.timedelta(hours
=pm_delta
)
1849 return calendar
.timegm(dt
.timetuple())
1851 timetuple
= email
.utils
.parsedate_tz(date_str
)
1853 return calendar
.timegm(timetuple
) + pm_delta
* 3600 - timezone
.total_seconds()
1856 def determine_ext(url
, default_ext
='unknown_video'):
1857 if url
is None or '.' not in url
:
1859 guess
= url
.partition('?')[0].rpartition('.')[2]
1860 if re
.match(r
'^[A-Za-z0-9]+$', guess
):
1862 # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
1863 elif guess
.rstrip('/') in KNOWN_EXTENSIONS
:
1864 return guess
.rstrip('/')
1869 def subtitles_filename(filename
, sub_lang
, sub_format
, expected_real_ext
=None):
1870 return replace_extension(filename
, sub_lang
+ '.' + sub_format
, expected_real_ext
)
1873 def datetime_from_str(date_str
, precision
='auto', format
='%Y%m%d'):
1875 Return a datetime object from a string.
1877 (now|today|yesterday|DATE)([+-]\d+(microsecond|second|minute|hour|day|week|month|year)s?)?
1879 @param format strftime format of DATE
1880 @param precision Round the datetime object: auto|microsecond|second|minute|hour|day
1881 auto: round to the unit provided in date_str (if applicable).
1883 auto_precision
= False
1884 if precision
== 'auto':
1885 auto_precision
= True
1886 precision
= 'microsecond'
1887 today
= datetime_round(datetime
.datetime
.utcnow(), precision
)
1888 if date_str
in ('now', 'today'):
1890 if date_str
== 'yesterday':
1891 return today
- datetime
.timedelta(days
=1)
1893 r
'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?',
1895 if match
is not None:
1896 start_time
= datetime_from_str(match
.group('start'), precision
, format
)
1897 time
= int(match
.group('time')) * (-1 if match
.group('sign') == '-' else 1)
1898 unit
= match
.group('unit')
1899 if unit
== 'month' or unit
== 'year':
1900 new_date
= datetime_add_months(start_time
, time
* 12 if unit
== 'year' else time
)
1906 delta
= datetime
.timedelta(**{unit + 's': time}
)
1907 new_date
= start_time
+ delta
1909 return datetime_round(new_date
, unit
)
1912 return datetime_round(datetime
.datetime
.strptime(date_str
, format
), precision
)
1915 def date_from_str(date_str
, format
='%Y%m%d', strict
=False):
1917 Return a date object from a string using datetime_from_str
1919 @param strict Restrict allowed patterns to "YYYYMMDD" and
1920 (now|today|yesterday)(-\d+(day|week|month|year)s?)?
1922 if strict
and not re
.fullmatch(r
'\d{8}|(now|today|yesterday)(-\d+(day|week|month|year)s?)?', date_str
):
1923 raise ValueError(f
'Invalid date format "{date_str}"')
1924 return datetime_from_str(date_str
, precision
='microsecond', format
=format
).date()
1927 def datetime_add_months(dt
, months
):
1928 """Increment/Decrement a datetime object by months."""
1929 month
= dt
.month
+ months
- 1
1930 year
= dt
.year
+ month
// 12
1931 month
= month
% 12 + 1
1932 day
= min(dt
.day
, calendar
.monthrange(year
, month
)[1])
1933 return dt
.replace(year
, month
, day
)
1936 def datetime_round(dt
, precision
='day'):
1938 Round a datetime object's time to a specific precision
1940 if precision
== 'microsecond':
1949 roundto
= lambda x
, n
: ((x
+ n
/ 2) // n
) * n
1950 timestamp
= calendar
.timegm(dt
.timetuple())
1951 return datetime
.datetime
.utcfromtimestamp(roundto(timestamp
, unit_seconds
[precision
]))
1954 def hyphenate_date(date_str
):
1956 Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
1957 match
= re
.match(r
'^(\d\d\d\d)(\d\d)(\d\d)$', date_str
)
1958 if match
is not None:
1959 return '-'.join(match
.groups())
1965 """Represents a time interval between two dates"""
1967 def __init__(self
, start
=None, end
=None):
1968 """start and end must be strings in the format accepted by date"""
1969 if start
is not None:
1970 self
.start
= date_from_str(start
, strict
=True)
1972 self
.start
= datetime
.datetime
.min.date()
1974 self
.end
= date_from_str(end
, strict
=True)
1976 self
.end
= datetime
.datetime
.max.date()
1977 if self
.start
> self
.end
:
1978 raise ValueError('Date range: "%s" , the start date must be before the end date' % self
)
1982 """Returns a range that only contains the given day"""
1983 return cls(day
, day
)
1985 def __contains__(self
, date
):
1986 """Check if the date is in the range"""
1987 if not isinstance(date
, datetime
.date
):
1988 date
= date_from_str(date
)
1989 return self
.start
<= date
<= self
.end
1992 return f
'{self.start.isoformat()} - {self.end.isoformat()}'
1994 def __eq__(self
, other
):
1995 return (isinstance(other
, DateRange
)
1996 and self
.start
== other
.start
and self
.end
== other
.end
)
1999 def platform_name():
2000 """ Returns the platform name as a str """
2001 deprecation_warning(f
'"{__name__}.platform_name" is deprecated, use "platform.platform" instead')
2002 return platform
.platform()
2006 def system_identifier():
2007 python_implementation
= platform
.python_implementation()
2008 if python_implementation
== 'PyPy' and hasattr(sys
, 'pypy_version_info'):
2009 python_implementation
+= ' version %d.%d.%d' % sys
.pypy_version_info
[:3]
2011 with contextlib
.suppress(OSError): # We may not have access to the executable
2012 libc_ver
= platform
.libc_ver()
2014 return 'Python %s (%s %s %s) - %s (%s%s)' % (
2015 platform
.python_version(),
2016 python_implementation
,
2018 platform
.architecture()[0],
2019 platform
.platform(),
2020 ssl
.OPENSSL_VERSION
,
2021 format_field(join_nonempty(*libc_ver
, delim
=' '), None, ', %s'),
2026 def get_windows_version():
2027 ''' Get Windows version. returns () if it's not running on Windows '''
2028 if compat_os_name
== 'nt':
2029 return version_tuple(platform
.win32_ver()[1])
2034 def write_string(s
, out
=None, encoding
=None):
2035 assert isinstance(s
, str)
2036 out
= out
or sys
.stderr
2038 if compat_os_name
== 'nt' and supports_terminal_sequences(out
):
2039 s
= re
.sub(r
'([\r\n]+)', r
' \1', s
)
2041 enc
, buffer = None, out
2042 if 'b' in getattr(out
, 'mode', ''):
2043 enc
= encoding
or preferredencoding()
2044 elif hasattr(out
, 'buffer'):
2046 enc
= encoding
or getattr(out
, 'encoding', None) or preferredencoding()
2048 buffer.write(s
.encode(enc
, 'ignore') if enc
else s
)
2052 def deprecation_warning(msg
, *, printer
=None, stacklevel
=0, **kwargs
):
2053 from . import _IN_CLI
2055 if msg
in deprecation_warning
._cache
:
2057 deprecation_warning
._cache
.add(msg
)
2059 return printer(f
'{msg}{bug_reports_message()}', **kwargs
)
2060 return write_string(f
'ERROR: {msg}{bug_reports_message()}\n', **kwargs
)
2063 warnings
.warn(DeprecationWarning(msg
), stacklevel
=stacklevel
+ 3)
2066 deprecation_warning
._cache
= set()
2069 def bytes_to_intlist(bs
):
2072 if isinstance(bs
[0], int): # Python 3
2075 return [ord(c
) for c
in bs
]
2078 def intlist_to_bytes(xs
):
2081 return struct
.pack('%dB' % len(xs
), *xs
)
2084 class LockingUnsupportedError(OSError):
2085 msg
= 'File locking is not supported'
2088 super().__init
__(self
.msg
)
2091 # Cross-platform file locking
2092 if sys
.platform
== 'win32':
2094 import ctypes
.wintypes
2097 class OVERLAPPED(ctypes
.Structure
):
2099 ('Internal', ctypes
.wintypes
.LPVOID
),
2100 ('InternalHigh', ctypes
.wintypes
.LPVOID
),
2101 ('Offset', ctypes
.wintypes
.DWORD
),
2102 ('OffsetHigh', ctypes
.wintypes
.DWORD
),
2103 ('hEvent', ctypes
.wintypes
.HANDLE
),
2106 kernel32
= ctypes
.WinDLL('kernel32')
2107 LockFileEx
= kernel32
.LockFileEx
2108 LockFileEx
.argtypes
= [
2109 ctypes
.wintypes
.HANDLE
, # hFile
2110 ctypes
.wintypes
.DWORD
, # dwFlags
2111 ctypes
.wintypes
.DWORD
, # dwReserved
2112 ctypes
.wintypes
.DWORD
, # nNumberOfBytesToLockLow
2113 ctypes
.wintypes
.DWORD
, # nNumberOfBytesToLockHigh
2114 ctypes
.POINTER(OVERLAPPED
) # Overlapped
2116 LockFileEx
.restype
= ctypes
.wintypes
.BOOL
2117 UnlockFileEx
= kernel32
.UnlockFileEx
2118 UnlockFileEx
.argtypes
= [
2119 ctypes
.wintypes
.HANDLE
, # hFile
2120 ctypes
.wintypes
.DWORD
, # dwReserved
2121 ctypes
.wintypes
.DWORD
, # nNumberOfBytesToLockLow
2122 ctypes
.wintypes
.DWORD
, # nNumberOfBytesToLockHigh
2123 ctypes
.POINTER(OVERLAPPED
) # Overlapped
2125 UnlockFileEx
.restype
= ctypes
.wintypes
.BOOL
2126 whole_low
= 0xffffffff
2127 whole_high
= 0x7fffffff
2129 def _lock_file(f
, exclusive
, block
):
2130 overlapped
= OVERLAPPED()
2131 overlapped
.Offset
= 0
2132 overlapped
.OffsetHigh
= 0
2133 overlapped
.hEvent
= 0
2134 f
._lock
_file
_overlapped
_p
= ctypes
.pointer(overlapped
)
2136 if not LockFileEx(msvcrt
.get_osfhandle(f
.fileno()),
2137 (0x2 if exclusive
else 0x0) |
(0x0 if block
else 0x1),
2138 0, whole_low
, whole_high
, f
._lock
_file
_overlapped
_p
):
2139 # NB: No argument form of "ctypes.FormatError" does not work on PyPy
2140 raise BlockingIOError(f
'Locking file failed: {ctypes.FormatError(ctypes.GetLastError())!r}')
2142 def _unlock_file(f
):
2143 assert f
._lock
_file
_overlapped
_p
2144 handle
= msvcrt
.get_osfhandle(f
.fileno())
2145 if not UnlockFileEx(handle
, 0, whole_low
, whole_high
, f
._lock
_file
_overlapped
_p
):
2146 raise OSError('Unlocking file failed: %r' % ctypes
.FormatError())
2152 def _lock_file(f
, exclusive
, block
):
2153 flags
= fcntl
.LOCK_EX
if exclusive
else fcntl
.LOCK_SH
2155 flags |
= fcntl
.LOCK_NB
2157 fcntl
.flock(f
, flags
)
2158 except BlockingIOError
:
2160 except OSError: # AOSP does not have flock()
2161 fcntl
.lockf(f
, flags
)
2163 def _unlock_file(f
):
2165 fcntl
.flock(f
, fcntl
.LOCK_UN
)
2167 fcntl
.lockf(f
, fcntl
.LOCK_UN
)
2171 def _lock_file(f
, exclusive
, block
):
2172 raise LockingUnsupportedError()
2174 def _unlock_file(f
):
2175 raise LockingUnsupportedError()
2181 def __init__(self
, filename
, mode
, block
=True, encoding
=None):
2182 if mode
not in {'r', 'rb', 'a', 'ab', 'w', 'wb'}
:
2183 raise NotImplementedError(mode
)
2184 self
.mode
, self
.block
= mode
, block
2186 writable
= any(f
in mode
for f
in 'wax+')
2187 readable
= any(f
in mode
for f
in 'r+')
2188 flags
= functools
.reduce(operator
.ior
, (
2189 getattr(os
, 'O_CLOEXEC', 0), # UNIX only
2190 getattr(os
, 'O_BINARY', 0), # Windows only
2191 getattr(os
, 'O_NOINHERIT', 0), # Windows only
2192 os
.O_CREAT
if writable
else 0, # O_TRUNC only after locking
2193 os
.O_APPEND
if 'a' in mode
else 0,
2194 os
.O_EXCL
if 'x' in mode
else 0,
2195 os
.O_RDONLY
if not writable
else os
.O_RDWR
if readable
else os
.O_WRONLY
,
2198 self
.f
= os
.fdopen(os
.open(filename
, flags
, 0o666), mode
, encoding
=encoding
)
2200 def __enter__(self
):
2201 exclusive
= 'r' not in self
.mode
2203 _lock_file(self
.f
, exclusive
, self
.block
)
2208 if 'w' in self
.mode
:
2211 except OSError as e
:
2213 errno
.ESPIPE
, # Illegal seek - expected for FIFO
2214 errno
.EINVAL
, # Invalid argument - expected for /dev/null
2223 _unlock_file(self
.f
)
2227 def __exit__(self
, *_
):
2236 def __getattr__(self
, attr
):
2237 return getattr(self
.f
, attr
)
2244 def get_filesystem_encoding():
2245 encoding
= sys
.getfilesystemencoding()
2246 return encoding
if encoding
is not None else 'utf-8'
2249 def shell_quote(args
):
2251 encoding
= get_filesystem_encoding()
2253 if isinstance(a
, bytes):
2254 # We may get a filename encoded with 'encodeFilename'
2255 a
= a
.decode(encoding
)
2256 quoted_args
.append(compat_shlex_quote(a
))
2257 return ' '.join(quoted_args
)
2260 def smuggle_url(url
, data
):
2261 """ Pass additional data in a URL for internal use. """
2263 url
, idata
= unsmuggle_url(url
, {})
2265 sdata
= urllib
.parse
.urlencode(
2266 {'__youtubedl_smuggle': json.dumps(data)}
)
2267 return url
+ '#' + sdata
2270 def unsmuggle_url(smug_url
, default
=None):
2271 if '#__youtubedl_smuggle' not in smug_url
:
2272 return smug_url
, default
2273 url
, _
, sdata
= smug_url
.rpartition('#')
2274 jsond
= urllib
.parse
.parse_qs(sdata
)['__youtubedl_smuggle'][0]
2275 data
= json
.loads(jsond
)
2279 def format_decimal_suffix(num
, fmt
='%d%s', *, factor
=1000):
2280 """ Formats numbers with decimal sufixes like K, M, etc """
2281 num
, factor
= float_or_none(num
), float(factor
)
2282 if num
is None or num
< 0:
2284 POSSIBLE_SUFFIXES
= 'kMGTPEZY'
2285 exponent
= 0 if num
== 0 else min(int(math
.log(num
, factor
)), len(POSSIBLE_SUFFIXES
))
2286 suffix
= ['', *POSSIBLE_SUFFIXES
][exponent
]
2288 suffix
= {'k': 'Ki', '': ''}
.get(suffix
, f
'{suffix}i')
2289 converted
= num
/ (factor
** exponent
)
2290 return fmt
% (converted
, suffix
)
2293 def format_bytes(bytes):
2294 return format_decimal_suffix(bytes, '%.2f%sB', factor
=1024) or 'N/A'
2297 def lookup_unit_table(unit_table
, s
, strict
=False):
2298 num_re
= NUMBER_RE
if strict
else NUMBER_RE
.replace(R
'\.', '[,.]')
2299 units_re
= '|'.join(re
.escape(u
) for u
in unit_table
)
2300 m
= (re
.fullmatch
if strict
else re
.match
)(
2301 rf
'(?P<num>{num_re})\s*(?P<unit>{units_re})\b', s
)
2305 num
= float(m
.group('num').replace(',', '.'))
2306 mult
= unit_table
[m
.group('unit')]
2307 return round(num
* mult
)
2311 """Parse a string indicating a byte quantity into an integer"""
2312 return lookup_unit_table(
2313 {u: 1024**i for i, u in enumerate(['', *'KMGTPEZY'])}
,
2314 s
.upper(), strict
=True)
2317 def parse_filesize(s
):
2321 # The lower-case forms are of course incorrect and unofficial,
2322 # but we support those too
2339 'megabytes': 1000 ** 2,
2340 'mebibytes': 1024 ** 2,
2346 'gigabytes': 1000 ** 3,
2347 'gibibytes': 1024 ** 3,
2353 'terabytes': 1000 ** 4,
2354 'tebibytes': 1024 ** 4,
2360 'petabytes': 1000 ** 5,
2361 'pebibytes': 1024 ** 5,
2367 'exabytes': 1000 ** 6,
2368 'exbibytes': 1024 ** 6,
2374 'zettabytes': 1000 ** 7,
2375 'zebibytes': 1024 ** 7,
2381 'yottabytes': 1000 ** 8,
2382 'yobibytes': 1024 ** 8,
2385 return lookup_unit_table(_UNIT_TABLE
, s
)
2392 s
= re
.sub(r
'^[^\d]+\s', '', s
).strip()
2394 if re
.match(r
'^[\d,.]+$', s
):
2395 return str_to_int(s
)
2408 ret
= lookup_unit_table(_UNIT_TABLE
, s
)
2412 mobj
= re
.match(r
'([\d,.]+)(?:$|\s)', s
)
2414 return str_to_int(mobj
.group(1))
2417 def parse_resolution(s
, *, lenient
=False):
2422 mobj
= re
.search(r
'(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)', s
)
2424 mobj
= re
.search(r
'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s
)
2427 'width': int(mobj
.group('w')),
2428 'height': int(mobj
.group('h')),
2431 mobj
= re
.search(r
'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s
)
2433 return {'height': int(mobj.group(1))}
2435 mobj
= re
.search(r
'\b([48])[kK]\b', s
)
2437 return {'height': int(mobj.group(1)) * 540}
2442 def parse_bitrate(s
):
2443 if not isinstance(s
, str):
2445 mobj
= re
.search(r
'\b(\d+)\s*kbps', s
)
2447 return int(mobj
.group(1))
2450 def month_by_name(name
, lang
='en'):
2451 """ Return the number of a month by (locale-independently) English name """
2453 month_names
= MONTH_NAMES
.get(lang
, MONTH_NAMES
['en'])
2456 return month_names
.index(name
) + 1
2461 def month_by_abbreviation(abbrev
):
2462 """ Return the number of a month by (locale-independently) English
2466 return [s
[:3] for s
in ENGLISH_MONTH_NAMES
].index(abbrev
) + 1
2471 def fix_xml_ampersands(xml_str
):
2472 """Replace all the '&' by '&' in XML"""
2474 r
'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
2479 def setproctitle(title
):
2480 assert isinstance(title
, str)
2482 # Workaround for https://github.com/yt-dlp/yt-dlp/issues/4541
2489 libc
= ctypes
.cdll
.LoadLibrary('libc.so.6')
2493 # LoadLibrary in Windows Python 2.7.13 only expects
2494 # a bytestring, but since unicode_literals turns
2495 # every string into a unicode string, it fails.
2497 title_bytes
= title
.encode()
2498 buf
= ctypes
.create_string_buffer(len(title_bytes
))
2499 buf
.value
= title_bytes
2501 libc
.prctl(15, buf
, 0, 0, 0)
2502 except AttributeError:
2503 return # Strange libc, just skip this
2506 def remove_start(s
, start
):
2507 return s
[len(start
):] if s
is not None and s
.startswith(start
) else s
2510 def remove_end(s
, end
):
2511 return s
[:-len(end
)] if s
is not None and s
.endswith(end
) else s
2514 def remove_quotes(s
):
2515 if s
is None or len(s
) < 2:
2517 for quote
in ('"', "'", ):
2518 if s
[0] == quote
and s
[-1] == quote
:
2523 def get_domain(url
):
2525 This implementation is inconsistent, but is kept for compatibility.
2526 Use this only for "webpage_url_domain"
2528 return remove_start(urllib
.parse
.urlparse(url
).netloc
, 'www.') or None
2531 def url_basename(url
):
2532 path
= urllib
.parse
.urlparse(url
).path
2533 return path
.strip('/').split('/')[-1]
2537 return re
.match(r
'https?://[^?#]+/', url
).group()
2540 def urljoin(base
, path
):
2541 if isinstance(path
, bytes):
2542 path
= path
.decode()
2543 if not isinstance(path
, str) or not path
:
2545 if re
.match(r
'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path
):
2547 if isinstance(base
, bytes):
2548 base
= base
.decode()
2549 if not isinstance(base
, str) or not re
.match(
2550 r
'^(?:https?:)?//', base
):
2552 return urllib
.parse
.urljoin(base
, path
)
2555 class HEADRequest(urllib
.request
.Request
):
2556 def get_method(self
):
2560 class PUTRequest(urllib
.request
.Request
):
2561 def get_method(self
):
2565 def int_or_none(v
, scale
=1, default
=None, get_attr
=None, invscale
=1):
2566 if get_attr
and v
is not None:
2567 v
= getattr(v
, get_attr
, None)
2569 return int(v
) * invscale
// scale
2570 except (ValueError, TypeError, OverflowError):
2574 def str_or_none(v
, default
=None):
2575 return default
if v
is None else str(v
)
2578 def str_to_int(int_str
):
2579 """ A more relaxed version of int_or_none """
2580 if isinstance(int_str
, int):
2582 elif isinstance(int_str
, str):
2583 int_str
= re
.sub(r
'[,\.\+]', '', int_str
)
2584 return int_or_none(int_str
)
2587 def float_or_none(v
, scale
=1, invscale
=1, default
=None):
2591 return float(v
) * invscale
/ scale
2592 except (ValueError, TypeError):
2596 def bool_or_none(v
, default
=None):
2597 return v
if isinstance(v
, bool) else default
2600 def strip_or_none(v
, default
=None):
2601 return v
.strip() if isinstance(v
, str) else default
2604 def url_or_none(url
):
2605 if not url
or not isinstance(url
, str):
2608 return url
if re
.match(r
'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url
) else None
2611 def request_to_url(req
):
2612 if isinstance(req
, urllib
.request
.Request
):
2613 return req
.get_full_url()
2618 def strftime_or_none(timestamp
, date_format
, default
=None):
2619 datetime_object
= None
2621 if isinstance(timestamp
, (int, float)): # unix timestamp
2622 # Using naive datetime here can break timestamp() in Windows
2623 # Ref: https://github.com/yt-dlp/yt-dlp/issues/5185, https://github.com/python/cpython/issues/94414
2624 datetime_object
= datetime
.datetime
.fromtimestamp(timestamp
, datetime
.timezone
.utc
)
2625 elif isinstance(timestamp
, str): # assume YYYYMMDD
2626 datetime_object
= datetime
.datetime
.strptime(timestamp
, '%Y%m%d')
2627 date_format
= re
.sub( # Support %s on windows
2628 r
'(?<!%)(%%)*%s', rf
'\g<1>{int(datetime_object.timestamp())}', date_format
)
2629 return datetime_object
.strftime(date_format
)
2630 except (ValueError, TypeError, AttributeError):
2634 def parse_duration(s
):
2635 if not isinstance(s
, str):
2641 days
, hours
, mins
, secs
, ms
= [None] * 5
2642 m
= re
.match(r
'''(?x)
2644 (?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?
2645 (?P<secs>(?(before_secs)[0-9]{1,2}|[0-9]+))
2646 (?P<ms>[.:][0-9]+)?Z?$
2649 days
, hours
, mins
, secs
, ms
= m
.group('days', 'hours', 'mins', 'secs', 'ms')
2654 [0-9]+\s*y(?:ears?)?,?\s*
2657 [0-9]+\s*m(?:onths?)?,?\s*
2660 [0-9]+\s*w(?:eeks?)?,?\s*
2663 (?P<days>[0-9]+)\s*d(?:ays?)?,?\s*
2667 (?P<hours>[0-9]+)\s*h(?:ours?)?,?\s*
2670 (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?,?\s*
2673 (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
2676 days
, hours
, mins
, secs
, ms
= m
.groups()
2678 m
= re
.match(r
'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s
)
2680 hours
, mins
= m
.groups()
2685 ms
= ms
.replace(':', '.')
2686 return sum(float(part
or 0) * mult
for part
, mult
in (
2687 (days
, 86400), (hours
, 3600), (mins
, 60), (secs
, 1), (ms
, 1)))
2690 def prepend_extension(filename
, ext
, expected_real_ext
=None):
2691 name
, real_ext
= os
.path
.splitext(filename
)
2693 f
'{name}.{ext}{real_ext}'
2694 if not expected_real_ext
or real_ext
[1:] == expected_real_ext
2695 else f
'{filename}.{ext}')
2698 def replace_extension(filename
, ext
, expected_real_ext
=None):
2699 name
, real_ext
= os
.path
.splitext(filename
)
2700 return '{}.{}'.format(
2701 name
if not expected_real_ext
or real_ext
[1:] == expected_real_ext
else filename
,
2705 def check_executable(exe
, args
=[]):
2706 """ Checks if the given binary is installed somewhere in PATH, and returns its name.
2707 args can be a list of arguments for a short output (like -version) """
2709 Popen
.run([exe
] + args
, stdout
=subprocess
.PIPE
, stderr
=subprocess
.PIPE
)
2715 def _get_exe_version_output(exe
, args
):
2717 # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
2718 # SIGTTOU if yt-dlp is run in the background.
2719 # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
2720 stdout
, _
, ret
= Popen
.run([encodeArgument(exe
)] + args
, text
=True,
2721 stdin
=subprocess
.PIPE
, stdout
=subprocess
.PIPE
, stderr
=subprocess
.STDOUT
)
2729 def detect_exe_version(output
, version_re
=None, unrecognized
='present'):
2730 assert isinstance(output
, str)
2731 if version_re
is None:
2732 version_re
= r
'version\s+([-0-9._a-zA-Z]+)'
2733 m
= re
.search(version_re
, output
)
2740 def get_exe_version(exe
, args
=['--version'],
2741 version_re
=None, unrecognized
=('present', 'broken')):
2742 """ Returns the version of the specified executable,
2743 or False if the executable is not present """
2744 unrecognized
= variadic(unrecognized
)
2745 assert len(unrecognized
) in (1, 2)
2746 out
= _get_exe_version_output(exe
, args
)
2748 return unrecognized
[-1]
2749 return out
and detect_exe_version(out
, version_re
, unrecognized
[0])
2752 def frange(start
=0, stop
=None, step
=1):
2755 start
, stop
= 0, start
2756 sign
= [-1, 1][step
> 0] if step
else 0
2757 while sign
* start
< sign
* stop
:
2762 class LazyList(collections
.abc
.Sequence
):
2763 """Lazy immutable list from an iterable
2764 Note that slices of a LazyList are lists and not LazyList"""
2766 class IndexError(IndexError):
2769 def __init__(self
, iterable
, *, reverse
=False, _cache
=None):
2770 self
._iterable
= iter(iterable
)
2771 self
._cache
= [] if _cache
is None else _cache
2772 self
._reversed
= reverse
2776 # We need to consume the entire iterable to iterate in reverse
2777 yield from self
.exhaust()
2779 yield from self
._cache
2780 for item
in self
._iterable
:
2781 self
._cache
.append(item
)
2785 self
._cache
.extend(self
._iterable
)
2786 self
._iterable
= [] # Discard the emptied iterable to make it pickle-able
2790 """Evaluate the entire iterable"""
2791 return self
._exhaust
()[::-1 if self
._reversed
else 1]
2794 def _reverse_index(x
):
2795 return None if x
is None else ~x
2797 def __getitem__(self
, idx
):
2798 if isinstance(idx
, slice):
2800 idx
= slice(self
._reverse
_index
(idx
.start
), self
._reverse
_index
(idx
.stop
), -(idx
.step
or 1))
2801 start
, stop
, step
= idx
.start
, idx
.stop
, idx
.step
or 1
2802 elif isinstance(idx
, int):
2804 idx
= self
._reverse
_index
(idx
)
2805 start
, stop
, step
= idx
, idx
, 0
2807 raise TypeError('indices must be integers or slices')
2808 if ((start
or 0) < 0 or (stop
or 0) < 0
2809 or (start
is None and step
< 0)
2810 or (stop
is None and step
> 0)):
2811 # We need to consume the entire iterable to be able to slice from the end
2812 # Obviously, never use this with infinite iterables
2815 return self
._cache
[idx
]
2816 except IndexError as e
:
2817 raise self
.IndexError(e
) from e
2818 n
= max(start
or 0, stop
or 0) - len(self
._cache
) + 1
2820 self
._cache
.extend(itertools
.islice(self
._iterable
, n
))
2822 return self
._cache
[idx
]
2823 except IndexError as e
:
2824 raise self
.IndexError(e
) from e
2828 self
[-1] if self
._reversed
else self
[0]
2829 except self
.IndexError:
2835 return len(self
._cache
)
2837 def __reversed__(self
):
2838 return type(self
)(self
._iterable
, reverse
=not self
._reversed
, _cache
=self
._cache
)
2841 return type(self
)(self
._iterable
, reverse
=self
._reversed
, _cache
=self
._cache
)
2844 # repr and str should mimic a list. So we exhaust the iterable
2845 return repr(self
.exhaust())
2848 return repr(self
.exhaust())
2853 class IndexError(IndexError):
2857 # This is only useful for tests
2858 return len(self
.getslice())
2860 def __init__(self
, pagefunc
, pagesize
, use_cache
=True):
2861 self
._pagefunc
= pagefunc
2862 self
._pagesize
= pagesize
2863 self
._pagecount
= float('inf')
2864 self
._use
_cache
= use_cache
2867 def getpage(self
, pagenum
):
2868 page_results
= self
._cache
.get(pagenum
)
2869 if page_results
is None:
2870 page_results
= [] if pagenum
> self
._pagecount
else list(self
._pagefunc
(pagenum
))
2872 self
._cache
[pagenum
] = page_results
2875 def getslice(self
, start
=0, end
=None):
2876 return list(self
._getslice
(start
, end
))
2878 def _getslice(self
, start
, end
):
2879 raise NotImplementedError('This method must be implemented by subclasses')
2881 def __getitem__(self
, idx
):
2882 assert self
._use
_cache
, 'Indexing PagedList requires cache'
2883 if not isinstance(idx
, int) or idx
< 0:
2884 raise TypeError('indices must be non-negative integers')
2885 entries
= self
.getslice(idx
, idx
+ 1)
2887 raise self
.IndexError()
2891 class OnDemandPagedList(PagedList
):
2892 """Download pages until a page with less than maximum results"""
2894 def _getslice(self
, start
, end
):
2895 for pagenum
in itertools
.count(start
// self
._pagesize
):
2896 firstid
= pagenum
* self
._pagesize
2897 nextfirstid
= pagenum
* self
._pagesize
+ self
._pagesize
2898 if start
>= nextfirstid
:
2902 start
% self
._pagesize
2903 if firstid
<= start
< nextfirstid
2906 ((end
- 1) % self
._pagesize
) + 1
2907 if (end
is not None and firstid
<= end
<= nextfirstid
)
2911 page_results
= self
.getpage(pagenum
)
2913 self
._pagecount
= pagenum
- 1
2915 if startv
!= 0 or endv
is not None:
2916 page_results
= page_results
[startv
:endv
]
2917 yield from page_results
2919 # A little optimization - if current page is not "full", ie. does
2920 # not contain page_size videos then we can assume that this page
2921 # is the last one - there are no more ids on further pages -
2922 # i.e. no need to query again.
2923 if len(page_results
) + startv
< self
._pagesize
:
2926 # If we got the whole page, but the next page is not interesting,
2927 # break out early as well
2928 if end
== nextfirstid
:
2932 class InAdvancePagedList(PagedList
):
2933 """PagedList with total number of pages known in advance"""
2935 def __init__(self
, pagefunc
, pagecount
, pagesize
):
2936 PagedList
.__init
__(self
, pagefunc
, pagesize
, True)
2937 self
._pagecount
= pagecount
2939 def _getslice(self
, start
, end
):
2940 start_page
= start
// self
._pagesize
2941 end_page
= self
._pagecount
if end
is None else min(self
._pagecount
, end
// self
._pagesize
+ 1)
2942 skip_elems
= start
- start_page
* self
._pagesize
2943 only_more
= None if end
is None else end
- start
2944 for pagenum
in range(start_page
, end_page
):
2945 page_results
= self
.getpage(pagenum
)
2947 page_results
= page_results
[skip_elems
:]
2949 if only_more
is not None:
2950 if len(page_results
) < only_more
:
2951 only_more
-= len(page_results
)
2953 yield from page_results
[:only_more
]
2955 yield from page_results
2958 class PlaylistEntries
:
2959 MissingEntry
= object()
2960 is_exhausted
= False
2962 def __init__(self
, ydl
, info_dict
):
2965 # _entries must be assigned now since infodict can change during iteration
2966 entries
= info_dict
.get('entries')
2968 raise EntryNotInPlaylist('There are no entries')
2969 elif isinstance(entries
, list):
2970 self
.is_exhausted
= True
2972 requested_entries
= info_dict
.get('requested_entries')
2973 self
.is_incomplete
= requested_entries
is not None
2974 if self
.is_incomplete
:
2975 assert self
.is_exhausted
2976 self
._entries
= [self
.MissingEntry
] * max(requested_entries
or [0])
2977 for i
, entry
in zip(requested_entries
, entries
):
2978 self
._entries
[i
- 1] = entry
2979 elif isinstance(entries
, (list, PagedList
, LazyList
)):
2980 self
._entries
= entries
2982 self
._entries
= LazyList(entries
)
2984 PLAYLIST_ITEMS_RE
= re
.compile(r
'''(?x)
2985 (?P<start>[+-]?\d+)?
2987 (?P<end>[+-]?\d+|inf(?:inite)?)?
2988 (?::(?P<step>[+-]?\d+))?
2992 def parse_playlist_items(cls
, string
):
2993 for segment
in string
.split(','):
2995 raise ValueError('There is two or more consecutive commas')
2996 mobj
= cls
.PLAYLIST_ITEMS_RE
.fullmatch(segment
)
2998 raise ValueError(f
'{segment!r} is not a valid specification')
2999 start
, end
, step
, has_range
= mobj
.group('start', 'end', 'step', 'range')
3000 if int_or_none(step
) == 0:
3001 raise ValueError(f
'Step in {segment!r} cannot be zero')
3002 yield slice(int_or_none(start
), float_or_none(end
), int_or_none(step
)) if has_range
else int(start
)
3004 def get_requested_items(self
):
3005 playlist_items
= self
.ydl
.params
.get('playlist_items')
3006 playlist_start
= self
.ydl
.params
.get('playliststart', 1)
3007 playlist_end
= self
.ydl
.params
.get('playlistend')
3008 # For backwards compatibility, interpret -1 as whole list
3009 if playlist_end
in (-1, None):
3011 if not playlist_items
:
3012 playlist_items
= f
'{playlist_start}:{playlist_end}'
3013 elif playlist_start
!= 1 or playlist_end
:
3014 self
.ydl
.report_warning('Ignoring playliststart and playlistend because playlistitems was given', only_once
=True)
3016 for index
in self
.parse_playlist_items(playlist_items
):
3017 for i
, entry
in self
[index
]:
3022 # TODO: Add auto-generated fields
3023 self
.ydl
._match
_entry
(entry
, incomplete
=True, silent
=True)
3024 except (ExistingVideoReached
, RejectedVideoReached
):
3027 def get_full_count(self
):
3028 if self
.is_exhausted
and not self
.is_incomplete
:
3030 elif isinstance(self
._entries
, InAdvancePagedList
):
3031 if self
._entries
._pagesize
== 1:
3032 return self
._entries
._pagecount
3034 @functools.cached_property
3036 if isinstance(self
._entries
, list):
3039 entry
= self
._entries
[i
]
3041 entry
= self
.MissingEntry
3042 if not self
.is_incomplete
:
3043 raise self
.IndexError()
3044 if entry
is self
.MissingEntry
:
3045 raise EntryNotInPlaylist(f
'Entry {i + 1} cannot be found')
3050 return type(self
.ydl
)._handle
_extraction
_exceptions
(lambda _
, i
: self
._entries
[i
])(self
.ydl
, i
)
3051 except (LazyList
.IndexError, PagedList
.IndexError):
3052 raise self
.IndexError()
3055 def __getitem__(self
, idx
):
3056 if isinstance(idx
, int):
3057 idx
= slice(idx
, idx
)
3059 # NB: PlaylistEntries[1:10] => (0, 1, ... 9)
3060 step
= 1 if idx
.step
is None else idx
.step
3061 if idx
.start
is None:
3062 start
= 0 if step
> 0 else len(self
) - 1
3064 start
= idx
.start
- 1 if idx
.start
>= 0 else len(self
) + idx
.start
3066 # NB: Do not call len(self) when idx == [:]
3067 if idx
.stop
is None:
3068 stop
= 0 if step
< 0 else float('inf')
3070 stop
= idx
.stop
- 1 if idx
.stop
>= 0 else len(self
) + idx
.stop
3071 stop
+= [-1, 1][step
> 0]
3073 for i
in frange(start
, stop
, step
):
3077 entry
= self
._getter
(i
)
3078 except self
.IndexError:
3079 self
.is_exhausted
= True
3086 return len(tuple(self
[:]))
3088 class IndexError(IndexError):
3092 def uppercase_escape(s
):
3093 unicode_escape
= codecs
.getdecoder('unicode_escape')
3095 r
'\\U[0-9a-fA-F]{8}',
3096 lambda m
: unicode_escape(m
.group(0))[0],
3100 def lowercase_escape(s
):
3101 unicode_escape
= codecs
.getdecoder('unicode_escape')
3103 r
'\\u[0-9a-fA-F]{4}',
3104 lambda m
: unicode_escape(m
.group(0))[0],
3108 def escape_rfc3986(s
):
3109 """Escape non-ASCII characters as suggested by RFC 3986"""
3110 return urllib
.parse
.quote(s
, b
"%/;:@&=+$,!~*'()?#[]")
3113 def escape_url(url
):
3114 """Escape URL as suggested by RFC 3986"""
3115 url_parsed
= urllib
.parse
.urlparse(url
)
3116 return url_parsed
._replace
(
3117 netloc
=url_parsed
.netloc
.encode('idna').decode('ascii'),
3118 path
=escape_rfc3986(url_parsed
.path
),
3119 params
=escape_rfc3986(url_parsed
.params
),
3120 query
=escape_rfc3986(url_parsed
.query
),
3121 fragment
=escape_rfc3986(url_parsed
.fragment
)
3125 def parse_qs(url
, **kwargs
):
3126 return urllib
.parse
.parse_qs(urllib
.parse
.urlparse(url
).query
, **kwargs
)
3129 def read_batch_urls(batch_fd
):
3131 if not isinstance(url
, str):
3132 url
= url
.decode('utf-8', 'replace')
3133 BOM_UTF8
= ('\xef\xbb\xbf', '\ufeff')
3134 for bom
in BOM_UTF8
:
3135 if url
.startswith(bom
):
3136 url
= url
[len(bom
):]
3138 if not url
or url
.startswith(('#', ';', ']')):
3140 # "#" cannot be stripped out since it is part of the URI
3141 # However, it can be safely stripped out if following a whitespace
3142 return re
.split(r
'\s#', url
, 1)[0].rstrip()
3144 with contextlib
.closing(batch_fd
) as fd
:
3145 return [url
for url
in map(fixup
, fd
) if url
]
3148 def urlencode_postdata(*args
, **kargs
):
3149 return urllib
.parse
.urlencode(*args
, **kargs
).encode('ascii')
3152 def update_url_query(url
, query
):
3155 parsed_url
= urllib
.parse
.urlparse(url
)
3156 qs
= urllib
.parse
.parse_qs(parsed_url
.query
)
3158 return urllib
.parse
.urlunparse(parsed_url
._replace
(
3159 query
=urllib
.parse
.urlencode(qs
, True)))
3162 def update_Request(req
, url
=None, data
=None, headers
=None, query
=None):
3163 req_headers
= req
.headers
.copy()
3164 req_headers
.update(headers
or {})
3165 req_data
= data
or req
.data
3166 req_url
= update_url_query(url
or req
.get_full_url(), query
)
3167 req_get_method
= req
.get_method()
3168 if req_get_method
== 'HEAD':
3169 req_type
= HEADRequest
3170 elif req_get_method
== 'PUT':
3171 req_type
= PUTRequest
3173 req_type
= urllib
.request
.Request
3175 req_url
, data
=req_data
, headers
=req_headers
,
3176 origin_req_host
=req
.origin_req_host
, unverifiable
=req
.unverifiable
)
3177 if hasattr(req
, 'timeout'):
3178 new_req
.timeout
= req
.timeout
3182 def _multipart_encode_impl(data
, boundary
):
3183 content_type
= 'multipart/form-data; boundary=%s' % boundary
3186 for k
, v
in data
.items():
3187 out
+= b
'--' + boundary
.encode('ascii') + b
'\r\n'
3188 if isinstance(k
, str):
3190 if isinstance(v
, str):
3192 # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
3193 # suggests sending UTF-8 directly. Firefox sends UTF-8, too
3194 content
= b
'Content-Disposition: form-data; name="' + k
+ b
'"\r\n\r\n' + v
+ b
'\r\n'
3195 if boundary
.encode('ascii') in content
:
3196 raise ValueError('Boundary overlaps with data')
3199 out
+= b
'--' + boundary
.encode('ascii') + b
'--\r\n'
3201 return out
, content_type
3204 def multipart_encode(data
, boundary
=None):
3206 Encode a dict to RFC 7578-compliant form-data
3209 A dict where keys and values can be either Unicode or bytes-like
3212 If specified a Unicode object, it's used as the boundary. Otherwise
3213 a random boundary is generated.
3215 Reference: https://tools.ietf.org/html/rfc7578
3217 has_specified_boundary
= boundary
is not None
3220 if boundary
is None:
3221 boundary
= '---------------' + str(random
.randrange(0x0fffffff, 0xffffffff))
3224 out
, content_type
= _multipart_encode_impl(data
, boundary
)
3227 if has_specified_boundary
:
3231 return out
, content_type
3234 def variadic(x
, allowed_types
=(str, bytes, dict)):
3235 return x
if isinstance(x
, collections
.abc
.Iterable
) and not isinstance(x
, allowed_types
) else (x
,)
3238 def dict_get(d
, key_or_keys
, default
=None, skip_false_values
=True):
3239 for val
in map(d
.get
, variadic(key_or_keys
)):
3240 if val
is not None and (val
or not skip_false_values
):
3245 def try_call(*funcs
, expected_type
=None, args
=[], kwargs
={}):
3248 val
= f(*args
, **kwargs
)
3249 except (AttributeError, KeyError, TypeError, IndexError, ValueError, ZeroDivisionError):
3252 if expected_type
is None or isinstance(val
, expected_type
):
3256 def try_get(src
, getter
, expected_type
=None):
3257 return try_call(*variadic(getter
), args
=(src
,), expected_type
=expected_type
)
3260 def filter_dict(dct
, cndn
=lambda _
, v
: v
is not None):
3261 return {k: v for k, v in dct.items() if cndn(k, v)}
3264 def merge_dicts(*dicts
):
3266 for a_dict
in dicts
:
3267 for k
, v
in a_dict
.items():
3268 if (v
is not None and k
not in merged
3269 or isinstance(v
, str) and merged
[k
] == ''):
3274 def encode_compat_str(string
, encoding
=preferredencoding(), errors
='strict'):
3275 return string
if isinstance(string
, str) else str(string
, encoding
, errors
)
3287 TV_PARENTAL_GUIDELINES
= {
3297 def parse_age_limit(s
):
3298 # isinstance(False, int) is True. So type() must be used instead
3299 if type(s
) is int: # noqa: E721
3300 return s
if 0 <= s
<= 21 else None
3301 elif not isinstance(s
, str):
3303 m
= re
.match(r
'^(?P<age>\d{1,2})\+?$', s
)
3305 return int(m
.group('age'))
3308 return US_RATINGS
[s
]
3309 m
= re
.match(r
'^TV[_-]?(%s)$' % '|'.join(k
[3:] for k
in TV_PARENTAL_GUIDELINES
), s
)
3311 return TV_PARENTAL_GUIDELINES
['TV-' + m
.group(1)]
3315 def strip_jsonp(code
):
3318 (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
3319 (?:\s*&&\s*(?P=func_name))?
3320 \s*\(\s*(?P<callback_data>.*)\);?
3321 \s*?(?://[^\n]*)*$''',
3322 r
'\g<callback_data>', code
)
3325 def js_to_json(code
, vars={}, *, strict
=False):
3326 # vars is a dict of var, val pairs to substitute
3327 STRING_QUOTES
= '\'"'
3328 STRING_RE
= '|'.join(rf
'{q}(?:\\.|[^\\{q}])*{q}' for q
in STRING_QUOTES
)
3329 COMMENT_RE
= r
'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
3330 SKIP_RE
= fr
'\s*(?:{COMMENT_RE})?\s*'
3332 (fr
'(?s)^(0[xX][0-9a-fA-F]+){SKIP_RE}:?$', 16),
3333 (fr
'(?s)^(0+[0-7]+){SKIP_RE}:?$', 8),
3336 def process_escape(match
):
3337 JSON_PASSTHROUGH_ESCAPES
= R
'"\bfnrtu'
3338 escape
= match
.group(1) or match
.group(2)
3340 return (Rf
'\{escape}' if escape
in JSON_PASSTHROUGH_ESCAPES
3341 else R
'\u00' if escape
== 'x'
3342 else '' if escape
== '\n'
3347 if v
in ('true', 'false', 'null'):
3349 elif v
in ('undefined', 'void 0'):
3351 elif v
.startswith('/*') or v
.startswith('//') or v
.startswith('!') or v
== ',':
3354 if v
[0] in STRING_QUOTES
:
3355 escaped
= re
.sub(r
'(?s)(")|\\(.)', process_escape
, v
[1:-1])
3356 return f
'"{escaped}"'
3358 for regex
, base
in INTEGER_TABLE
:
3359 im
= re
.match(regex
, v
)
3361 i
= int(im
.group(1), base
)
3362 return f
'"{i}":' if v
.endswith(':') else str(i
)
3368 except json
.JSONDecodeError
:
3369 return json
.dumps(vars[v
])
3376 raise ValueError(f
'Unknown value: {v}')
3378 def create_map(mobj
):
3379 return json
.dumps(dict(json
.loads(js_to_json(mobj
.group(1) or '[]', vars=vars))))
3381 code
= re
.sub(r
'new Map\((\[.*?\])?\)', create_map
, code
)
3383 code
= re
.sub(r
'new Date\((".+")\)', r
'\g<1>', code
)
3384 code
= re
.sub(r
'new \w+\((.*?)\)', lambda m
: json
.dumps(m
.group(0)), code
)
3385 code
= re
.sub(r
'parseInt\([^\d]+(\d+)[^\d]+\)', r
'\1', code
)
3386 code
= re
.sub(r
'\(function\([^)]*\)\s*\{[^}]*\}\s*\)\s*\(\s*(["\'][^
)]*["\'])\s*\)', r'\1', code)
3388 return re.sub(rf'''(?sx)
3390 {COMMENT_RE}|,(?={SKIP_RE}[\]}}])|
3391 void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
3392 \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{SKIP_RE}:)?|
3393 [0-9]+(?={SKIP_RE}:)|
3398 def qualities(quality_ids):
3399 """ Get a numeric quality value out of a list of possible values """
3402 return quality_ids.index(qid)
3408 POSTPROCESS_WHEN = ('pre_process', 'after_filter', 'video', 'before_dl', 'post_process', 'after_move', 'after_video', 'playlist')
3412 'default': '%(title)s [%(id)s].%(ext)s',
3413 'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
3419 'description': 'description',
3420 'annotation': 'annotations.xml',
3421 'infojson': 'info.json',
3424 'pl_thumbnail': None,
3425 'pl_description': 'description',
3426 'pl_infojson': 'info.json',
3429 # As of [1] format syntax is:
3430 # %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
3431 # 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
3432 STR_FORMAT_RE_TMPL = r'''(?x)
3433 (?<!%)(?P<prefix>(?:%%)*)
3435 (?P<has_key>\((?P<key>{0})\))?
3437 (?P<conversion>[#0\-+ ]+)?
3439 (?P<precision>\.\d+)?
3440 (?P<len_mod>[hlL])? # unused in python
3441 {1} # conversion type
3446 STR_FORMAT_TYPES = 'diouxXeEfFgGcrs'
3449 def limit_length(s, length):
3450 """ Add ellipses to overly long strings """
3455 return s[:length - len(ELLIPSES)] + ELLIPSES
3459 def version_tuple(v):
3460 return tuple(int(e) for e in re.split(r'[-.]', v))
3463 def is_outdated_version(version, limit, assume_new=True):
3465 return not assume_new
3467 return version_tuple(version) < version_tuple(limit)
3469 return not assume_new
3472 def ytdl_is_updateable():
3473 """ Returns if yt-dlp can be updated with -U """
3475 from .update import is_non_updateable
3477 return not is_non_updateable()
3480 def args_to_str(args):
3481 # Get a short string representation for a subprocess command
3482 return ' '.join(compat_shlex_quote(a) for a in args)
3485 def error_to_compat_str(err):
3489 def error_to_str(err):
3490 return f'{type(err).__name__}: {err}'
3493 def mimetype2ext(mt, default=NO_DEFAULT):
3494 if not isinstance(mt, str):
3495 if default is not NO_DEFAULT:
3511 'x-matroska': 'mkv',
3513 'x-mp4-fragmented': 'mp4',
3518 # application (streaming playlists)
3522 'vnd.apple.mpegurl': 'm3u8',
3523 'vnd.ms-sstr+xml': 'ism',
3524 'x-mpegurl': 'm3u8',
3528 # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3.
3529 # Using .mp3 as it's the most popular one
3530 'audio/mpeg': 'mp3',
3531 'audio/webm': 'webm',
3532 'audio/x-matroska': 'mka',
3533 'audio/x-mpegurl': 'm3u',
3541 'x-realaudio': 'ra',
3552 'vnd.wap.wbmp': 'wbmp',
3559 'filmstrip+json': 'fs',
3560 'smptett+xml': 'tt',
3563 'x-ms-sami': 'sami',
3572 mimetype = mt.partition(';')[0].strip().lower()
3573 _, _, subtype = mimetype.rpartition('/')
3575 ext = traverse_obj(MAP, mimetype, subtype, subtype.rsplit('+')[-1])
3578 elif default is not NO_DEFAULT:
3580 return subtype.replace('+', '.')
3583 def ext2mimetype(ext_or_url):
3586 if '.' not in ext_or_url:
3587 ext_or_url = f'file.{ext_or_url}'
3588 return mimetypes.guess_type(ext_or_url)[0]
3591 def parse_codecs(codecs_str):
3592 # http://tools.ietf.org/html/rfc6381
3595 split_codecs = list(filter(None, map(
3596 str.strip, codecs_str.strip().strip(',').split(','))))
3597 vcodec, acodec, scodec, hdr = None, None, None, None
3598 for full_codec in split_codecs:
3599 parts = re.sub(r'0+(?=\d)', '', full_codec).split('.')
3600 if parts[0] in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
3601 'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
3605 if parts[0] in ('dvh1', 'dvhe'):
3607 elif parts[0] == 'av1' and traverse_obj(parts, 3) == '10':
3609 elif parts[:2] == ['vp9', '2']:
3611 elif parts[0] in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-4',
3612 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
3613 acodec = acodec or full_codec
3614 elif parts[0] in ('stpp', 'wvtt'):
3615 scodec = scodec or full_codec
3617 write_string(f'WARNING: Unknown codec {full_codec}\n')
3618 if vcodec or acodec or scodec:
3620 'vcodec': vcodec or 'none',
3621 'acodec': acodec or 'none',
3622 'dynamic_range': hdr,
3623 **({'scodec': scodec} if scodec is not None else {}),
3625 elif len(split_codecs) == 2:
3627 'vcodec': split_codecs[0],
3628 'acodec': split_codecs[1],
3633 def get_compatible_ext(*, vcodecs, acodecs, vexts, aexts, preferences=None):
3634 assert len(vcodecs) == len(vexts) and len(acodecs) == len(aexts)
3636 allow_mkv = not preferences or 'mkv' in preferences
3638 if allow_mkv and max(len(acodecs), len(vcodecs)) > 1:
3639 return 'mkv' # TODO: any other format allows this?
3641 # TODO: All codecs supported by parse_codecs isn't handled here
3642 COMPATIBLE_CODECS = {
3644 'av1', 'hevc', 'avc1', 'mp4a', 'ac-4', # fourcc (m3u8, mpd)
3645 'h264', 'aacl', 'ec-3', # Set in ISM
3648 'av1', 'vp9', 'vp8', 'opus', 'vrbs',
3649 'vp9x', 'vp8x', # in the webm spec
3653 sanitize_codec = functools.partial(try_get, getter=lambda x: x[0].split('.')[0].replace('0', ''))
3654 vcodec, acodec = sanitize_codec(vcodecs), sanitize_codec(acodecs)
3656 for ext in preferences or COMPATIBLE_CODECS.keys():
3657 codec_set = COMPATIBLE_CODECS.get(ext, set())
3658 if ext == 'mkv' or codec_set.issuperset((vcodec, acodec)):
3662 {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma', 'mov'},
3665 for ext in preferences or vexts:
3666 current_exts = {ext, *vexts, *aexts}
3667 if ext == 'mkv' or current_exts == {ext} or any(
3668 ext_sets.issuperset(current_exts) for ext_sets in COMPATIBLE_EXTS):
3670 return 'mkv' if allow_mkv else preferences[-1]
3673 def urlhandle_detect_ext(url_handle, default=NO_DEFAULT):
3674 getheader = url_handle.headers.get
3676 cd = getheader('Content-Disposition')
3678 m = re.match(r'attachment;\s*filename="(?P
<filename
>[^
"]+)"', cd)
3680 e = determine_ext(m.group('filename
'), default_ext=None)
3684 meta_ext = getheader('x
-amz
-meta
-name
')
3686 e = meta_ext.rpartition('.')[2]
3690 return mimetype2ext(getheader('Content
-Type
'), default=default)
3693 def encode_data_uri(data, mime_type):
3694 return 'data
:%s;base64
,%s' % (mime_type, base64.b64encode(data).decode('ascii
'))
3697 def age_restricted(content_limit, age_limit):
3698 """ Returns True iff the content should be blocked """
3700 if age_limit is None: # No limit set
3702 if content_limit is None:
3703 return False # Content available for everyone
3704 return age_limit < content_limit
3707 # List of known byte-order-marks (BOM)
3709 (b'\xef\xbb\xbf', 'utf
-8'),
3710 (b'\x00\x00\xfe\xff', 'utf
-32-be
'),
3711 (b'\xff\xfe\x00\x00', 'utf
-32-le
'),
3712 (b'\xff\xfe', 'utf
-16-le
'),
3713 (b'\xfe\xff', 'utf
-16-be
'),
3717 def is_html(first_bytes):
3718 """ Detect whether a file contains HTML by examining its first bytes. """
3721 for bom, enc in BOMS:
3722 while first_bytes.startswith(bom):
3723 encoding, first_bytes = enc, first_bytes[len(bom):]
3725 return re.match(r'^\s
*<', first_bytes.decode(encoding, 'replace
'))
3728 def determine_protocol(info_dict):
3729 protocol = info_dict.get('protocol
')
3730 if protocol is not None:
3733 url = sanitize_url(info_dict['url
'])
3734 if url.startswith('rtmp
'):
3736 elif url.startswith('mms
'):
3738 elif url.startswith('rtsp
'):
3741 ext = determine_ext(url)
3743 return 'm3u8
' if info_dict.get('is_live
') else 'm3u8_native
'
3747 return urllib.parse.urlparse(url).scheme
3750 def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False):
3751 """ Render a list of rows, each as a list of values.
3752 Text after a \t will be right aligned """
3754 return len(remove_terminal_sequences(string).replace('\t', ''))
3756 def get_max_lens(table):
3757 return [max(width(str(v)) for v in col) for col in zip(*table)]
3759 def filter_using_list(row, filterArray):
3760 return [col for take, col in itertools.zip_longest(filterArray, row, fillvalue=True) if take]
3762 max_lens = get_max_lens(data) if hide_empty else []
3763 header_row = filter_using_list(header_row, max_lens)
3764 data = [filter_using_list(row, max_lens) for row in data]
3766 table = [header_row] + data
3767 max_lens = get_max_lens(table)
3770 table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data
3771 table[1][-1] = table[1][-1][:-extra_gap * len(delim)] # Remove extra_gap from end of delimiter
3773 for pos, text in enumerate(map(str, row)):
3775 row[pos] = text.replace('\t', ' ' * (max_lens[pos] - width(text))) + ' ' * extra_gap
3777 row[pos] = text + ' ' * (max_lens[pos] - width(text) + extra_gap)
3778 ret = '\n'.join(''.join(row).rstrip() for row in table)
3782 def _match_one(filter_part, dct, incomplete):
3783 # TODO: Generalize code with YoutubeDL._build_format_filter
3784 STRING_OPERATORS = {
3785 '*=': operator.contains,
3786 '^
=': lambda attr, value: attr.startswith(value),
3787 '$
=': lambda attr, value: attr.endswith(value),
3788 '~
=': lambda attr, value: re.search(value, attr),
3790 COMPARISON_OPERATORS = {
3792 '<=': operator.le, # "<=" must be defined above "<"
3799 if isinstance(incomplete, bool):
3800 is_incomplete = lambda _: incomplete
3802 is_incomplete = lambda k: k in incomplete
3804 operator_rex = re.compile(r'''(?x)
3806 \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
3808 (?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
3811 ''' % '|
'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
3812 m = operator_rex.fullmatch(filter_part.strip())
3815 unnegated_op = COMPARISON_OPERATORS[m['op
']]
3817 op = lambda attr, value: not unnegated_op(attr, value)
3820 comparison_value = m['quotedstrval
'] or m['strval
'] or m['intval
']
3822 comparison_value = comparison_value.replace(r'\
%s' % m['quote
'], m['quote
'])
3823 actual_value = dct.get(m['key
'])
3824 numeric_comparison = None
3825 if isinstance(actual_value, (int, float)):
3826 # If the original field is a string and matching comparisonvalue is
3827 # a number we should respect the origin of the original field
3828 # and process comparison value as a string (see
3829 # https://github.com/ytdl-org/youtube-dl/issues/11082)
3831 numeric_comparison = int(comparison_value)
3833 numeric_comparison = parse_filesize(comparison_value)
3834 if numeric_comparison is None:
3835 numeric_comparison = parse_filesize(f'{comparison_value}B
')
3836 if numeric_comparison is None:
3837 numeric_comparison = parse_duration(comparison_value)
3838 if numeric_comparison is not None and m['op
'] in STRING_OPERATORS:
3839 raise ValueError('Operator
%s only supports string values
!' % m['op
'])
3840 if actual_value is None:
3841 return is_incomplete(m['key
']) or m['none_inclusive
']
3842 return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)
3845 '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
3846 '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
3848 operator_rex = re.compile(r'''(?x)
3849 (?P<op>%s)\s*(?P<key>[a-z_]+)
3850 ''' % '|
'.join(map(re.escape, UNARY_OPERATORS.keys())))
3851 m = operator_rex.fullmatch(filter_part.strip())
3853 op = UNARY_OPERATORS[m.group('op
')]
3854 actual_value = dct.get(m.group('key
'))
3855 if is_incomplete(m.group('key
')) and actual_value is None:
3857 return op(actual_value)
3859 raise ValueError('Invalid
filter part
%r' % filter_part)
3862 def match_str(filter_str, dct, incomplete=False):
3863 """ Filter a dictionary with a simple string syntax.
3864 @returns Whether the filter passes
3865 @param incomplete Set of keys that is expected to be missing from dct.
3866 Can be True/False to indicate all/none of the keys may be missing.
3867 All conditions on incomplete keys pass if the key is missing
3870 _match_one(filter_part.replace(r'\
&', '&'), dct, incomplete)
3871 for filter_part in re.split(r'(?
<!\\)&', filter_str))
3874 def match_filter_func(filters):
3877 filters = set(variadic(filters))
3879 interactive = '-' in filters
3883 def _match_func(info_dict, incomplete=False):
3884 if not filters or any(match_str(f, info_dict, incomplete) for f in filters):
3885 return NO_DEFAULT if interactive and not incomplete else None
3887 video_title = info_dict.get('title
') or info_dict.get('id') or 'entry
'
3888 filter_str = ') |
('.join(map(str.strip, filters))
3889 return f'{video_title} does
not pass filter ({filter_str}
), skipping
..'
3893 class download_range_func:
3894 def __init__(self, chapters, ranges):
3895 self.chapters, self.ranges = chapters, ranges
3897 def __call__(self, info_dict, ydl):
3898 if not self.ranges and not self.chapters:
3901 warning = ('There are no chapters matching the regex
' if info_dict.get('chapters
')
3902 else 'Cannot match chapters since chapter information
is unavailable
')
3903 for regex in self.chapters or []:
3904 for i, chapter in enumerate(info_dict.get('chapters
') or []):
3905 if re.search(regex, chapter['title
']):
3907 yield {**chapter, 'index': i}
3908 if self.chapters and warning:
3909 ydl.to_screen(f'[info
] {info_dict["id"]}
: {warning}
')
3911 yield from ({'start_time': start, 'end_time': end} for start, end in self.ranges or [])
3913 def __eq__(self, other):
3914 return (isinstance(other, download_range_func)
3915 and self.chapters == other.chapters and self.ranges == other.ranges)
3918 return f'{type(self).__name__}
({self.chapters}
, {self.ranges}
)'
3921 def parse_dfxp_time_expr(time_expr):
3925 mobj = re.match(rf'^
(?P
<time_offset
>{NUMBER_RE}
)s?$
', time_expr)
3927 return float(mobj.group('time_offset
'))
3929 mobj = re.match(r'^
(\d
+):(\d\d
):(\d\
d(?
:(?
:\
.|
:)\d
+)?
)$
', time_expr)
3931 return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
3934 def srt_subtitles_timecode(seconds):
3935 return '%02d
:%02d
:%02d
,%03d
' % timetuple_from_msec(seconds * 1000)
3938 def ass_subtitles_timecode(seconds):
3939 time = timetuple_from_msec(seconds * 1000)
3940 return '%01d
:%02d
:%02d
.%02d
' % (*time[:-1], time.milliseconds / 10)
3943 def dfxp2srt(dfxp_data):
3945 @param dfxp_data A bytes-like object containing DFXP data
3946 @returns A unicode object containing converted SRT data
3948 LEGACY_NAMESPACES = (
3949 (b'http
://www
.w3
.org
/ns
/ttml
', [
3950 b'http
://www
.w3
.org
/2004/11/ttaf1
',
3951 b'http
://www
.w3
.org
/2006/04/ttaf1
',
3952 b'http
://www
.w3
.org
/2006/10/ttaf1
',
3954 (b'http
://www
.w3
.org
/ns
/ttml
#styling', [
3955 b
'http://www.w3.org/ns/ttml#style',
3959 SUPPORTED_STYLING
= [
3968 _x
= functools
.partial(xpath_with_ns
, ns_map
={
3969 'xml': 'http://www.w3.org/XML/1998/namespace',
3970 'ttml': 'http://www.w3.org/ns/ttml',
3971 'tts': 'http://www.w3.org/ns/ttml#styling',
3977 class TTMLPElementParser
:
3979 _unclosed_elements
= []
3980 _applied_styles
= []
3982 def start(self
, tag
, attrib
):
3983 if tag
in (_x('ttml:br'), 'br'):
3986 unclosed_elements
= []
3988 element_style_id
= attrib
.get('style')
3990 style
.update(default_style
)
3991 if element_style_id
:
3992 style
.update(styles
.get(element_style_id
, {}))
3993 for prop
in SUPPORTED_STYLING
:
3994 prop_val
= attrib
.get(_x('tts:' + prop
))
3996 style
[prop
] = prop_val
3999 for k
, v
in sorted(style
.items()):
4000 if self
._applied
_styles
and self
._applied
_styles
[-1].get(k
) == v
:
4003 font
+= ' color="%s"' % v
4004 elif k
== 'fontSize':
4005 font
+= ' size="%s"' % v
4006 elif k
== 'fontFamily':
4007 font
+= ' face="%s"' % v
4008 elif k
== 'fontWeight' and v
== 'bold':
4010 unclosed_elements
.append('b')
4011 elif k
== 'fontStyle' and v
== 'italic':
4013 unclosed_elements
.append('i')
4014 elif k
== 'textDecoration' and v
== 'underline':
4016 unclosed_elements
.append('u')
4018 self
._out
+= '<font' + font
+ '>'
4019 unclosed_elements
.append('font')
4021 if self
._applied
_styles
:
4022 applied_style
.update(self
._applied
_styles
[-1])
4023 applied_style
.update(style
)
4024 self
._applied
_styles
.append(applied_style
)
4025 self
._unclosed
_elements
.append(unclosed_elements
)
4028 if tag
not in (_x('ttml:br'), 'br'):
4029 unclosed_elements
= self
._unclosed
_elements
.pop()
4030 for element
in reversed(unclosed_elements
):
4031 self
._out
+= '</%s>' % element
4032 if unclosed_elements
and self
._applied
_styles
:
4033 self
._applied
_styles
.pop()
4035 def data(self
, data
):
4039 return self
._out
.strip()
4041 def parse_node(node
):
4042 target
= TTMLPElementParser()
4043 parser
= xml
.etree
.ElementTree
.XMLParser(target
=target
)
4044 parser
.feed(xml
.etree
.ElementTree
.tostring(node
))
4045 return parser
.close()
4047 for k
, v
in LEGACY_NAMESPACES
:
4049 dfxp_data
= dfxp_data
.replace(ns
, k
)
4051 dfxp
= compat_etree_fromstring(dfxp_data
)
4053 paras
= dfxp
.findall(_x('.//ttml:p')) or dfxp
.findall('.//p')
4056 raise ValueError('Invalid dfxp/TTML subtitle')
4060 for style
in dfxp
.findall(_x('.//ttml:style')):
4061 style_id
= style
.get('id') or style
.get(_x('xml:id'))
4064 parent_style_id
= style
.get('style')
4066 if parent_style_id
not in styles
:
4069 styles
[style_id
] = styles
[parent_style_id
].copy()
4070 for prop
in SUPPORTED_STYLING
:
4071 prop_val
= style
.get(_x('tts:' + prop
))
4073 styles
.setdefault(style_id
, {})[prop
] = prop_val
4079 for p
in ('body', 'div'):
4080 ele
= xpath_element(dfxp
, [_x('.//ttml:' + p
), './/' + p
])
4083 style
= styles
.get(ele
.get('style'))
4086 default_style
.update(style
)
4088 for para
, index
in zip(paras
, itertools
.count(1)):
4089 begin_time
= parse_dfxp_time_expr(para
.attrib
.get('begin'))
4090 end_time
= parse_dfxp_time_expr(para
.attrib
.get('end'))
4091 dur
= parse_dfxp_time_expr(para
.attrib
.get('dur'))
4092 if begin_time
is None:
4097 end_time
= begin_time
+ dur
4098 out
.append('%d\n%s --> %s\n%s\n\n' % (
4100 srt_subtitles_timecode(begin_time
),
4101 srt_subtitles_timecode(end_time
),
4107 def cli_option(params
, command_option
, param
, separator
=None):
4108 param
= params
.get(param
)
4109 return ([] if param
is None
4110 else [command_option
, str(param
)] if separator
is None
4111 else [f
'{command_option}{separator}{param}'])
4114 def cli_bool_option(params
, command_option
, param
, true_value
='true', false_value
='false', separator
=None):
4115 param
= params
.get(param
)
4116 assert param
in (True, False, None)
4117 return cli_option({True: true_value, False: false_value}
, command_option
, param
, separator
)
4120 def cli_valueless_option(params
, command_option
, param
, expected_value
=True):
4121 return [command_option
] if params
.get(param
) == expected_value
else []
4124 def cli_configuration_args(argdict
, keys
, default
=[], use_compat
=True):
4125 if isinstance(argdict
, (list, tuple)): # for backward compatibility
4132 assert isinstance(argdict
, dict)
4134 assert isinstance(keys
, (list, tuple))
4135 for key_list
in keys
:
4136 arg_list
= list(filter(
4137 lambda x
: x
is not None,
4138 [argdict
.get(key
.lower()) for key
in variadic(key_list
)]))
4140 return [arg
for args
in arg_list
for arg
in args
]
4144 def _configuration_args(main_key
, argdict
, exe
, keys
=None, default
=[], use_compat
=True):
4145 main_key
, exe
= main_key
.lower(), exe
.lower()
4146 root_key
= exe
if main_key
== exe
else f
'{main_key}+{exe}'
4147 keys
= [f
'{root_key}{k}' for k
in (keys
or [''])]
4148 if root_key
in keys
:
4150 keys
.append((main_key
, exe
))
4151 keys
.append('default')
4154 return cli_configuration_args(argdict
, keys
, default
, use_compat
)
4158 # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
4217 'iw': 'heb', # Replaced by he in 1989 revision
4227 'in': 'ind', # Replaced by id in 1989 revision
4342 'ji': 'yid', # Replaced by yi in 1989 revision
4350 def short2long(cls
, code
):
4351 """Convert language code from ISO 639-1 to ISO 639-2/T"""
4352 return cls
._lang
_map
.get(code
[:2])
4355 def long2short(cls
, code
):
4356 """Convert language code from ISO 639-2/T to ISO 639-1"""
4357 for short_name
, long_name
in cls
._lang
_map
.items():
4358 if long_name
== code
:
4363 # From http://data.okfn.org/data/core/country-list
4365 'AF': 'Afghanistan',
4366 'AX': 'Åland Islands',
4369 'AS': 'American Samoa',
4374 'AG': 'Antigua and Barbuda',
4391 'BO': 'Bolivia, Plurinational State of',
4392 'BQ': 'Bonaire, Sint Eustatius and Saba',
4393 'BA': 'Bosnia and Herzegovina',
4395 'BV': 'Bouvet Island',
4397 'IO': 'British Indian Ocean Territory',
4398 'BN': 'Brunei Darussalam',
4400 'BF': 'Burkina Faso',
4406 'KY': 'Cayman Islands',
4407 'CF': 'Central African Republic',
4411 'CX': 'Christmas Island',
4412 'CC': 'Cocos (Keeling) Islands',
4416 'CD': 'Congo, the Democratic Republic of the',
4417 'CK': 'Cook Islands',
4419 'CI': 'Côte d\'Ivoire',
4424 'CZ': 'Czech Republic',
4428 'DO': 'Dominican Republic',
4431 'SV': 'El Salvador',
4432 'GQ': 'Equatorial Guinea',
4436 'FK': 'Falkland Islands (Malvinas)',
4437 'FO': 'Faroe Islands',
4441 'GF': 'French Guiana',
4442 'PF': 'French Polynesia',
4443 'TF': 'French Southern Territories',
4458 'GW': 'Guinea-Bissau',
4461 'HM': 'Heard Island and McDonald Islands',
4462 'VA': 'Holy See (Vatican City State)',
4469 'IR': 'Iran, Islamic Republic of',
4472 'IM': 'Isle of Man',
4482 'KP': 'Korea, Democratic People\'s Republic of',
4483 'KR': 'Korea, Republic of',
4486 'LA': 'Lao People\'s Democratic Republic',
4492 'LI': 'Liechtenstein',
4496 'MK': 'Macedonia, the Former Yugoslav Republic of',
4503 'MH': 'Marshall Islands',
4509 'FM': 'Micronesia, Federated States of',
4510 'MD': 'Moldova, Republic of',
4521 'NL': 'Netherlands',
4522 'NC': 'New Caledonia',
4523 'NZ': 'New Zealand',
4528 'NF': 'Norfolk Island',
4529 'MP': 'Northern Mariana Islands',
4534 'PS': 'Palestine, State of',
4536 'PG': 'Papua New Guinea',
4539 'PH': 'Philippines',
4543 'PR': 'Puerto Rico',
4547 'RU': 'Russian Federation',
4549 'BL': 'Saint Barthélemy',
4550 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
4551 'KN': 'Saint Kitts and Nevis',
4552 'LC': 'Saint Lucia',
4553 'MF': 'Saint Martin (French part)',
4554 'PM': 'Saint Pierre and Miquelon',
4555 'VC': 'Saint Vincent and the Grenadines',
4558 'ST': 'Sao Tome and Principe',
4559 'SA': 'Saudi Arabia',
4563 'SL': 'Sierra Leone',
4565 'SX': 'Sint Maarten (Dutch part)',
4568 'SB': 'Solomon Islands',
4570 'ZA': 'South Africa',
4571 'GS': 'South Georgia and the South Sandwich Islands',
4572 'SS': 'South Sudan',
4577 'SJ': 'Svalbard and Jan Mayen',
4580 'CH': 'Switzerland',
4581 'SY': 'Syrian Arab Republic',
4582 'TW': 'Taiwan, Province of China',
4584 'TZ': 'Tanzania, United Republic of',
4586 'TL': 'Timor-Leste',
4590 'TT': 'Trinidad and Tobago',
4593 'TM': 'Turkmenistan',
4594 'TC': 'Turks and Caicos Islands',
4598 'AE': 'United Arab Emirates',
4599 'GB': 'United Kingdom',
4600 'US': 'United States',
4601 'UM': 'United States Minor Outlying Islands',
4605 'VE': 'Venezuela, Bolivarian Republic of',
4607 'VG': 'Virgin Islands, British',
4608 'VI': 'Virgin Islands, U.S.',
4609 'WF': 'Wallis and Futuna',
4610 'EH': 'Western Sahara',
4614 # Not ISO 3166 codes, but used for IP blocks
4615 'AP': 'Asia/Pacific Region',
4620 def short2full(cls
, code
):
4621 """Convert an ISO 3166-2 country code to the corresponding full name"""
4622 return cls
._country
_map
.get(code
.upper())
4626 # Major IPv4 address blocks per country
4628 'AD': '46.172.224.0/19',
4629 'AE': '94.200.0.0/13',
4630 'AF': '149.54.0.0/17',
4631 'AG': '209.59.64.0/18',
4632 'AI': '204.14.248.0/21',
4633 'AL': '46.99.0.0/16',
4634 'AM': '46.70.0.0/15',
4635 'AO': '105.168.0.0/13',
4636 'AP': '182.50.184.0/21',
4637 'AQ': '23.154.160.0/24',
4638 'AR': '181.0.0.0/12',
4639 'AS': '202.70.112.0/20',
4640 'AT': '77.116.0.0/14',
4641 'AU': '1.128.0.0/11',
4642 'AW': '181.41.0.0/18',
4643 'AX': '185.217.4.0/22',
4644 'AZ': '5.197.0.0/16',
4645 'BA': '31.176.128.0/17',
4646 'BB': '65.48.128.0/17',
4647 'BD': '114.130.0.0/16',
4649 'BF': '102.178.0.0/15',
4650 'BG': '95.42.0.0/15',
4651 'BH': '37.131.0.0/17',
4652 'BI': '154.117.192.0/18',
4653 'BJ': '137.255.0.0/16',
4654 'BL': '185.212.72.0/23',
4655 'BM': '196.12.64.0/18',
4656 'BN': '156.31.0.0/16',
4657 'BO': '161.56.0.0/16',
4658 'BQ': '161.0.80.0/20',
4659 'BR': '191.128.0.0/12',
4660 'BS': '24.51.64.0/18',
4661 'BT': '119.2.96.0/19',
4662 'BW': '168.167.0.0/16',
4663 'BY': '178.120.0.0/13',
4664 'BZ': '179.42.192.0/18',
4665 'CA': '99.224.0.0/11',
4666 'CD': '41.243.0.0/16',
4667 'CF': '197.242.176.0/21',
4668 'CG': '160.113.0.0/16',
4669 'CH': '85.0.0.0/13',
4670 'CI': '102.136.0.0/14',
4671 'CK': '202.65.32.0/19',
4672 'CL': '152.172.0.0/14',
4673 'CM': '102.244.0.0/14',
4674 'CN': '36.128.0.0/10',
4675 'CO': '181.240.0.0/12',
4676 'CR': '201.192.0.0/12',
4677 'CU': '152.206.0.0/15',
4678 'CV': '165.90.96.0/19',
4679 'CW': '190.88.128.0/17',
4680 'CY': '31.153.0.0/16',
4681 'CZ': '88.100.0.0/14',
4683 'DJ': '197.241.0.0/17',
4684 'DK': '87.48.0.0/12',
4685 'DM': '192.243.48.0/20',
4686 'DO': '152.166.0.0/15',
4687 'DZ': '41.96.0.0/12',
4688 'EC': '186.68.0.0/15',
4689 'EE': '90.190.0.0/15',
4690 'EG': '156.160.0.0/11',
4691 'ER': '196.200.96.0/20',
4692 'ES': '88.0.0.0/11',
4693 'ET': '196.188.0.0/14',
4694 'EU': '2.16.0.0/13',
4695 'FI': '91.152.0.0/13',
4696 'FJ': '144.120.0.0/16',
4697 'FK': '80.73.208.0/21',
4698 'FM': '119.252.112.0/20',
4699 'FO': '88.85.32.0/19',
4701 'GA': '41.158.0.0/15',
4703 'GD': '74.122.88.0/21',
4704 'GE': '31.146.0.0/16',
4705 'GF': '161.22.64.0/18',
4706 'GG': '62.68.160.0/19',
4707 'GH': '154.160.0.0/12',
4708 'GI': '95.164.0.0/16',
4709 'GL': '88.83.0.0/19',
4710 'GM': '160.182.0.0/15',
4711 'GN': '197.149.192.0/18',
4712 'GP': '104.250.0.0/19',
4713 'GQ': '105.235.224.0/20',
4714 'GR': '94.64.0.0/13',
4715 'GT': '168.234.0.0/16',
4716 'GU': '168.123.0.0/16',
4717 'GW': '197.214.80.0/20',
4718 'GY': '181.41.64.0/18',
4719 'HK': '113.252.0.0/14',
4720 'HN': '181.210.0.0/16',
4721 'HR': '93.136.0.0/13',
4722 'HT': '148.102.128.0/17',
4723 'HU': '84.0.0.0/14',
4724 'ID': '39.192.0.0/10',
4725 'IE': '87.32.0.0/12',
4726 'IL': '79.176.0.0/13',
4727 'IM': '5.62.80.0/20',
4728 'IN': '117.192.0.0/10',
4729 'IO': '203.83.48.0/21',
4730 'IQ': '37.236.0.0/14',
4731 'IR': '2.176.0.0/12',
4732 'IS': '82.221.0.0/16',
4733 'IT': '79.0.0.0/10',
4734 'JE': '87.244.64.0/18',
4735 'JM': '72.27.0.0/17',
4736 'JO': '176.29.0.0/16',
4737 'JP': '133.0.0.0/8',
4738 'KE': '105.48.0.0/12',
4739 'KG': '158.181.128.0/17',
4740 'KH': '36.37.128.0/17',
4741 'KI': '103.25.140.0/22',
4742 'KM': '197.255.224.0/20',
4743 'KN': '198.167.192.0/19',
4744 'KP': '175.45.176.0/22',
4745 'KR': '175.192.0.0/10',
4746 'KW': '37.36.0.0/14',
4747 'KY': '64.96.0.0/15',
4748 'KZ': '2.72.0.0/13',
4749 'LA': '115.84.64.0/18',
4750 'LB': '178.135.0.0/16',
4751 'LC': '24.92.144.0/20',
4752 'LI': '82.117.0.0/19',
4753 'LK': '112.134.0.0/15',
4754 'LR': '102.183.0.0/16',
4755 'LS': '129.232.0.0/17',
4756 'LT': '78.56.0.0/13',
4757 'LU': '188.42.0.0/16',
4758 'LV': '46.109.0.0/16',
4759 'LY': '41.252.0.0/14',
4760 'MA': '105.128.0.0/11',
4761 'MC': '88.209.64.0/18',
4762 'MD': '37.246.0.0/16',
4763 'ME': '178.175.0.0/17',
4764 'MF': '74.112.232.0/21',
4765 'MG': '154.126.0.0/17',
4766 'MH': '117.103.88.0/21',
4767 'MK': '77.28.0.0/15',
4768 'ML': '154.118.128.0/18',
4769 'MM': '37.111.0.0/17',
4770 'MN': '49.0.128.0/17',
4771 'MO': '60.246.0.0/16',
4772 'MP': '202.88.64.0/20',
4773 'MQ': '109.203.224.0/19',
4774 'MR': '41.188.64.0/18',
4775 'MS': '208.90.112.0/22',
4776 'MT': '46.11.0.0/16',
4777 'MU': '105.16.0.0/12',
4778 'MV': '27.114.128.0/18',
4779 'MW': '102.70.0.0/15',
4780 'MX': '187.192.0.0/11',
4781 'MY': '175.136.0.0/13',
4782 'MZ': '197.218.0.0/15',
4783 'NA': '41.182.0.0/16',
4784 'NC': '101.101.0.0/18',
4785 'NE': '197.214.0.0/18',
4786 'NF': '203.17.240.0/22',
4787 'NG': '105.112.0.0/12',
4788 'NI': '186.76.0.0/15',
4789 'NL': '145.96.0.0/11',
4790 'NO': '84.208.0.0/13',
4791 'NP': '36.252.0.0/15',
4792 'NR': '203.98.224.0/19',
4793 'NU': '49.156.48.0/22',
4794 'NZ': '49.224.0.0/14',
4795 'OM': '5.36.0.0/15',
4796 'PA': '186.72.0.0/15',
4797 'PE': '186.160.0.0/14',
4798 'PF': '123.50.64.0/18',
4799 'PG': '124.240.192.0/19',
4800 'PH': '49.144.0.0/13',
4801 'PK': '39.32.0.0/11',
4802 'PL': '83.0.0.0/11',
4803 'PM': '70.36.0.0/20',
4804 'PR': '66.50.0.0/16',
4805 'PS': '188.161.0.0/16',
4806 'PT': '85.240.0.0/13',
4807 'PW': '202.124.224.0/20',
4808 'PY': '181.120.0.0/14',
4809 'QA': '37.210.0.0/15',
4810 'RE': '102.35.0.0/16',
4811 'RO': '79.112.0.0/13',
4812 'RS': '93.86.0.0/15',
4813 'RU': '5.136.0.0/13',
4814 'RW': '41.186.0.0/16',
4815 'SA': '188.48.0.0/13',
4816 'SB': '202.1.160.0/19',
4817 'SC': '154.192.0.0/11',
4818 'SD': '102.120.0.0/13',
4819 'SE': '78.64.0.0/12',
4820 'SG': '8.128.0.0/10',
4821 'SI': '188.196.0.0/14',
4822 'SK': '78.98.0.0/15',
4823 'SL': '102.143.0.0/17',
4824 'SM': '89.186.32.0/19',
4825 'SN': '41.82.0.0/15',
4826 'SO': '154.115.192.0/18',
4827 'SR': '186.179.128.0/17',
4828 'SS': '105.235.208.0/21',
4829 'ST': '197.159.160.0/19',
4830 'SV': '168.243.0.0/16',
4831 'SX': '190.102.0.0/20',
4833 'SZ': '41.84.224.0/19',
4834 'TC': '65.255.48.0/20',
4835 'TD': '154.68.128.0/19',
4836 'TG': '196.168.0.0/14',
4837 'TH': '171.96.0.0/13',
4838 'TJ': '85.9.128.0/18',
4839 'TK': '27.96.24.0/21',
4840 'TL': '180.189.160.0/20',
4841 'TM': '95.85.96.0/19',
4842 'TN': '197.0.0.0/11',
4843 'TO': '175.176.144.0/21',
4844 'TR': '78.160.0.0/11',
4845 'TT': '186.44.0.0/15',
4846 'TV': '202.2.96.0/19',
4847 'TW': '120.96.0.0/11',
4848 'TZ': '156.156.0.0/14',
4849 'UA': '37.52.0.0/14',
4850 'UG': '102.80.0.0/13',
4852 'UY': '167.56.0.0/13',
4853 'UZ': '84.54.64.0/18',
4854 'VA': '212.77.0.0/19',
4855 'VC': '207.191.240.0/21',
4856 'VE': '186.88.0.0/13',
4857 'VG': '66.81.192.0/20',
4858 'VI': '146.226.0.0/16',
4859 'VN': '14.160.0.0/11',
4860 'VU': '202.80.32.0/20',
4861 'WF': '117.20.32.0/21',
4862 'WS': '202.4.32.0/19',
4863 'YE': '134.35.0.0/16',
4864 'YT': '41.242.116.0/22',
4865 'ZA': '41.0.0.0/11',
4866 'ZM': '102.144.0.0/13',
4867 'ZW': '102.177.192.0/18',
4871 def random_ipv4(cls
, code_or_block
):
4872 if len(code_or_block
) == 2:
4873 block
= cls
._country
_ip
_map
.get(code_or_block
.upper())
4877 block
= code_or_block
4878 addr
, preflen
= block
.split('/')
4879 addr_min
= struct
.unpack('!L', socket
.inet_aton(addr
))[0]
4880 addr_max
= addr_min |
(0xffffffff >> int(preflen
))
4881 return str(socket
.inet_ntoa(
4882 struct
.pack('!L', random
.randint(addr_min
, addr_max
))))
4885 class PerRequestProxyHandler(urllib
.request
.ProxyHandler
):
4886 def __init__(self
, proxies
=None):
4887 # Set default handlers
4888 for type in ('http', 'https'):
4889 setattr(self
, '%s_open' % type,
4890 lambda r
, proxy
='__noproxy__', type=type, meth
=self
.proxy_open
:
4891 meth(r
, proxy
, type))
4892 urllib
.request
.ProxyHandler
.__init
__(self
, proxies
)
4894 def proxy_open(self
, req
, proxy
, type):
4895 req_proxy
= req
.headers
.get('Ytdl-request-proxy')
4896 if req_proxy
is not None:
4898 del req
.headers
['Ytdl-request-proxy']
4900 if proxy
== '__noproxy__':
4901 return None # No Proxy
4902 if urllib
.parse
.urlparse(proxy
).scheme
.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
4903 req
.add_header('Ytdl-socks-proxy', proxy
)
4904 # yt-dlp's http/https handlers do wrapping the socket with socks
4906 return urllib
.request
.ProxyHandler
.proxy_open(
4907 self
, req
, proxy
, type)
4910 # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
4911 # released into Public Domain
4912 # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
4914 def long_to_bytes(n
, blocksize
=0):
4915 """long_to_bytes(n:long, blocksize:int) : string
4916 Convert a long integer to a byte string.
4918 If optional blocksize is given and greater than zero, pad the front of the
4919 byte string with binary zeros so that the length is a multiple of
4922 # after much testing, this algorithm was deemed to be the fastest
4926 s
= struct
.pack('>I', n
& 0xffffffff) + s
4928 # strip off leading zeros
4929 for i
in range(len(s
)):
4930 if s
[i
] != b
'\000'[0]:
4933 # only happens when n == 0
4937 # add back some pad bytes. this could be done more efficiently w.r.t. the
4938 # de-padding being done above, but sigh...
4939 if blocksize
> 0 and len(s
) % blocksize
:
4940 s
= (blocksize
- len(s
) % blocksize
) * b
'\000' + s
4944 def bytes_to_long(s
):
4945 """bytes_to_long(string) : long
4946 Convert a byte string to a long integer.
4948 This is (essentially) the inverse of long_to_bytes().
4953 extra
= (4 - length
% 4)
4954 s
= b
'\000' * extra
+ s
4955 length
= length
+ extra
4956 for i
in range(0, length
, 4):
4957 acc
= (acc
<< 32) + struct
.unpack('>I', s
[i
:i
+ 4])[0]
4961 def ohdave_rsa_encrypt(data
, exponent
, modulus
):
4963 Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
4966 data: data to encrypt, bytes-like object
4967 exponent, modulus: parameter e and N of RSA algorithm, both integer
4968 Output: hex string of encrypted data
4970 Limitation: supports one block encryption only
4973 payload
= int(binascii
.hexlify(data
[::-1]), 16)
4974 encrypted
= pow(payload
, exponent
, modulus
)
4975 return '%x' % encrypted
4978 def pkcs1pad(data
, length
):
4980 Padding input data with PKCS#1 scheme
4982 @param {int[]} data input data
4983 @param {int} length target length
4984 @returns {int[]} padded data
4986 if len(data
) > length
- 11:
4987 raise ValueError('Input data too long for PKCS#1 padding')
4989 pseudo_random
= [random
.randint(0, 254) for _
in range(length
- len(data
) - 3)]
4990 return [0, 2] + pseudo_random
+ [0] + data
4993 def _base_n_table(n
, table
):
4994 if not table
and not n
:
4995 raise ValueError('Either table or n must be specified')
4996 table
= (table
or '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')[:n
]
4998 if n
and n
!= len(table
):
4999 raise ValueError(f
'base {n} exceeds table length {len(table)}')
5003 def encode_base_n(num
, n
=None, table
=None):
5004 """Convert given int to a base-n string"""
5005 table
= _base_n_table(n
, table
)
5009 result
, base
= '', len(table
)
5011 result
= table
[num
% base
] + result
5016 def decode_base_n(string
, n
=None, table
=None):
5017 """Convert given base-n string to int"""
5018 table
= {char: index for index, char in enumerate(_base_n_table(n, table))}
5019 result
, base
= 0, len(table
)
5021 result
= result
* base
+ table
[char
]
5025 def decode_base(value
, digits
):
5026 deprecation_warning(f
'{__name__}.decode_base is deprecated and may be removed '
5027 f
'in a future version. Use {__name__}.decode_base_n instead')
5028 return decode_base_n(value
, table
=digits
)
5031 def decode_packed_codes(code
):
5032 mobj
= re
.search(PACKED_CODES_RE
, code
)
5033 obfuscated_code
, base
, count
, symbols
= mobj
.groups()
5036 symbols
= symbols
.split('|')
5041 base_n_count
= encode_base_n(count
, base
)
5042 symbol_table
[base_n_count
] = symbols
[count
] or base_n_count
5045 r
'\b(\w+)\b', lambda mobj
: symbol_table
[mobj
.group(0)],
5049 def caesar(s
, alphabet
, shift
):
5054 alphabet
[(alphabet
.index(c
) + shift
) % l
] if c
in alphabet
else c
5059 return caesar(s
, r
'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)
5062 def parse_m3u8_attributes(attrib
):
5064 for (key
, val
) in re
.findall(r
'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib
):
5065 if val
.startswith('"'):
5071 def urshift(val
, n
):
5072 return val
>> n
if val
>= 0 else (val
+ 0x100000000) >> n
5075 # Based on png2str() written by @gdkchan and improved by @yokrysty
5076 # Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
5077 def decode_png(png_data
):
5078 # Reference: https://www.w3.org/TR/PNG/
5079 header
= png_data
[8:]
5081 if png_data
[:8] != b
'\x89PNG\x0d\x0a\x1a\x0a' or header
[4:8] != b
'IHDR':
5082 raise OSError('Not a valid PNG file.')
5084 int_map
= {1: '>B', 2: '>H', 4: '>I'}
5085 unpack_integer
= lambda x
: struct
.unpack(int_map
[len(x
)], x
)[0]
5090 length
= unpack_integer(header
[:4])
5093 chunk_type
= header
[:4]
5096 chunk_data
= header
[:length
]
5097 header
= header
[length
:]
5099 header
= header
[4:] # Skip CRC
5107 ihdr
= chunks
[0]['data']
5109 width
= unpack_integer(ihdr
[:4])
5110 height
= unpack_integer(ihdr
[4:8])
5114 for chunk
in chunks
:
5115 if chunk
['type'] == b
'IDAT':
5116 idat
+= chunk
['data']
5119 raise OSError('Unable to read PNG data.')
5121 decompressed_data
= bytearray(zlib
.decompress(idat
))
5126 def _get_pixel(idx
):
5131 for y
in range(height
):
5132 basePos
= y
* (1 + stride
)
5133 filter_type
= decompressed_data
[basePos
]
5137 pixels
.append(current_row
)
5139 for x
in range(stride
):
5140 color
= decompressed_data
[1 + basePos
+ x
]
5141 basex
= y
* stride
+ x
5146 left
= _get_pixel(basex
- 3)
5148 up
= _get_pixel(basex
- stride
)
5150 if filter_type
== 1: # Sub
5151 color
= (color
+ left
) & 0xff
5152 elif filter_type
== 2: # Up
5153 color
= (color
+ up
) & 0xff
5154 elif filter_type
== 3: # Average
5155 color
= (color
+ ((left
+ up
) >> 1)) & 0xff
5156 elif filter_type
== 4: # Paeth
5162 c
= _get_pixel(basex
- stride
- 3)
5170 if pa
<= pb
and pa
<= pc
:
5171 color
= (color
+ a
) & 0xff
5173 color
= (color
+ b
) & 0xff
5175 color
= (color
+ c
) & 0xff
5177 current_row
.append(color
)
5179 return width
, height
, pixels
5182 def write_xattr(path
, key
, value
):
5183 # Windows: Write xattrs to NTFS Alternate Data Streams:
5184 # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
5185 if compat_os_name
== 'nt':
5186 assert ':' not in key
5187 assert os
.path
.exists(path
)
5190 with open(f
'{path}:{key}', 'wb') as f
:
5192 except OSError as e
:
5193 raise XAttrMetadataError(e
.errno
, e
.strerror
)
5196 # UNIX Method 1. Use xattrs/pyxattrs modules
5199 if getattr(xattr
, '_yt_dlp__identifier', None) == 'pyxattr':
5200 # Unicode arguments are not supported in pyxattr until version 0.5.0
5201 # See https://github.com/ytdl-org/youtube-dl/issues/5498
5202 if version_tuple(xattr
.__version
__) >= (0, 5, 0):
5203 setxattr
= xattr
.set
5205 setxattr
= xattr
.setxattr
5209 setxattr(path
, key
, value
)
5210 except OSError as e
:
5211 raise XAttrMetadataError(e
.errno
, e
.strerror
)
5214 # UNIX Method 2. Use setfattr/xattr executables
5215 exe
= ('setfattr' if check_executable('setfattr', ['--version'])
5216 else 'xattr' if check_executable('xattr', ['-h']) else None)
5218 raise XAttrUnavailableError(
5219 'Couldn\'t find a tool to set the xattrs. Install either the python "xattr" or "pyxattr" modules or the '
5220 + ('"xattr" binary' if sys
.platform
!= 'linux' else 'GNU "attr" package (which contains the "setfattr" tool)'))
5222 value
= value
.decode()
5224 _
, stderr
, returncode
= Popen
.run(
5225 [exe
, '-w', key
, value
, path
] if exe
== 'xattr' else [exe
, '-n', key
, '-v', value
, path
],
5226 text
=True, stdout
=subprocess
.PIPE
, stderr
=subprocess
.PIPE
, stdin
=subprocess
.PIPE
)
5227 except OSError as e
:
5228 raise XAttrMetadataError(e
.errno
, e
.strerror
)
5230 raise XAttrMetadataError(returncode
, stderr
)
5233 def random_birthday(year_field
, month_field
, day_field
):
5234 start_date
= datetime
.date(1950, 1, 1)
5235 end_date
= datetime
.date(1995, 12, 31)
5236 offset
= random
.randint(0, (end_date
- start_date
).days
)
5237 random_date
= start_date
+ datetime
.timedelta(offset
)
5239 year_field
: str(random_date
.year
),
5240 month_field
: str(random_date
.month
),
5241 day_field
: str(random_date
.day
),
5245 def find_available_port(interface
=''):
5247 with socket
.socket() as sock
:
5248 sock
.bind((interface
, 0))
5249 return sock
.getsockname()[1]
5254 # Templates for internet shortcut files, which are plain text files.
5255 DOT_URL_LINK_TEMPLATE
= '''\
5260 DOT_WEBLOC_LINK_TEMPLATE
= '''\
5261 <?xml version="1.0" encoding="UTF-8"?>
5262 <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
5263 <plist version="1.0">
5266 \t<string>%(url)s</string>
5271 DOT_DESKTOP_LINK_TEMPLATE
= '''\
5281 'url': DOT_URL_LINK_TEMPLATE
,
5282 'desktop': DOT_DESKTOP_LINK_TEMPLATE
,
5283 'webloc': DOT_WEBLOC_LINK_TEMPLATE
,
5287 def iri_to_uri(iri
):
5289 Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).
5291 The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact.
5294 iri_parts
= urllib
.parse
.urlparse(iri
)
5296 if '[' in iri_parts
.netloc
:
5297 raise ValueError('IPv6 URIs are not, yet, supported.')
5298 # Querying `.netloc`, when there's only one bracket, also raises a ValueError.
5300 # The `safe` argument values, that the following code uses, contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.
5303 if iri_parts
.username
:
5304 net_location
+= urllib
.parse
.quote(iri_parts
.username
, safe
=r
"!$%&'()*+,~")
5305 if iri_parts
.password
is not None:
5306 net_location
+= ':' + urllib
.parse
.quote(iri_parts
.password
, safe
=r
"!$%&'()*+,~")
5309 net_location
+= iri_parts
.hostname
.encode('idna').decode() # Punycode for Unicode hostnames.
5310 # The 'idna' encoding produces ASCII text.
5311 if iri_parts
.port
is not None and iri_parts
.port
!= 80:
5312 net_location
+= ':' + str(iri_parts
.port
)
5314 return urllib
.parse
.urlunparse(
5318 urllib
.parse
.quote_plus(iri_parts
.path
, safe
=r
"!$%&'()*+,/:;=@|~"),
5320 # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
5321 urllib
.parse
.quote_plus(iri_parts
.params
, safe
=r
"!$%&'()*+,/:;=@|~"),
5323 # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
5324 urllib
.parse
.quote_plus(iri_parts
.query
, safe
=r
"!$%&'()*+,/:;=?@{|}~"),
5326 urllib
.parse
.quote_plus(iri_parts
.fragment
, safe
=r
"!#$%&'()*+,/:;=?@{|}~")))
5328 # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
5331 def to_high_limit_path(path
):
5332 if sys
.platform
in ['win32', 'cygwin']:
5333 # Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited.
5334 return '\\\\?\\' + os
.path
.abspath(path
)
5339 def format_field(obj
, field
=None, template
='%s', ignore
=NO_DEFAULT
, default
='', func
=IDENTITY
):
5340 val
= traverse_obj(obj
, *variadic(field
))
5341 if (not val
and val
!= 0) if ignore
is NO_DEFAULT
else val
in variadic(ignore
):
5343 return template
% func(val
)
5346 def clean_podcast_url(url
):
5347 return re
.sub(r
'''(?x)
5351 media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
5354 (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
5357 cn\.co| # https://podcorn.com/analytics-prefix/
5358 st\.fm # https://podsights.com/docs/
5363 _HEX_TABLE
= '0123456789abcdef'
5366 def random_uuidv4():
5367 return re
.sub(r
'[xy]', lambda x
: _HEX_TABLE
[random
.randint(0, 15)], 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')
5370 def make_dir(path
, to_screen
=None):
5372 dn
= os
.path
.dirname(path
)
5374 os
.makedirs(dn
, exist_ok
=True)
5376 except OSError as err
:
5377 if callable(to_screen
) is not None:
5378 to_screen('unable to create directory ' + error_to_compat_str(err
))
5382 def get_executable_path():
5383 from .update
import _get_variant_and_executable_path
5385 return os
.path
.dirname(os
.path
.abspath(_get_variant_and_executable_path()[1]))
5388 def get_user_config_dirs(package_name
):
5389 # .config (e.g. ~/.config/package_name)
5390 xdg_config_home
= os
.getenv('XDG_CONFIG_HOME') or compat_expanduser('~/.config')
5391 yield os
.path
.join(xdg_config_home
, package_name
)
5393 # appdata (%APPDATA%/package_name)
5394 appdata_dir
= os
.getenv('appdata')
5396 yield os
.path
.join(appdata_dir
, package_name
)
5398 # home (~/.package_name)
5399 yield os
.path
.join(compat_expanduser('~'), f
'.{package_name}')
5402 def get_system_config_dirs(package_name
):
5404 yield os
.path
.join('/etc', package_name
)
5408 obj
, *paths
, default
=NO_DEFAULT
, expected_type
=None, get_all
=True,
5409 casesense
=True, is_user_input
=False, traverse_string
=False):
5411 Safely traverse nested `dict`s and `Sequence`s
5413 >>> obj = [{}, {"key": "value"}]
5414 >>> traverse_obj(obj, (1, "key"))
5417 Each of the provided `paths` is tested and the first producing a valid result will be returned.
5418 The next path will also be tested if the path branched but no results could be found.
5419 Supported values for traversal are `Mapping`, `Sequence` and `re.Match`.
5420 Unhelpful values (`{}`, `None`) are treated as the absence of a value and discarded.
5422 The paths will be wrapped in `variadic`, so that `'key'` is conveniently the same as `('key', )`.
5424 The keys in the path can be one of:
5425 - `None`: Return the current object.
5426 - `set`: Requires the only item in the set to be a type or function,
5427 like `{type}`/`{func}`. If a `type`, returns only values
5428 of this type. If a function, returns `func(obj)`.
5429 - `str`/`int`: Return `obj[key]`. For `re.Match`, return `obj.group(key)`.
5430 - `slice`: Branch out and return all values in `obj[key]`.
5431 - `Ellipsis`: Branch out and return a list of all values.
5432 - `tuple`/`list`: Branch out and return a list of all matching values.
5433 Read as: `[traverse_obj(obj, branch) for branch in branches]`.
5434 - `function`: Branch out and return values filtered by the function.
5435 Read as: `[value for key, value in obj if function(key, value)]`.
5436 For `Sequence`s, `key` is the index of the value.
5437 For `re.Match`es, `key` is the group number (0 = full match)
5438 as well as additionally any group names, if given.
5439 - `dict` Transform the current object and return a matching dict.
5440 Read as: `{key: traverse_obj(obj, path) for key, path in dct.items()}`.
5442 `tuple`, `list`, and `dict` all support nested paths and branches.
5444 @params paths Paths which to traverse by.
5445 @param default Value to return if the paths do not match.
5446 If the last key in the path is a `dict`, it will apply to each value inside
5447 the dict instead, depth first. Try to avoid if using nested `dict` keys.
5448 @param expected_type If a `type`, only accept final values of this type.
5449 If any other callable, try to call the function on each result.
5450 If the last key in the path is a `dict`, it will apply to each value inside
5451 the dict instead, recursively. This does respect branching paths.
5452 @param get_all If `False`, return the first matching result, otherwise all matching ones.
5453 @param casesense If `False`, consider string dictionary keys as case insensitive.
5455 The following are only meant to be used by YoutubeDL.prepare_outtmpl and are not part of the API
5457 @param is_user_input Whether the keys are generated from user input.
5458 If `True` strings get converted to `int`/`slice` if needed.
5459 @param traverse_string Whether to traverse into objects as strings.
5460 If `True`, any non-compatible object will first be
5461 converted into a string and then traversed into.
5462 The return value of that path will be a string instead,
5463 not respecting any further branching.
5466 @returns The result of the object traversal.
5467 If successful, `get_all=True`, and the path branches at least once,
5468 then a list of results is returned instead.
5469 If no `default` is given and the last path branches, a `list` of results
5470 is always returned. If a path ends on a `dict` that result will always be a `dict`.
5472 is_sequence
= lambda x
: isinstance(x
, collections
.abc
.Sequence
) and not isinstance(x
, (str, bytes))
5473 casefold
= lambda k
: k
.casefold() if isinstance(k
, str) else k
5475 if isinstance(expected_type
, type):
5476 type_test
= lambda val
: val
if isinstance(val
, expected_type
) else None
5478 type_test
= lambda val
: try_call(expected_type
or IDENTITY
, args
=(val
,))
5480 def apply_key(key
, obj
, is_last
):
5484 if obj
is None and traverse_string
:
5490 elif isinstance(key
, set):
5491 assert len(key
) == 1, 'Set should only be used to wrap a single item'
5492 item
= next(iter(key
))
5493 if isinstance(item
, type):
5494 if isinstance(obj
, item
):
5497 result
= try_call(item
, args
=(obj
,))
5499 elif isinstance(key
, (list, tuple)):
5501 result
= itertools
.chain
.from_iterable(
5502 apply_path(obj
, branch
, is_last
)[0] for branch
in key
)
5506 if isinstance(obj
, collections
.abc
.Mapping
):
5507 result
= obj
.values()
5508 elif is_sequence(obj
):
5510 elif isinstance(obj
, re
.Match
):
5511 result
= obj
.groups()
5512 elif traverse_string
:
5520 if isinstance(obj
, collections
.abc
.Mapping
):
5521 iter_obj
= obj
.items()
5522 elif is_sequence(obj
):
5523 iter_obj
= enumerate(obj
)
5524 elif isinstance(obj
, re
.Match
):
5525 iter_obj
= itertools
.chain(
5526 enumerate((obj
.group(), *obj
.groups())),
5527 obj
.groupdict().items())
5528 elif traverse_string
:
5530 iter_obj
= enumerate(str(obj
))
5534 result
= (v
for k
, v
in iter_obj
if try_call(key
, args
=(k
, v
)))
5535 if not branching
: # string traversal
5536 result
= ''.join(result
)
5538 elif isinstance(key
, dict):
5539 iter_obj
= ((k
, _traverse_obj(obj
, v
, False, is_last
)) for k
, v
in key
.items())
5541 k
: v
if v
is not None else default
for k
, v
in iter_obj
5542 if v
is not None or default
is not NO_DEFAULT
5545 elif isinstance(obj
, collections
.abc
.Mapping
):
5546 result
= (obj
.get(key
) if casesense
or (key
in obj
) else
5547 next((v
for k
, v
in obj
.items() if casefold(k
) == key
), None))
5549 elif isinstance(obj
, re
.Match
):
5550 if isinstance(key
, int) or casesense
:
5551 with contextlib
.suppress(IndexError):
5552 result
= obj
.group(key
)
5554 elif isinstance(key
, str):
5555 result
= next((v
for k
, v
in obj
.groupdict().items() if casefold(k
) == key
), None)
5557 elif isinstance(key
, (int, slice)):
5558 if is_sequence(obj
):
5559 branching
= isinstance(key
, slice)
5560 with contextlib
.suppress(IndexError):
5562 elif traverse_string
:
5563 with contextlib
.suppress(IndexError):
5564 result
= str(obj
)[key
]
5566 return branching
, result
if branching
else (result
,)
5568 def lazy_last(iterable
):
5569 iterator
= iter(iterable
)
5570 prev
= next(iterator
, NO_DEFAULT
)
5571 if prev
is NO_DEFAULT
:
5574 for item
in iterator
:
5580 def apply_path(start_obj
, path
, test_type
):
5582 has_branched
= False
5585 for last
, key
in lazy_last(variadic(path
, (str, bytes, dict, set))):
5586 if is_user_input
and isinstance(key
, str):
5590 key
= slice(*map(int_or_none
, key
.split(':')))
5591 elif int_or_none(key
) is not None:
5594 if not casesense
and isinstance(key
, str):
5595 key
= key
.casefold()
5597 if __debug__
and callable(key
):
5598 # Verify function signature
5599 inspect
.signature(key
).bind(None, None)
5603 branching
, results
= apply_key(key
, obj
, last
)
5604 has_branched |
= branching
5605 new_objs
.append(results
)
5607 objs
= itertools
.chain
.from_iterable(new_objs
)
5609 if test_type
and not isinstance(key
, (dict, list, tuple)):
5610 objs
= map(type_test
, objs
)
5612 return objs
, has_branched
, isinstance(key
, dict)
5614 def _traverse_obj(obj
, path
, allow_empty
, test_type
):
5615 results
, has_branched
, is_dict
= apply_path(obj
, path
, test_type
)
5616 results
= LazyList(item
for item
in results
if item
not in (None, {}))
5617 if get_all
and has_branched
:
5619 return results
.exhaust()
5621 return [] if default
is NO_DEFAULT
else default
5624 return results
[0] if results
else {} if allow_empty
and is_dict
else None
5626 for index
, path
in enumerate(paths
, 1):
5627 result
= _traverse_obj(obj
, path
, index
== len(paths
), True)
5628 if result
is not None:
5631 return None if default
is NO_DEFAULT
else default
5634 def traverse_dict(dictn
, keys
, casesense
=True):
5635 deprecation_warning(f
'"{__name__}.traverse_dict" is deprecated and may be removed '
5636 f
'in a future version. Use "{__name__}.traverse_obj" instead')
5637 return traverse_obj(dictn
, keys
, casesense
=casesense
, is_user_input
=True, traverse_string
=True)
5640 def get_first(obj
, keys
, **kwargs
):
5641 return traverse_obj(obj
, (..., *variadic(keys
)), **kwargs
, get_all
=False)
5644 def time_seconds(**kwargs
):
5646 Returns TZ-aware time in seconds since the epoch (1970-01-01T00:00:00Z)
5648 return time
.time() + datetime
.timedelta(**kwargs
).total_seconds()
5651 # create a JSON Web Signature (jws) with HS256 algorithm
5652 # the resulting format is in JWS Compact Serialization
5653 # implemented following JWT https://www.rfc-editor.org/rfc/rfc7519.html
5654 # implemented following JWS https://www.rfc-editor.org/rfc/rfc7515.html
5655 def jwt_encode_hs256(payload_data
, key
, headers
={}):
5661 header_data
.update(headers
)
5662 header_b64
= base64
.b64encode(json
.dumps(header_data
).encode())
5663 payload_b64
= base64
.b64encode(json
.dumps(payload_data
).encode())
5664 h
= hmac
.new(key
.encode(), header_b64
+ b
'.' + payload_b64
, hashlib
.sha256
)
5665 signature_b64
= base64
.b64encode(h
.digest())
5666 token
= header_b64
+ b
'.' + payload_b64
+ b
'.' + signature_b64
5670 # can be extended in future to verify the signature and parse header and return the algorithm used if it's not HS256
5671 def jwt_decode_hs256(jwt
):
5672 header_b64
, payload_b64
, signature_b64
= jwt
.split('.')
5673 # add trailing ='s that may have been stripped, superfluous ='s are ignored
5674 payload_data
= json
.loads(base64
.urlsafe_b64decode(f
'{payload_b64}==='))
5678 WINDOWS_VT_MODE
= False if compat_os_name
== 'nt' else None
5682 def supports_terminal_sequences(stream
):
5683 if compat_os_name
== 'nt':
5684 if not WINDOWS_VT_MODE
:
5686 elif not os
.getenv('TERM'):
5689 return stream
.isatty()
5690 except BaseException
:
5694 def windows_enable_vt_mode():
5695 """Ref: https://bugs.python.org/issue30075 """
5696 if get_windows_version() < (10, 0, 10586):
5700 import ctypes
.wintypes
5703 ENABLE_VIRTUAL_TERMINAL_PROCESSING
= 0x0004
5705 dll
= ctypes
.WinDLL('kernel32', use_last_error
=False)
5706 handle
= os
.open('CONOUT$', os
.O_RDWR
)
5708 h_out
= ctypes
.wintypes
.HANDLE(msvcrt
.get_osfhandle(handle
))
5709 dw_original_mode
= ctypes
.wintypes
.DWORD()
5710 success
= dll
.GetConsoleMode(h_out
, ctypes
.byref(dw_original_mode
))
5712 raise Exception('GetConsoleMode failed')
5714 success
= dll
.SetConsoleMode(h_out
, ctypes
.wintypes
.DWORD(
5715 dw_original_mode
.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING
))
5717 raise Exception('SetConsoleMode failed')
5721 global WINDOWS_VT_MODE
5722 WINDOWS_VT_MODE
= True
5723 supports_terminal_sequences
.cache_clear()
5726 _terminal_sequences_re
= re
.compile('\033\\[[^m]+m')
5729 def remove_terminal_sequences(string
):
5730 return _terminal_sequences_re
.sub('', string
)
5733 def number_of_digits(number
):
5734 return len('%d' % number
)
5737 def join_nonempty(*values
, delim
='-', from_dict
=None):
5738 if from_dict
is not None:
5739 values
= (traverse_obj(from_dict
, variadic(v
)) for v
in values
)
5740 return delim
.join(map(str, filter(None, values
)))
5743 def scale_thumbnails_to_max_format_width(formats
, thumbnails
, url_width_re
):
5745 Find the largest format dimensions in terms of video width and, for each thumbnail:
5746 * Modify the URL: Match the width with the provided regex and replace with the former width
5749 This function is useful with video services that scale the provided thumbnails on demand
5751 _keys
= ('width', 'height')
5752 max_dimensions
= max(
5753 (tuple(format
.get(k
) or 0 for k
in _keys
) for format
in formats
),
5755 if not max_dimensions
[0]:
5759 {'url': re.sub(url_width_re, str(max_dimensions[0]), thumbnail['url'])}
,
5760 dict(zip(_keys
, max_dimensions
)), thumbnail
)
5761 for thumbnail
in thumbnails
5765 def parse_http_range(range):
5766 """ Parse value of "Range" or "Content-Range" HTTP header into tuple. """
5768 return None, None, None
5769 crg
= re
.search(r
'bytes[ =](\d+)-(\d+)?(?:/(\d+))?', range)
5771 return None, None, None
5772 return int(crg
.group(1)), int_or_none(crg
.group(2)), int_or_none(crg
.group(3))
5775 def read_stdin(what
):
5776 eof
= 'Ctrl+Z' if compat_os_name
== 'nt' else 'Ctrl+D'
5777 write_string(f
'Reading {what} from STDIN - EOF ({eof}) to end:\n')
5781 def determine_file_encoding(data
):
5783 Detect the text encoding used
5784 @returns (encoding, bytes to skip)
5787 # BOM marks are given priority over declarations
5788 for bom
, enc
in BOMS
:
5789 if data
.startswith(bom
):
5790 return enc
, len(bom
)
5792 # Strip off all null bytes to match even when UTF-16 or UTF-32 is used.
5793 # We ignore the endianness to get a good enough match
5794 data
= data
.replace(b
'\0', b
'')
5795 mobj
= re
.match(rb
'(?m)^#\s*coding\s*:\s*(\S+)\s*$', data
)
5796 return mobj
.group(1).decode() if mobj
else None, 0
5803 __initialized
= False
5805 def __init__(self
, parser
, label
=None):
5806 self
.parser
, self
.label
= parser
, label
5807 self
._loaded
_paths
, self
.configs
= set(), []
5809 def init(self
, args
=None, filename
=None):
5810 assert not self
.__initialized
5811 self
.own_args
, self
.filename
= args
, filename
5812 return self
.load_configs()
5814 def load_configs(self
):
5817 location
= os
.path
.realpath(self
.filename
)
5818 directory
= os
.path
.dirname(location
)
5819 if location
in self
._loaded
_paths
:
5821 self
._loaded
_paths
.add(location
)
5823 self
.__initialized
= True
5824 opts
, _
= self
.parser
.parse_known_args(self
.own_args
)
5825 self
.parsed_args
= self
.own_args
5826 for location
in opts
.config_locations
or []:
5828 if location
in self
._loaded
_paths
:
5830 self
._loaded
_paths
.add(location
)
5831 self
.append_config(shlex
.split(read_stdin('options'), comments
=True), label
='stdin')
5833 location
= os
.path
.join(directory
, expand_path(location
))
5834 if os
.path
.isdir(location
):
5835 location
= os
.path
.join(location
, 'yt-dlp.conf')
5836 if not os
.path
.exists(location
):
5837 self
.parser
.error(f
'config location {location} does not exist')
5838 self
.append_config(self
.read_file(location
), location
)
5842 label
= join_nonempty(
5843 self
.label
, 'config', f
'"{self.filename}"' if self
.filename
else '',
5845 return join_nonempty(
5846 self
.own_args
is not None and f
'{label[0].upper()}{label[1:]}: {self.hide_login_info(self.own_args)}',
5847 *(f
'\n{c}'.replace('\n', '\n| ')[1:] for c
in self
.configs
),
5851 def read_file(filename
, default
=[]):
5853 optionf
= open(filename
, 'rb')
5855 return default
# silently skip if file is not present
5857 enc
, skip
= determine_file_encoding(optionf
.read(512))
5858 optionf
.seek(skip
, io
.SEEK_SET
)
5860 enc
= None # silently skip read errors
5862 # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
5863 contents
= optionf
.read().decode(enc
or preferredencoding())
5864 res
= shlex
.split(contents
, comments
=True)
5865 except Exception as err
:
5866 raise ValueError(f
'Unable to parse "{filename}": {err}')
5872 def hide_login_info(opts
):
5873 PRIVATE_OPTS
= {'-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'}
5874 eqre
= re
.compile('^(?P<key>' + ('|'.join(re
.escape(po
) for po
in PRIVATE_OPTS
)) + ')=.+$')
5879 return m
.group('key') + '=PRIVATE'
5883 opts
= list(map(_scrub_eq
, opts
))
5884 for idx
, opt
in enumerate(opts
):
5885 if opt
in PRIVATE_OPTS
and idx
+ 1 < len(opts
):
5886 opts
[idx
+ 1] = 'PRIVATE'
5889 def append_config(self
, *args
, label
=None):
5890 config
= type(self
)(self
.parser
, label
)
5891 config
._loaded
_paths
= self
._loaded
_paths
5892 if config
.init(*args
):
5893 self
.configs
.append(config
)
5897 for config
in reversed(self
.configs
):
5898 yield from config
.all_args
5899 yield from self
.parsed_args
or []
5901 def parse_known_args(self
, **kwargs
):
5902 return self
.parser
.parse_known_args(self
.all_args
, **kwargs
)
5904 def parse_args(self
):
5905 return self
.parser
.parse_args(self
.all_args
)
5908 class WebSocketsWrapper
:
5909 """Wraps websockets module to use in non-async scopes"""
5912 def __init__(self
, url
, headers
=None, connect
=True):
5913 self
.loop
= asyncio
.new_event_loop()
5914 # XXX: "loop" is deprecated
5915 self
.conn
= websockets
.connect(
5916 url
, extra_headers
=headers
, ping_interval
=None,
5917 close_timeout
=float('inf'), loop
=self
.loop
, ping_timeout
=float('inf'))
5920 atexit
.register(self
.__exit
__, None, None, None)
5922 def __enter__(self
):
5924 self
.pool
= self
.run_with_loop(self
.conn
.__aenter
__(), self
.loop
)
5927 def send(self
, *args
):
5928 self
.run_with_loop(self
.pool
.send(*args
), self
.loop
)
5930 def recv(self
, *args
):
5931 return self
.run_with_loop(self
.pool
.recv(*args
), self
.loop
)
5933 def __exit__(self
, type, value
, traceback
):
5935 return self
.run_with_loop(self
.conn
.__aexit
__(type, value
, traceback
), self
.loop
)
5938 self
._cancel
_all
_tasks
(self
.loop
)
5940 # taken from https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py with modifications
5941 # for contributors: If there's any new library using asyncio needs to be run in non-async, move these function out of this class
5943 def run_with_loop(main
, loop
):
5944 if not asyncio
.iscoroutine(main
):
5945 raise ValueError(f
'a coroutine was expected, got {main!r}')
5948 return loop
.run_until_complete(main
)
5950 loop
.run_until_complete(loop
.shutdown_asyncgens())
5951 if hasattr(loop
, 'shutdown_default_executor'):
5952 loop
.run_until_complete(loop
.shutdown_default_executor())
5955 def _cancel_all_tasks(loop
):
5956 to_cancel
= asyncio
.all_tasks(loop
)
5961 for task
in to_cancel
:
5964 # XXX: "loop" is removed in python 3.10+
5965 loop
.run_until_complete(
5966 asyncio
.gather(*to_cancel
, loop
=loop
, return_exceptions
=True))
5968 for task
in to_cancel
:
5969 if task
.cancelled():
5971 if task
.exception() is not None:
5972 loop
.call_exception_handler({
5973 'message': 'unhandled exception during asyncio.run() shutdown',
5974 'exception': task
.exception(),
5979 def merge_headers(*dicts
):
5980 """Merge dicts of http headers case insensitively, prioritizing the latter ones"""
5981 return {k.title(): v for k, v in itertools.chain.from_iterable(map(dict.items, dicts))}
5984 def cached_method(f
):
5985 """Cache a method"""
5986 signature
= inspect
.signature(f
)
5989 def wrapper(self
, *args
, **kwargs
):
5990 bound_args
= signature
.bind(self
, *args
, **kwargs
)
5991 bound_args
.apply_defaults()
5992 key
= tuple(bound_args
.arguments
.values())[1:]
5994 cache
= vars(self
).setdefault('_cached_method__cache', {}).setdefault(f.__name__, {}
)
5995 if key
not in cache
:
5996 cache
[key
] = f(self
, *args
, **kwargs
)
6001 class classproperty
:
6002 """property access for class methods with optional caching"""
6003 def __new__(cls
, func
=None, *args
, **kwargs
):
6005 return functools
.partial(cls
, *args
, **kwargs
)
6006 return super().__new
__(cls
)
6008 def __init__(self
, func
, *, cache
=False):
6009 functools
.update_wrapper(self
, func
)
6011 self
._cache
= {} if cache
else None
6013 def __get__(self
, _
, cls
):
6014 if self
._cache
is None:
6015 return self
.func(cls
)
6016 elif cls
not in self
._cache
:
6017 self
._cache
[cls
] = self
.func(cls
)
6018 return self
._cache
[cls
]
6021 class Namespace(types
.SimpleNamespace
):
6022 """Immutable namespace"""
6025 return iter(self
.__dict
__.values())
6029 return self
.__dict
__.items()
6032 MEDIA_EXTENSIONS
= Namespace(
6033 common_video
=('avi', 'flv', 'mkv', 'mov', 'mp4', 'webm'),
6034 video
=('3g2', '3gp', 'f4v', 'mk3d', 'divx', 'mpg', 'ogv', 'm4v', 'wmv'),
6035 common_audio
=('aiff', 'alac', 'flac', 'm4a', 'mka', 'mp3', 'ogg', 'opus', 'wav'),
6036 audio
=('aac', 'ape', 'asf', 'f4a', 'f4b', 'm4b', 'm4p', 'm4r', 'oga', 'ogx', 'spx', 'vorbis', 'wma', 'weba'),
6037 thumbnails
=('jpg', 'png', 'webp'),
6038 storyboards
=('mhtml', ),
6039 subtitles
=('srt', 'vtt', 'ass', 'lrc'),
6040 manifests
=('f4f', 'f4m', 'm3u8', 'smil', 'mpd'),
6042 MEDIA_EXTENSIONS
.video
+= MEDIA_EXTENSIONS
.common_video
6043 MEDIA_EXTENSIONS
.audio
+= MEDIA_EXTENSIONS
.common_audio
6045 KNOWN_EXTENSIONS
= (*MEDIA_EXTENSIONS
.video
, *MEDIA_EXTENSIONS
.audio
, *MEDIA_EXTENSIONS
.manifests
)
6050 for retry in RetryManager(...):
6053 except SomeException as err:
6057 attempt
, _error
= 0, None
6059 def __init__(self
, _retries
, _error_callback
, **kwargs
):
6060 self
.retries
= _retries
or 0
6061 self
.error_callback
= functools
.partial(_error_callback
, **kwargs
)
6063 def _should_retry(self
):
6064 return self
._error
is not NO_DEFAULT
and self
.attempt
<= self
.retries
6068 if self
._error
is NO_DEFAULT
:
6073 def error(self
, value
):
6077 while self
._should
_retry
():
6078 self
.error
= NO_DEFAULT
6082 self
.error_callback(self
.error
, self
.attempt
, self
.retries
)
6085 def report_retry(e
, count
, retries
, *, sleep_func
, info
, warn
, error
=None, suffix
=None):
6086 """Utility function for reporting retries"""
6089 return error(f
'{e}. Giving up after {count - 1} retries') if count
> 1 else error(str(e
))
6094 elif isinstance(e
, ExtractorError
):
6095 e
= remove_end(str_or_none(e
.cause
) or e
.orig_msg
, '.')
6096 warn(f
'{e}. Retrying{format_field(suffix, None, " %s")} ({count}/{retries})...')
6098 delay
= float_or_none(sleep_func(n
=count
- 1)) if callable(sleep_func
) else sleep_func
6100 info(f
'Sleeping {delay:.2f} seconds ...')
6104 def make_archive_id(ie
, video_id
):
6105 ie_key
= ie
if isinstance(ie
, str) else ie
.ie_key()
6106 return f
'{ie_key.lower()} {video_id}'
6109 def truncate_string(s
, left
, right
=0):
6110 assert left
> 3 and right
>= 0
6111 if s
is None or len(s
) <= left
+ right
:
6113 return f
'{s[:left-3]}...{s[-right:] if right else ""}'
6116 def orderedSet_from_options(options
, alias_dict
, *, use_regex
=False, start
=None):
6117 assert 'all' in alias_dict
, '"all" alias is required'
6118 requested
= list(start
or [])
6120 discard
= val
.startswith('-')
6124 if val
in alias_dict
:
6125 val
= alias_dict
[val
] if not discard
else [
6126 i
[1:] if i
.startswith('-') else f
'-{i}' for i
in alias_dict
[val
]]
6127 # NB: Do not allow regex in aliases for performance
6128 requested
= orderedSet_from_options(val
, alias_dict
, start
=requested
)
6131 current
= (filter(re
.compile(val
, re
.I
).fullmatch
, alias_dict
['all']) if use_regex
6132 else [val
] if val
in alias_dict
['all'] else None)
6134 raise ValueError(val
)
6137 for item
in current
:
6138 while item
in requested
:
6139 requested
.remove(item
)
6141 requested
.extend(current
)
6143 return orderedSet(requested
)
6147 regex
= r
' *((?P<reverse>\+)?(?P<field>[a-zA-Z0-9_]+)((?P<separator>[~:])(?P<limit>.*?))?)? *$'
6149 default
= ('hidden', 'aud_or_vid', 'hasvid', 'ie_pref', 'lang', 'quality',
6150 'res', 'fps', 'hdr:12', 'vcodec:vp9.2', 'channels', 'acodec',
6151 'size', 'br', 'asr', 'proto', 'ext', 'hasaud', 'source', 'id') # These must not be aliases
6152 ytdl_default
= ('hasaud', 'lang', 'quality', 'tbr', 'filesize', 'vbr',
6153 'height', 'width', 'proto', 'vext', 'abr', 'aext',
6154 'fps', 'fs_approx', 'source', 'id')
6157 'vcodec': {'type': 'ordered', 'regex': True,
6158 'order': ['av0?1', 'vp0?9.2', 'vp0?9', '[hx]265|he?vc?', '[hx]264|avc', 'vp0?8', 'mp4v|h263', 'theora', '', None, 'none']},
6159 'acodec': {'type': 'ordered', 'regex': True,
6160 'order': ['[af]lac', 'wav|aiff', 'opus', 'vorbis|ogg', 'aac', 'mp?4a?', 'mp3', 'ac-?4', 'e-?a?c-?3', 'ac-?3', 'dts', '', None, 'none']},
6161 'hdr': {'type': 'ordered', 'regex': True, 'field': 'dynamic_range',
6162 'order': ['dv', '(hdr)?12', r
'(hdr)?10\+', '(hdr)?10', 'hlg', '', 'sdr', None]},
6163 'proto': {'type': 'ordered', 'regex': True, 'field': 'protocol',
6164 'order': ['(ht|f)tps', '(ht|f)tp$', 'm3u8.*', '.*dash', 'websocket_frag', 'rtmpe?', '', 'mms|rtsp', 'ws|websocket', 'f4']},
6165 'vext': {'type': 'ordered', 'field': 'video_ext',
6166 'order': ('mp4', 'mov', 'webm', 'flv', '', 'none'),
6167 'order_free': ('webm', 'mp4', 'mov', 'flv', '', 'none')},
6168 'aext': {'type': 'ordered', 'regex': True, 'field': 'audio_ext',
6169 'order': ('m4a', 'aac', 'mp3', 'ogg', 'opus', 'web[am]', '', 'none'),
6170 'order_free': ('ogg', 'opus', 'web[am]', 'mp3', 'm4a', 'aac', '', 'none')},
6171 'hidden': {'visible': False, 'forced': True, 'type': 'extractor', 'max': -1000}
,
6172 'aud_or_vid': {'visible': False, 'forced': True, 'type': 'multiple',
6173 'field': ('vcodec', 'acodec'),
6174 'function': lambda it
: int(any(v
!= 'none' for v
in it
))},
6175 'ie_pref': {'priority': True, 'type': 'extractor'}
,
6176 'hasvid': {'priority': True, 'field': 'vcodec', 'type': 'boolean', 'not_in_list': ('none',)}
,
6177 'hasaud': {'field': 'acodec', 'type': 'boolean', 'not_in_list': ('none',)}
,
6178 'lang': {'convert': 'float', 'field': 'language_preference', 'default': -1}
,
6179 'quality': {'convert': 'float', 'default': -1}
,
6180 'filesize': {'convert': 'bytes'}
,
6181 'fs_approx': {'convert': 'bytes', 'field': 'filesize_approx'}
,
6182 'id': {'convert': 'string', 'field': 'format_id'}
,
6183 'height': {'convert': 'float_none'}
,
6184 'width': {'convert': 'float_none'}
,
6185 'fps': {'convert': 'float_none'}
,
6186 'channels': {'convert': 'float_none', 'field': 'audio_channels'}
,
6187 'tbr': {'convert': 'float_none'}
,
6188 'vbr': {'convert': 'float_none'}
,
6189 'abr': {'convert': 'float_none'}
,
6190 'asr': {'convert': 'float_none'}
,
6191 'source': {'convert': 'float', 'field': 'source_preference', 'default': -1}
,
6193 'codec': {'type': 'combined', 'field': ('vcodec', 'acodec')}
,
6194 'br': {'type': 'combined', 'field': ('tbr', 'vbr', 'abr'), 'same_limit': True}
,
6195 'size': {'type': 'combined', 'same_limit': True, 'field': ('filesize', 'fs_approx')}
,
6196 'ext': {'type': 'combined', 'field': ('vext', 'aext')}
,
6197 'res': {'type': 'multiple', 'field': ('height', 'width'),
6198 'function': lambda it
: (lambda l
: min(l
) if l
else 0)(tuple(filter(None, it
)))},
6200 # Actual field names
6201 'format_id': {'type': 'alias', 'field': 'id'}
,
6202 'preference': {'type': 'alias', 'field': 'ie_pref'}
,
6203 'language_preference': {'type': 'alias', 'field': 'lang'}
,
6204 'source_preference': {'type': 'alias', 'field': 'source'}
,
6205 'protocol': {'type': 'alias', 'field': 'proto'}
,
6206 'filesize_approx': {'type': 'alias', 'field': 'fs_approx'}
,
6207 'audio_channels': {'type': 'alias', 'field': 'channels'}
,
6210 'dimension': {'type': 'alias', 'field': 'res', 'deprecated': True}
,
6211 'resolution': {'type': 'alias', 'field': 'res', 'deprecated': True}
,
6212 'extension': {'type': 'alias', 'field': 'ext', 'deprecated': True}
,
6213 'bitrate': {'type': 'alias', 'field': 'br', 'deprecated': True}
,
6214 'total_bitrate': {'type': 'alias', 'field': 'tbr', 'deprecated': True}
,
6215 'video_bitrate': {'type': 'alias', 'field': 'vbr', 'deprecated': True}
,
6216 'audio_bitrate': {'type': 'alias', 'field': 'abr', 'deprecated': True}
,
6217 'framerate': {'type': 'alias', 'field': 'fps', 'deprecated': True}
,
6218 'filesize_estimate': {'type': 'alias', 'field': 'size', 'deprecated': True}
,
6219 'samplerate': {'type': 'alias', 'field': 'asr', 'deprecated': True}
,
6220 'video_ext': {'type': 'alias', 'field': 'vext', 'deprecated': True}
,
6221 'audio_ext': {'type': 'alias', 'field': 'aext', 'deprecated': True}
,
6222 'video_codec': {'type': 'alias', 'field': 'vcodec', 'deprecated': True}
,
6223 'audio_codec': {'type': 'alias', 'field': 'acodec', 'deprecated': True}
,
6224 'video': {'type': 'alias', 'field': 'hasvid', 'deprecated': True}
,
6225 'has_video': {'type': 'alias', 'field': 'hasvid', 'deprecated': True}
,
6226 'audio': {'type': 'alias', 'field': 'hasaud', 'deprecated': True}
,
6227 'has_audio': {'type': 'alias', 'field': 'hasaud', 'deprecated': True}
,
6228 'extractor': {'type': 'alias', 'field': 'ie_pref', 'deprecated': True}
,
6229 'extractor_preference': {'type': 'alias', 'field': 'ie_pref', 'deprecated': True}
,
6232 def __init__(self
, ydl
, field_preference
):
6235 self
.evaluate_params(self
.ydl
.params
, field_preference
)
6236 if ydl
.params
.get('verbose'):
6237 self
.print_verbose_info(self
.ydl
.write_debug
)
6239 def _get_field_setting(self
, field
, key
):
6240 if field
not in self
.settings
:
6241 if key
in ('forced', 'priority'):
6243 self
.ydl
.deprecated_feature(f
'Using arbitrary fields ({field}) for format sorting is '
6244 'deprecated and may be removed in a future version')
6245 self
.settings
[field
] = {}
6246 propObj
= self
.settings
[field
]
6247 if key
not in propObj
:
6248 type = propObj
.get('type')
6250 default
= 'preference' if type == 'extractor' else (field
,) if type in ('combined', 'multiple') else field
6251 elif key
== 'convert':
6252 default
= 'order' if type == 'ordered' else 'float_string' if field
else 'ignore'
6254 default
= {'type': 'field', 'visible': True, 'order': [], 'not_in_list': (None,)}
.get(key
, None)
6255 propObj
[key
] = default
6258 def _resolve_field_value(self
, field
, value
, convertNone
=False):
6263 value
= value
.lower()
6264 conversion
= self
._get
_field
_setting
(field
, 'convert')
6265 if conversion
== 'ignore':
6267 if conversion
== 'string':
6269 elif conversion
== 'float_none':
6270 return float_or_none(value
)
6271 elif conversion
== 'bytes':
6272 return parse_bytes(value
)
6273 elif conversion
== 'order':
6274 order_list
= (self
._use
_free
_order
and self
._get
_field
_setting
(field
, 'order_free')) or self
._get
_field
_setting
(field
, 'order')
6275 use_regex
= self
._get
_field
_setting
(field
, 'regex')
6276 list_length
= len(order_list
)
6277 empty_pos
= order_list
.index('') if '' in order_list
else list_length
+ 1
6278 if use_regex
and value
is not None:
6279 for i
, regex
in enumerate(order_list
):
6280 if regex
and re
.match(regex
, value
):
6281 return list_length
- i
6282 return list_length
- empty_pos
# not in list
6283 else: # not regex or value = None
6284 return list_length
- (order_list
.index(value
) if value
in order_list
else empty_pos
)
6286 if value
.isnumeric():
6289 self
.settings
[field
]['convert'] = 'string'
6292 def evaluate_params(self
, params
, sort_extractor
):
6293 self
._use
_free
_order
= params
.get('prefer_free_formats', False)
6294 self
._sort
_user
= params
.get('format_sort', [])
6295 self
._sort
_extractor
= sort_extractor
6297 def add_item(field
, reverse
, closest
, limit_text
):
6298 field
= field
.lower()
6299 if field
in self
._order
:
6301 self
._order
.append(field
)
6302 limit
= self
._resolve
_field
_value
(field
, limit_text
)
6305 'closest': False if limit
is None else closest
,
6306 'limit_text': limit_text
,
6308 if field
in self
.settings
:
6309 self
.settings
[field
].update(data
)
6311 self
.settings
[field
] = data
6314 tuple(field
for field
in self
.default
if self
._get
_field
_setting
(field
, 'forced'))
6315 + (tuple() if params
.get('format_sort_force', False)
6316 else tuple(field
for field
in self
.default
if self
._get
_field
_setting
(field
, 'priority')))
6317 + tuple(self
._sort
_user
) + tuple(sort_extractor
) + self
.default
)
6319 for item
in sort_list
:
6320 match
= re
.match(self
.regex
, item
)
6322 raise ExtractorError('Invalid format sort string "%s" given by extractor' % item
)
6323 field
= match
.group('field')
6326 if self
._get
_field
_setting
(field
, 'type') == 'alias':
6327 alias
, field
= field
, self
._get
_field
_setting
(field
, 'field')
6328 if self
._get
_field
_setting
(alias
, 'deprecated'):
6329 self
.ydl
.deprecated_feature(f
'Format sorting alias {alias} is deprecated and may '
6330 f
'be removed in a future version. Please use {field} instead')
6331 reverse
= match
.group('reverse') is not None
6332 closest
= match
.group('separator') == '~'
6333 limit_text
= match
.group('limit')
6335 has_limit
= limit_text
is not None
6336 has_multiple_fields
= self
._get
_field
_setting
(field
, 'type') == 'combined'
6337 has_multiple_limits
= has_limit
and has_multiple_fields
and not self
._get
_field
_setting
(field
, 'same_limit')
6339 fields
= self
._get
_field
_setting
(field
, 'field') if has_multiple_fields
else (field
,)
6340 limits
= limit_text
.split(':') if has_multiple_limits
else (limit_text
,) if has_limit
else tuple()
6341 limit_count
= len(limits
)
6342 for (i
, f
) in enumerate(fields
):
6343 add_item(f
, reverse
, closest
,
6344 limits
[i
] if i
< limit_count
6345 else limits
[0] if has_limit
and not has_multiple_limits
6348 def print_verbose_info(self
, write_debug
):
6350 write_debug('Sort order given by user: %s' % ', '.join(self
._sort
_user
))
6351 if self
._sort
_extractor
:
6352 write_debug('Sort order given by extractor: %s' % ', '.join(self
._sort
_extractor
))
6353 write_debug('Formats sorted by: %s' % ', '.join(['%s%s%s' % (
6354 '+' if self
._get
_field
_setting
(field
, 'reverse') else '', field
,
6355 '%s%s(%s)' % ('~' if self
._get
_field
_setting
(field
, 'closest') else ':',
6356 self
._get
_field
_setting
(field
, 'limit_text'),
6357 self
._get
_field
_setting
(field
, 'limit'))
6358 if self
._get
_field
_setting
(field
, 'limit_text') is not None else '')
6359 for field
in self
._order
if self
._get
_field
_setting
(field
, 'visible')]))
6361 def _calculate_field_preference_from_value(self
, format
, field
, type, value
):
6362 reverse
= self
._get
_field
_setting
(field
, 'reverse')
6363 closest
= self
._get
_field
_setting
(field
, 'closest')
6364 limit
= self
._get
_field
_setting
(field
, 'limit')
6366 if type == 'extractor':
6367 maximum
= self
._get
_field
_setting
(field
, 'max')
6368 if value
is None or (maximum
is not None and value
>= maximum
):
6370 elif type == 'boolean':
6371 in_list
= self
._get
_field
_setting
(field
, 'in_list')
6372 not_in_list
= self
._get
_field
_setting
(field
, 'not_in_list')
6373 value
= 0 if ((in_list
is None or value
in in_list
) and (not_in_list
is None or value
not in not_in_list
)) else -1
6374 elif type == 'ordered':
6375 value
= self
._resolve
_field
_value
(field
, value
, True)
6377 # try to convert to number
6378 val_num
= float_or_none(value
, default
=self
._get
_field
_setting
(field
, 'default'))
6379 is_num
= self
._get
_field
_setting
(field
, 'convert') != 'string' and val_num
is not None
6383 return ((-10, 0) if value
is None
6384 else (1, value
, 0) if not is_num
# if a field has mixed strings and numbers, strings are sorted higher
6385 else (0, -abs(value
- limit
), value
- limit
if reverse
else limit
- value
) if closest
6386 else (0, value
, 0) if not reverse
and (limit
is None or value
<= limit
)
6387 else (0, -value
, 0) if limit
is None or (reverse
and value
== limit
) or value
> limit
6388 else (-1, value
, 0))
6390 def _calculate_field_preference(self
, format
, field
):
6391 type = self
._get
_field
_setting
(field
, 'type') # extractor, boolean, ordered, field, multiple
6392 get_value
= lambda f
: format
.get(self
._get
_field
_setting
(f
, 'field'))
6393 if type == 'multiple':
6394 type = 'field' # Only 'field' is allowed in multiple for now
6395 actual_fields
= self
._get
_field
_setting
(field
, 'field')
6397 value
= self
._get
_field
_setting
(field
, 'function')(get_value(f
) for f
in actual_fields
)
6399 value
= get_value(field
)
6400 return self
._calculate
_field
_preference
_from
_value
(format
, field
, type, value
)
6402 def calculate_preference(self
, format
):
6403 # Determine missing protocol
6404 if not format
.get('protocol'):
6405 format
['protocol'] = determine_protocol(format
)
6407 # Determine missing ext
6408 if not format
.get('ext') and 'url' in format
:
6409 format
['ext'] = determine_ext(format
['url'])
6410 if format
.get('vcodec') == 'none':
6411 format
['audio_ext'] = format
['ext'] if format
.get('acodec') != 'none' else 'none'
6412 format
['video_ext'] = 'none'
6414 format
['video_ext'] = format
['ext']
6415 format
['audio_ext'] = 'none'
6416 # if format.get('preference') is None and format.get('ext') in ('f4f', 'f4m'): # Not supported?
6417 # format['preference'] = -1000
6419 if format
.get('preference') is None and format
.get('ext') == 'flv' and re
.match('[hx]265|he?vc?', format
.get('vcodec') or ''):
6420 # HEVC-over-FLV is out-of-spec by FLV's original spec
6421 # ref. https://trac.ffmpeg.org/ticket/6389
6422 # ref. https://github.com/yt-dlp/yt-dlp/pull/5821
6423 format
['preference'] = -100
6425 # Determine missing bitrates
6426 if format
.get('tbr') is None:
6427 if format
.get('vbr') is not None and format
.get('abr') is not None:
6428 format
['tbr'] = format
.get('vbr', 0) + format
.get('abr', 0)
6430 if format
.get('vcodec') != 'none' and format
.get('vbr') is None:
6431 format
['vbr'] = format
.get('tbr') - format
.get('abr', 0)
6432 if format
.get('acodec') != 'none' and format
.get('abr') is None:
6433 format
['abr'] = format
.get('tbr') - format
.get('vbr', 0)
6435 return tuple(self
._calculate
_field
_preference
(format
, field
) for field
in self
._order
)
6439 has_certifi
= bool(certifi
)
6440 has_websockets
= bool(websockets
)
6443 def load_plugins(name
, suffix
, namespace
):
6444 from .plugins
import load_plugins
6445 ret
= load_plugins(name
, suffix
)
6446 namespace
.update(ret
)