import xml.etree.ElementTree

from .compat import asyncio, functools  # isort: split
from .compat import (
    compat_etree_fromstring,
    compat_html_entities_html5,
    compat_HTMLParseError,
    compat_urllib_parse_unquote_plus,
    compat_urllib_parse_urlencode,
    compat_urllib_parse_urlparse,
    compat_urllib_request,
)
from .dependencies import brotli, certifi, websockets
from .socks import ProxyType, sockssocket


def register_socks_protocols():
    # "Register" SOCKS protocols
    # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
    # URLs with protocols not in urlparse.uses_netloc are not handled correctly
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in compat_urlparse.uses_netloc:
            compat_urlparse.uses_netloc.append(scheme)


# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))


def random_user_agent():
    _USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
    return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)


SUPPORTED_ENCODINGS = [
    'gzip', 'deflate',
]
if brotli:
    SUPPORTED_ENCODINGS.append('br')

std_headers = {
    'User-Agent': random_user_agent(),
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-us,en;q=0.5',
    'Sec-Fetch-Mode': 'navigate',
}

USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}


NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}

KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'f4f', 'f4m', 'm3u8', 'smil')

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))


DATE_FORMATS = (
    '%Y-%m-%d %H:%M:%S.%f',
    '%Y-%m-%d %H:%M:%S:%f',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%b %d %Y at %H:%M:%S',
    '%B %d %Y at %H:%M:%S',
)

DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([

PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'
NUMBER_RE = r'\d+(?:\.\d+)?'


def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref


def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    tf = tempfile.NamedTemporaryFile(
        prefix=f'{os.path.basename(fn)}.', dir=os.path.dirname(fn),
        suffix='.tmp', delete=False, mode='w', encoding='utf-8')

    try:
        with tf:
            json.dump(obj, tf, ensure_ascii=False)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            with contextlib.suppress(OSError):
                os.unlink(fn)
        with contextlib.suppress(OSError):
            mask = os.umask(0)
            os.umask(mask)
            os.chmod(tf.name, 0o666 & ~mask)
        os.rename(tf.name, fn)
    except Exception:
        with contextlib.suppress(OSError):
            os.remove(tf.name)
        raise


def find_xpath_attr(node, xpath, key, val=None):
    """ Find the xpath xpath[@key=val] """
    assert re.match(r'^[a-zA-Z_-]+$', key)
    expr = xpath + ('[@%s]' % key if val is None else f"[@{key}='{val}']")
    return node.find(expr)
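
# Illustrative usage (not in the original source): the expression built above
# is a plain XPath predicate, equivalent to node.find(".//video[@id='10']"):
#   doc = xml.etree.ElementTree.fromstring('<root><video id="10"/></root>')
#   find_xpath_attr(doc, './/video', 'id', '10')  # -> the <video> element
#   find_xpath_attr(doc, './/video', 'id')        # -> first <video> with any id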


# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter
def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)


def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(xpath)

    if isinstance(xpath, (str, compat_str)):
        n = _find_xpath(xpath)

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)


def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
    return n.text


def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = f'{xpath}[@{key}]' if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
    return n.attrib[key]
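
# Illustrative usage (not in the original source), against a hypothetical document:
#   doc = compat_etree_fromstring('<root><v id="3">hi</v></root>')
#   xpath_text(doc, './/v')                      # -> 'hi'
#   xpath_attr(doc, './/v', 'id')                # -> '3'
#   xpath_text(doc, './/missing', default=None)  # -> None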


def get_element_by_id(id, html, **kwargs):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html, **kwargs)


def get_element_html_by_id(id, html, **kwargs):
    """Return the html of the tag with the specified ID in the passed HTML document"""
    return get_element_html_by_attribute('id', id, html, **kwargs)


def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_html_by_class(class_name, html):
    """Return the html of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_html_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_by_attribute(attribute, value, html, **kwargs):
    retval = get_elements_by_attribute(attribute, value, html, **kwargs)
    return retval[0] if retval else None


def get_element_html_by_attribute(attribute, value, html, **kargs):
    retval = get_elements_html_by_attribute(attribute, value, html, **kargs)
    return retval[0] if retval else None


def get_elements_by_class(class_name, html, **kargs):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_by_attribute(
        'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_html_by_class(class_name, html):
    """Return the html of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_html_by_attribute(
        'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_by_attribute(*args, **kwargs):
    """Return the content of the tag with the specified attribute in the passed HTML document"""
    return [content for content, _ in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_html_by_attribute(*args, **kwargs):
    """Return the html of the tag with the specified attribute in the passed HTML document"""
    return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]
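
# Illustrative usage (not in the original source): the class regex anchors the
# name between quotes/whitespace, so partial class names should not match:
#   get_element_by_class('title', '<span class="title main">Foo</span>')       # -> 'Foo'
#   get_element_html_by_class('title', '<span class="title main">Foo</span>')  # -> '<span class="title main">Foo</span>'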


def get_elements_text_and_html_by_attribute(attribute, value, html, escape_value=True):
    """
    Return the text (content) and the html (whole) of the tag with the specified
    attribute in the passed HTML document
    """

    quote = '' if re.match(r'''[\s"'`=<>]''', value) else '?'

    value = re.escape(value) if escape_value else value

    partial_element_re = rf'''(?x)
        <(?P<tag>[a-zA-Z0-9:._-]+)
         (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
         \s{re.escape(attribute)}\s*=\s*(?P<_q>['"]{quote})(?-x:{value})(?P=_q)
        '''

    for m in re.finditer(partial_element_re, html):
        content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():])

        yield (
            unescapeHTML(re.sub(r'^(?P<q>["\'])(?P<content>.*)(?P=q)$', r'\g<content>', content, flags=re.DOTALL)),
            whole,
        )


class HTMLBreakOnClosingTagParser(compat_HTMLParser):
    """
    HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
    closing tag for the first opening tag it has encountered, and can be used
    as a context manager
    """

    class HTMLBreakOnClosingTagException(Exception):
        pass

    def __init__(self):
        self.tagstack = collections.deque()
        compat_HTMLParser.__init__(self)

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def close(self):
        # handle_endtag does not return upon raising HTMLBreakOnClosingTagException,
        # so data remains buffered; we no longer have any interest in it, thus
        # override this method to discard it
        pass

    def handle_starttag(self, tag, _):
        self.tagstack.append(tag)

    def handle_endtag(self, tag):
        if not self.tagstack:
            raise compat_HTMLParseError('no tags in the stack')
        while self.tagstack:
            inner_tag = self.tagstack.pop()
            if inner_tag == tag:
                break
        else:
            raise compat_HTMLParseError(f'matching opening tag for closing {tag} tag not found')
        if not self.tagstack:
            raise self.HTMLBreakOnClosingTagException()


def get_element_text_and_html_by_tag(tag, html):
    """
    For the first element with the specified tag in the passed HTML document
    return its content (text) and the whole element (html)
    """
    def find_or_raise(haystack, needle, exc):
        try:
            return haystack.index(needle)
        except ValueError:
            raise exc
    closing_tag = f'</{tag}>'
    whole_start = find_or_raise(
        html, f'<{tag}', compat_HTMLParseError(f'opening {tag} tag not found'))
    content_start = find_or_raise(
        html[whole_start:], '>', compat_HTMLParseError(f'malformed opening {tag} tag'))
    content_start += whole_start + 1
    with HTMLBreakOnClosingTagParser() as parser:
        parser.feed(html[whole_start:content_start])
        if not parser.tagstack or parser.tagstack[0] != tag:
            raise compat_HTMLParseError(f'parser did not match opening {tag} tag')
        offset = content_start
        while offset < len(html):
            next_closing_tag_start = find_or_raise(
                html[offset:], closing_tag,
                compat_HTMLParseError(f'closing {tag} tag not found'))
            next_closing_tag_end = next_closing_tag_start + len(closing_tag)
            try:
                parser.feed(html[offset:offset + next_closing_tag_end])
                offset += next_closing_tag_end
            except HTMLBreakOnClosingTagParser.HTMLBreakOnClosingTagException:
                return html[content_start:offset + next_closing_tag_start], \
                    html[whole_start:offset + next_closing_tag_end]
        raise compat_HTMLParseError('unexpected end of html')
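
# Illustrative usage (not in the original source): scanning from the first
# '<a' and feeding the parser until its break exception fires should yield
# both the inner text and the whole element:
#   get_element_text_and_html_by_tag('a', '<div><a href="#">x</a></div>')
#   -> ('x', '<a href="#">x</a>')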


class HTMLAttributeParser(compat_HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""

    def __init__(self):
        self.attrs = {}
        compat_HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)


class HTMLListAttrsParser(compat_HTMLParser):
    """HTML parser to gather the attributes for the elements of a list"""

    def __init__(self):
        compat_HTMLParser.__init__(self)
        self.items = []
        self._level = 0

    def handle_starttag(self, tag, attrs):
        if tag == 'li' and self._level == 0:
            self.items.append(dict(attrs))
        self._level += 1

    def handle_endtag(self, tag):
        self._level -= 1


def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    """
    parser = HTMLAttributeParser()
    with contextlib.suppress(compat_HTMLParseError):
        parser.feed(html_element)
        parser.close()
    return parser.attrs


def parse_list(webpage):
    """Given a string for a series of HTML <li> elements,
    return a dictionary of their attributes"""
    parser = HTMLListAttrsParser()
    parser.feed(webpage)
    parser.close()
    return parser.items


def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    html = re.sub(r'\s+', ' ', html)
    html = re.sub(r'(?u)\s?<\s?br\s?/?\s?>\s?', '\n', html)
    html = re.sub(r'(?u)<\s?/\s?p\s?>\s?<\s?p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()
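
# Illustrative usage (not in the original source): <br> and </p><p> become
# newlines, remaining tags are stripped and entities decoded:
#   clean_html('<p>foo</p><p>bar &amp; baz</p>')  # -> 'foo\nbar & baz'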


class LenientJSONDecoder(json.JSONDecoder):
    def __init__(self, *args, transform_source=None, ignore_extra=False, **kwargs):
        self.transform_source, self.ignore_extra = transform_source, ignore_extra
        super().__init__(*args, **kwargs)

    def decode(self, s):
        if self.transform_source:
            s = self.transform_source(s)
        if self.ignore_extra:
            return self.raw_decode(s.lstrip())[0]
        return super().decode(s)
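
# Illustrative usage (not in the original source): json.loads() forwards extra
# keyword arguments to the decoder class, so trailing garbage after the first
# JSON value can be ignored:
#   json.loads('{"a": 1} trailing', cls=LenientJSONDecoder, ignore_extra=True)  # -> {'a': 1}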


def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    if filename == '-':
        if sys.platform == 'win32':
            import msvcrt
            msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)

    for attempt in range(2):
        try:
            try:
                if sys.platform == 'win32':
                    # FIXME: An exclusive lock also locks the file from being read.
                    # Since windows locks are mandatory, don't lock the file on windows (for now).
                    # Ref: https://github.com/yt-dlp/yt-dlp/issues/3124
                    raise LockingUnsupportedError()
                stream = locked_file(filename, open_mode, block=False).__enter__()
            except OSError:
                stream = open(filename, open_mode)
            return stream, filename
        except OSError as err:
            if attempt or err.errno in (errno.EACCES,):
                raise
            old_filename, filename = filename, sanitize_path(filename)
            if old_filename == filename:
                raise


def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp


def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
    """Sanitizes a string so it could be used as part of a filename.
    @param restricted   Use a stricter subset of allowed characters
    @param is_id        Whether this is an ID that should be kept unchanged if possible.
                        If unset, yt-dlp's new sanitization rules are in effect
    """
    if s == '':
        return ''

    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        elif not restricted and char == '\n':
            return '\0 '
        elif char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '\0_\0-' if restricted else '\0 \0-'
        elif char in '\\/|*<>':
            return '\0_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace() or ord(char) > 127):
            return '\0_'
        return char

    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)  # Handle timestamps
    result = ''.join(map(replace_insane, s))
    if is_id is NO_DEFAULT:
        result = re.sub('(\0.)(?:(?=\\1)..)+', r'\1', result)  # Remove repeated substitute chars
        STRIP_RE = '(?:\0.|[ _-])*'
        result = re.sub(f'^\0.{STRIP_RE}|{STRIP_RE}\0.$', '', result)  # Remove substitute chars from start/end
    result = result.replace('\0', '') or '_'

    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
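
# Illustrative usage (not in the original source; output traced from the rules
# above): accents are transliterated via ACCENT_CHARS and the '\0'-prefixed
# substitutes collapse to '_'/'-':
#   sanitize_filename('Déjà vu: 10/10', restricted=True)  # should give 'Deja_vu_-_10_10'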


def sanitize_path(s, force=False):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform == 'win32':
        force = False
        drive_or_unc, _ = os.path.splitdrive(s)
    elif force:
        drive_or_unc = ''
    else:
        return s

    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    elif force and s and s[0] == os.path.sep:
        sanitized_path.insert(0, os.path.sep)
    return os.path.join(*sanitized_path)


def sanitize_url(url):
    # Prepend protocol-less URLs with `http:` scheme in order to mitigate
    # the number of unwanted failures due to missing protocol
    if url is None:
        return
    elif url.startswith('//'):
        return 'http:%s' % url
    # Fix some common typos seen so far
    COMMON_TYPOS = (
        # https://github.com/ytdl-org/youtube-dl/issues/15649
        (r'^httpss://', r'https://'),
        # https://bx1.be/lives/direct-tv/
        (r'^rmtp([es]?)://', r'rtmp\1://'),
    )
    for mistake, fixup in COMMON_TYPOS:
        if re.match(mistake, url):
            return re.sub(mistake, fixup, url)
    return url
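
# Illustrative usage (not in the original source):
#   sanitize_url('//example.com/x')        # -> 'http://example.com/x'
#   sanitize_url('httpss://example.com/')  # -> 'https://example.com/'
#   sanitize_url('rmtps://example.com/s')  # -> 'rtmps://example.com/s'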


def extract_basic_auth(url):
    parts = compat_urlparse.urlsplit(url)
    if parts.username is None:
        return url, None
    url = compat_urlparse.urlunsplit(parts._replace(netloc=(
        parts.hostname if parts.port is None
        else '%s:%d' % (parts.hostname, parts.port))))
    auth_payload = base64.b64encode(
        ('%s:%s' % (parts.username, parts.password or '')).encode())
    return url, f'Basic {auth_payload.decode()}'
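
# Illustrative usage (not in the original source): inline credentials are
# stripped and returned as a ready-made Authorization value ('dXNlcjpwYXNz'
# is base64 of 'user:pass'):
#   extract_basic_auth('http://user:pass@example.com/x')
#   -> ('http://example.com/x', 'Basic dXNlcjpwYXNz')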


def sanitized_Request(url, *args, **kwargs):
    url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
    if auth_header is not None:
        headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
        headers['Authorization'] = auth_header
    return compat_urllib_request.Request(url, *args, **kwargs)


def expand_path(s):
    """Expand shell variables and ~"""
    return os.path.expandvars(compat_expanduser(s))


def orderedSet(iterable, *, lazy=False):
    """Remove all duplicates from the input iterable"""
    def _iter():
        seen = []  # Do not use set since the items can be unhashable
        for x in iterable:
            if x not in seen:
                seen.append(x)
                yield x

    return _iter() if lazy else list(_iter())
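
# Illustrative usage (not in the original source): first occurrences win, and
# lazy=True returns a generator instead of a list:
#   orderedSet([1, 2, 1, 3, 2])  # -> [1, 2, 3]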


def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in compat_html_entities_html5:
        return compat_html_entities_html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/ytdl-org/youtube-dl/issues/7518
        with contextlib.suppress(ValueError):
            return compat_chr(int(numstr, base))

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert isinstance(s, str)

    return re.sub(
        r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)


def escapeHTML(text):
    return (
        text
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;')
        .replace('"', '&quot;')
        .replace("'", '&#39;')
    )


def process_communicate_or_kill(p, *args, **kwargs):
    write_string('DeprecationWarning: yt_dlp.utils.process_communicate_or_kill is deprecated '
                 'and may be removed in a future version. Use yt_dlp.utils.Popen.communicate_or_kill instead')
    return Popen.communicate_or_kill(p, *args, **kwargs)


class Popen(subprocess.Popen):
    if sys.platform == 'win32':
        _startupinfo = subprocess.STARTUPINFO()
        _startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    else:
        _startupinfo = None

    def __init__(self, *args, text=False, **kwargs):
        if text is True:
            kwargs['universal_newlines'] = True  # For 3.6 compatibility
            kwargs.setdefault('encoding', 'utf-8')
            kwargs.setdefault('errors', 'replace')
        super().__init__(*args, **kwargs, startupinfo=self._startupinfo)

    def communicate_or_kill(self, *args, **kwargs):
        try:
            return self.communicate(*args, **kwargs)
        except BaseException:  # Including KeyboardInterrupt
            self.kill(timeout=None)
            raise

    def kill(self, *, timeout=0):
        super().kill()
        if timeout != 0:
            self.wait(timeout=timeout)

    @classmethod
    def run(cls, *args, **kwargs):
        with cls(*args, **kwargs) as proc:
            stdout, stderr = proc.communicate_or_kill()
            return stdout or '', stderr or '', proc.returncode
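
# Illustrative usage (not in the original source; POSIX example): output is
# only captured when pipes are requested:
#   out, err, code = Popen.run(['echo', 'hi'], text=True, stdout=subprocess.PIPE)
#   # out == 'hi\n', err == '', code == 0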


def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding


def encodeFilename(s, for_subprocess=False):
    assert isinstance(s, str)
    return s


def decodeFilename(b, for_subprocess=False):
    return b


def encodeArgument(s):
    # Legacy code that uses byte strings
    # Uncomment the following line after fixing all post processors
    # assert isinstance(s, str), 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
    return s if isinstance(s, str) else s.decode('ascii')


def decodeArgument(b):
    return b


def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval


_timetuple = collections.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))


def timetuple_from_msec(msec):
    secs, msec = divmod(msec, 1000)
    mins, secs = divmod(secs, 60)
    hrs, mins = divmod(mins, 60)
    return _timetuple(hrs, mins, secs, msec)


def formatSeconds(secs, delim=':', msec=False):
    time = timetuple_from_msec(secs * 1000)
    if time.hours:
        ret = '%d%s%02d%s%02d' % (time.hours, delim, time.minutes, delim, time.seconds)
    elif time.minutes:
        ret = '%d%s%02d' % (time.minutes, delim, time.seconds)
    else:
        ret = '%d' % time.seconds
    return '%s.%03d' % (ret, time.milliseconds) if msec else ret
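
# Worked example (not in the original source): 3661 s is 1 h 1 min 1 s, so
#   timetuple_from_msec(3661001)  # -> Time(hours=1, minutes=1, seconds=1, milliseconds=1)
#   formatSeconds(3661)           # -> '1:01:01'
#   formatSeconds(61, msec=True)  # -> '1:01.000'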


def _ssl_load_windows_store_certs(ssl_context, storename):
    # Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
    try:
        certs = [cert for cert, encoding, trust in ssl.enum_certificates(storename)
                 if encoding == 'x509_asn' and (
                     trust is True or ssl.Purpose.SERVER_AUTH.oid in trust)]
    except PermissionError:
        return
    for cert in certs:
        with contextlib.suppress(ssl.SSLError):
            ssl_context.load_verify_locations(cadata=cert)


def make_HTTPS_handler(params, **kwargs):
    opts_check_certificate = not params.get('nocheckcertificate')
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = opts_check_certificate
    if params.get('legacyserverconnect'):
        context.options |= 4  # SSL_OP_LEGACY_SERVER_CONNECT
        # Allow use of weaker ciphers in Python 3.10+. See https://bugs.python.org/issue43998
        context.set_ciphers('DEFAULT')
    context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
    if opts_check_certificate:
        if has_certifi and 'no-certifi' not in params.get('compat_opts', []):
            context.load_verify_locations(cafile=certifi.where())
        else:
            try:
                context.load_default_certs()
                # Work around the issue in load_default_certs when there are bad certificates. See:
                # https://github.com/yt-dlp/yt-dlp/issues/1060,
                # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
            except ssl.SSLError:
                # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
                if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
                    for storename in ('CA', 'ROOT'):
                        _ssl_load_windows_store_certs(context, storename)
                context.set_default_verify_paths()

    client_certfile = params.get('client_certificate')
    if client_certfile:
        try:
            context.load_cert_chain(
                client_certfile, keyfile=params.get('client_certificate_key'),
                password=params.get('client_certificate_password'))
        except ssl.SSLError:
            raise YoutubeDLError('Unable to load client certificate')

    # Some servers may reject requests if ALPN extension is not sent. See:
    # https://github.com/python/cpython/issues/85140
    # https://github.com/yt-dlp/yt-dlp/issues/3878
    with contextlib.suppress(NotImplementedError):
        context.set_alpn_protocols(['http/1.1'])

    return YoutubeDLHTTPSHandler(params, context=context, **kwargs)


def bug_reports_message(before=';'):
    msg = ('please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , '
           'filling out the appropriate issue template. '
           'Confirm you are on the latest version using yt-dlp -U')

    before = before.rstrip()
    if not before or before.endswith(('.', '!', '?')):
        msg = msg[0].title() + msg[1:]

    return (before + ' ' if before else '') + msg


class YoutubeDLError(Exception):
    """Base exception for YoutubeDL errors."""
    msg = None

    def __init__(self, msg=None):
        if msg is not None:
            self.msg = msg
        elif self.msg is None:
            self.msg = type(self).__name__
        super().__init__(self.msg)


network_exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
    network_exceptions.append(ssl.CertificateError)
network_exceptions = tuple(network_exceptions)


class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
        """
        if sys.exc_info()[0] in network_exceptions:
            expected = True

        self.orig_msg = str(msg)
        self.traceback = tb
        self.expected = expected
        self.cause = cause
        self.video_id = video_id
        self.ie = ie
        self.exc_info = sys.exc_info()  # preserve original exception

        super().__init__(''.join((
            format_field(ie, None, '[%s] '),
            format_field(video_id, None, '%s: '),
            msg,
            format_field(cause, None, ' (caused by %r)'),
            '' if expected else bug_reports_message())))

    def format_traceback(self):
        return join_nonempty(
            self.traceback and ''.join(traceback.format_tb(self.traceback)),
            self.cause and ''.join(traceback.format_exception(None, self.cause, self.cause.__traceback__)[1:]),
            delim='\n') or None


class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super().__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class GeoRestrictedError(ExtractorError):
    """Geographic restriction Error exception.

    This exception may be thrown when a video is not available from your
    geographic location due to geographic restrictions imposed by a website.
    """

    def __init__(self, msg, countries=None, **kwargs):
        kwargs['expected'] = True
        super().__init__(msg, **kwargs)
        self.countries = countries


class DownloadError(YoutubeDLError):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super().__init__(msg)
        self.exc_info = exc_info


class EntryNotInPlaylist(YoutubeDLError):
    """Entry not in playlist exception.

    This exception will be thrown by YoutubeDL when a requested entry
    is not found in the playlist info_dict
    """
    msg = 'Entry not found in info'


class SameFileError(YoutubeDLError):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    msg = 'Fixed output name but more than one file to download'

    def __init__(self, filename=None):
        if filename is not None:
            self.msg += f': {filename}'
        super().__init__(self.msg)


class PostProcessingError(YoutubeDLError):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """


class DownloadCancelled(YoutubeDLError):
    """ Exception raised when the download queue should be interrupted """
    msg = 'The download was cancelled'


class ExistingVideoReached(DownloadCancelled):
    """ --break-on-existing triggered """
    msg = 'Encountered a video that is already in the archive, stopping due to --break-on-existing'


class RejectedVideoReached(DownloadCancelled):
    """ --break-on-reject triggered """
    msg = 'Encountered a video that did not match filter, stopping due to --break-on-reject'


class MaxDownloadsReached(DownloadCancelled):
    """ --max-downloads limit has been reached. """
    msg = 'Maximum number of downloads reached, stopping due to --max-downloads'


class ReExtractInfo(YoutubeDLError):
    """ Video info needs to be re-extracted. """

    def __init__(self, msg, expected=False):
        super().__init__(msg)
        self.expected = expected


class ThrottledDownload(ReExtractInfo):
    """ Download speed below --throttled-rate. """
    msg = 'The download speed is below throttle limit'

    def __init__(self):
        super().__init__(self.msg, expected=False)


class UnavailableVideoError(YoutubeDLError):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    msg = 'Unable to download video'

    def __init__(self, err=None):
        if err is not None:
            self.msg += f': {err}'
        super().__init__(self.msg)


class ContentTooShortError(YoutubeDLError):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        super().__init__(f'Downloaded {downloaded} bytes, expected {expected} bytes')
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected


class XAttrMetadataError(YoutubeDLError):
    def __init__(self, code=None, msg='Unknown error'):
        super().__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        if (self.code in (errno.ENOSPC, errno.EDQUOT)
                or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'


class XAttrUnavailableError(YoutubeDLError):
    pass


def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    hc = http_class(*args, **kwargs)
    source_address = ydl_handler._params.get('source_address')

    if source_address is not None:
        # This is to workaround _create_connection() from socket where it will try all
        # address data from getaddrinfo() including IPv6. This filters the result from
        # getaddrinfo() based on the source_address value.
        # This is based on the cpython socket.create_connection() function.
        # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
        def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
            host, port = address
            err = None
            addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
            af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
            ip_addrs = [addr for addr in addrs if addr[0] == af]
            if addrs and not ip_addrs:
                ip_version = 'v4' if af == socket.AF_INET else 'v6'
                raise OSError(
                    "No remote IP%s addresses available for connect, can't use '%s' as source address"
                    % (ip_version, source_address[0]))
            for res in ip_addrs:
                af, socktype, proto, canonname, sa = res
                sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                        sock.settimeout(timeout)
                    sock.bind(source_address)
                    sock.connect(sa)
                    err = None  # Explicitly break reference cycle
                    return sock
                except OSError as _:
                    err = _
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            else:
                raise OSError('getaddrinfo returns an empty list')
        if hasattr(hc, '_create_connection'):
            hc._create_connection = _create_connection
        hc.source_address = (source_address, 0)

    return hc


def handle_youtubedl_headers(headers):
    filtered_headers = headers

    if 'Youtubedl-no-compression' in filtered_headers:
        filtered_headers = {k: v for k, v in filtered_headers.items() if k.lower() != 'accept-encoding'}
        del filtered_headers['Youtubedl-no-compression']

    return filtered_headers
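
# Illustrative usage (not in the original source): the pseudo-header is
# consumed here and never sent on the wire:
#   handle_youtubedl_headers({'User-Agent': 'UA', 'Accept-Encoding': 'gzip',
#                             'Youtubedl-no-compression': 'True'})
#   -> {'User-Agent': 'UA'}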


class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        conn_class = compat_http_client.HTTPConnection

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        if not data:
            return data
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def brotli(data):
        if not data:
            return data
        return brotli.decompress(data)

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in self._params.get('http_headers', std_headers).items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        if 'Accept-encoding' not in req.headers:
            req.add_header('Accept-encoding', ', '.join(SUPPORTED_ENCODINGS))

        req.headers = handle_youtubedl_headers(req.headers)

        return super().do_request_(req)

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except OSError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except OSError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # brotli
        if resp.headers.get('Content-encoding', '') == 'br':
            resp = compat_urllib_request.addinfourl(
                io.BytesIO(self.brotli(resp.read())), old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/ytdl-org/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                location = location.encode('iso-8859-1').decode()
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response


def make_socks_conn_class(base_class, socks_proxy):
    assert issubclass(base_class, (
        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))

    url_components = compat_urlparse.urlparse(socks_proxy)
    if url_components.scheme.lower() == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif url_components.scheme.lower() in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif url_components.scheme.lower() == 'socks4a':
        socks_type = ProxyType.SOCKS4A

    def unquote_if_non_empty(s):
        if not s:
            return s
        return compat_urllib_parse_unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if isinstance(self.timeout, (int, float)):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            if isinstance(self, compat_http_client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection


class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        conn_class = self._https_conn_class

        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        try:
            return self.do_open(
                functools.partial(_create_http_connection, self, conn_class, True), req, **kwargs)
        except urllib.error.URLError as e:
            if (isinstance(e.reason, ssl.SSLError)
                    and getattr(e.reason, 'reason', None) == 'SSLV3_ALERT_HANDSHAKE_FAILURE'):
                raise YoutubeDLError('SSLV3_ALERT_HANDSHAKE_FAILURE: Try using --legacy-server-connect')
            raise


class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
    """
    See [1] for cookie file format.

    1. https://curl.haxx.se/docs/http-cookies.html
    """
    _HTTPONLY_PREFIX = '#HttpOnly_'
    _ENTRY_LEN = 7
    _HEADER = '''# Netscape HTTP Cookie File
# This file is generated by yt-dlp. Do not edit.

'''
    _CookieFileEntry = collections.namedtuple(
        'CookieFileEntry',
        ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))

    def __init__(self, filename=None, *args, **kwargs):
        super().__init__(None, *args, **kwargs)
        if self.is_path(filename):
            filename = os.fspath(filename)
        self.filename = filename

    @staticmethod
    def _true_or_false(cndn):
        return 'TRUE' if cndn else 'FALSE'

    @staticmethod
    def is_path(file):
        return isinstance(file, (str, bytes, os.PathLike))

    @contextlib.contextmanager
    def open(self, file, *, write=False):
        if self.is_path(file):
            with open(file, 'w' if write else 'r', encoding='utf-8') as f:
                yield f
        else:
            if write:
                file.truncate(0)
            yield file

    def _really_save(self, f, ignore_discard=False, ignore_expires=False):
        now = time.time()
        for cookie in self:
            if (not ignore_discard and cookie.discard
                    or not ignore_expires and cookie.is_expired(now)):
                continue
            name, value = cookie.name, cookie.value
            if value is None:
                # cookies.txt regards 'Set-Cookie: foo' as a cookie
                # with no name, whereas http.cookiejar regards it as a
                # cookie with no value.
                name, value = '', name
            f.write('%s\n' % '\t'.join((
                cookie.domain,
                self._true_or_false(cookie.domain.startswith('.')),
                cookie.path,
                self._true_or_false(cookie.secure),
                str_or_none(cookie.expires, default=''),
                name, value)))

    def save(self, filename=None, *args, **kwargs):
        """
        Save cookies to a file.
        Code is taken from CPython 3.6
        https://github.com/python/cpython/blob/8d999cbf4adea053be6dbb612b9844635c4dfb8e/Lib/http/cookiejar.py#L2091-L2117 """
        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)

        # Store session cookies with `expires` set to 0 instead of an empty string
        for cookie in self:
            if cookie.expires is None:
                cookie.expires = 0

        with self.open(filename, write=True) as f:
            f.write(self._HEADER)
            self._really_save(f, *args, **kwargs)

    def load(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Load cookies from a file."""
        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)

        def prepare_line(line):
            if line.startswith(self._HTTPONLY_PREFIX):
                line = line[len(self._HTTPONLY_PREFIX):]
            # comments and empty lines are fine
            if line.startswith('#') or not line.strip():
                return line
            cookie_list = line.split('\t')
            if len(cookie_list) != self._ENTRY_LEN:
                raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list))
            cookie = self._CookieFileEntry(*cookie_list)
            if cookie.expires_at and not cookie.expires_at.isdigit():
                raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
            return line

        cf = io.StringIO()
        with self.open(filename) as f:
            for line in f:
                try:
                    cf.write(prepare_line(line))
                except compat_cookiejar.LoadError as e:
                    if f'{line.strip()} '[0] in '[{"':
                        raise compat_cookiejar.LoadError(
                            'Cookies file must be Netscape formatted, not JSON. See '
                            'https://github.com/ytdl-org/youtube-dl#how-do-i-pass-cookies-to-youtube-dl')
                    write_string(f'WARNING: skipping cookie file entry due to {e}: {line!r}\n')
                    continue
        cf.seek(0)
        self._really_load(cf, filename, ignore_discard, ignore_expires)
        # Session cookies are denoted by either `expires` field set to
        # an empty string or 0. MozillaCookieJar only recognizes the former
        # (see [1]). So we need to force the latter to be recognized as session
        # cookies on our own.
        # Session cookies may be important for cookies-based authentication,
        # e.g. usually, when user does not check 'Remember me' check box while
        # logging in on a site, some important cookies are stored as session
        # cookies so that not recognizing them will result in failed login.
        # 1. https://bugs.python.org/issue17164
        for cookie in self:
            # Treat `expires=0` cookies as session cookies
            if cookie.expires == 0:
                cookie.expires = None
                cookie.discard = True


class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response


class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
    """YoutubeDL redirect handler

    The code is based on HTTPRedirectHandler implementation from CPython [1].

    This redirect handler solves two issues:
     - ensures redirect URL is always unicode under python 2
     - introduces support for experimental HTTP response status code
       308 Permanent Redirect [2] used by some sites [3]

    1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
    2. https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308
    3. https://github.com/ytdl-org/youtube-dl/issues/28768
    """

    http_error_301 = http_error_303 = http_error_307 = http_error_308 = compat_urllib_request.HTTPRedirectHandler.http_error_302

    def redirect_request(self, req, fp, code, msg, headers, newurl):
        """Return a Request or None in response to a redirect.

        This is called by the http_error_30x methods when a
        redirection response is received. If a redirection should
        take place, return a new Request to allow http_error_30x to
        perform the redirect. Otherwise, raise HTTPError if no-one
        else should try to handle this url. Return None if you can't
        but another Handler might.
        """
        m = req.get_method()
        if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
                 or code in (301, 302, 303) and m == "POST")):
            raise compat_HTTPError(req.full_url, code, msg, headers, fp)
        # Strictly (according to RFC 2616), 301 or 302 in response to
        # a POST MUST NOT cause a redirection without confirmation
        # from the user (of urllib.request, in this case). In practice,
        # essentially all clients do redirect in this case, so we do
        # the same.

        # Be conciliant with URIs containing a space. This is mainly
        # redundant with the more complete encoding done in http_error_302(),
        # but it is kept for compatibility with other callers.
        newurl = newurl.replace(' ', '%20')

        CONTENT_HEADERS = ("content-length", "content-type")
        # NB: don't use dict comprehension for python 2.6 compatibility
        newheaders = {k: v for k, v in req.headers.items() if k.lower() not in CONTENT_HEADERS}

        # A 303 must either use GET or HEAD for subsequent request
        # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.4
        if code == 303 and m != 'HEAD':
            m = 'GET'
        # 301 and 302 redirects are commonly turned into a GET from a POST
        # for subsequent requests by browsers, so we'll do the same.
        # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.2
        # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.3
        if code in (301, 302) and m == 'POST':
            m = 'GET'

        return compat_urllib_request.Request(
            newurl, headers=newheaders, origin_req_host=req.origin_req_host,
            unverifiable=True, method=m)


def extract_timezone(date_str):
    m = re.search(
        r'''(?x)
            ^.{8,}?                                              # >=8 char non-TZ prefix, if present
            (?P<tz>Z|                                            # just the UTC Z, or
                (?:(?<=.\b\d{4}|\b\d{2}:\d\d)|                   # preceded by 4 digits or hh:mm or
                   (?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d))     # not preceded by 3 alpha word or >= 4 alpha or 2 digits
                [ ]?                                             # optional space
                (?P<sign>\+|-)                                   # +/-
                (?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})       # hh[:]mm
            $)
        ''', date_str)
    if not m:
        timezone = datetime.timedelta()
    else:
        date_str = date_str[:-len(m.group('tz'))]
        if not m.group('sign'):
            timezone = datetime.timedelta()
        else:
            sign = 1 if m.group('sign') == '+' else -1
            timezone = datetime.timedelta(
                hours=sign * int(m.group('hours')),
                minutes=sign * int(m.group('minutes')))
    return timezone, date_str
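
# Illustrative usage (not in the original source): the offset is returned as a
# timedelta and stripped from the string:
#   extract_timezone('2022-01-01T12:00:00+05:30')
#   -> (datetime.timedelta(seconds=19800), '2022-01-01T12:00:00')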


def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        timezone, date_str = extract_timezone(date_str)

    with contextlib.suppress(ValueError):
        date_format = f'%Y-%m-%d{delimiter}%H:%M:%S'
        dt = datetime.datetime.strptime(date_str, date_format) - timezone
        return calendar.timegm(dt.timetuple())
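
# Illustrative usage (not in the original source; fractional seconds are
# discarded before parsing):
#   parse_iso8601('2022-01-01T00:00:00Z')       # -> 1640995200
#   parse_iso8601('2022-01-01T01:00:00+01:00')  # -> 1640995200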


def date_formats(day_first=True):
    return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST


def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None
    upload_date = None
    # Replace commas
    date_str = date_str.replace(',', ' ')
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
    _, date_str = extract_timezone(date_str)

    for expression in date_formats(day_first):
        with contextlib.suppress(ValueError):
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
    if upload_date is None:
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            with contextlib.suppress(ValueError):
                upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
    if upload_date is not None:
        return compat_str(upload_date)
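
# Illustrative usage (not in the original source), matching the
# '%Y-%m-%d %H:%M:%S.%f' entry of DATE_FORMATS:
#   unified_strdate('2021-12-21 10:00:00.000')  # -> '20211221'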


def unified_timestamp(date_str, day_first=True):
    if date_str is None:
        return None

    date_str = re.sub(r'[,|]', '', date_str)

    pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
    timezone, date_str = extract_timezone(date_str)

    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    # Remove unrecognized timezones from ISO 8601 alike timestamps
    m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
    if m:
        date_str = date_str[:-len(m.group('tz'))]

    # Python only supports microseconds, so remove nanoseconds
    m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
    if m:
        date_str = m.group(1)

    for expression in date_formats(day_first):
        with contextlib.suppress(ValueError):
            dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
            return calendar.timegm(dt.timetuple())
    timetuple = email.utils.parsedate_tz(date_str)
    if timetuple:
        return calendar.timegm(timetuple) + pm_delta * 3600
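
# Illustrative usage (not in the original source):
#   unified_timestamp('2022-01-01 00:00:00.000')  # -> 1640995200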


def determine_ext(url, default_ext='unknown_video'):
    if url is None or '.' not in url:
        return default_ext
    guess = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', guess):
        return guess
    # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
    elif guess.rstrip('/') in KNOWN_EXTENSIONS:
        return guess.rstrip('/')
    else:
        return default_ext
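
# Illustrative usage (not in the original source):
#   determine_ext('http://example.com/video.mp4?dl=1')       # -> 'mp4'
#   determine_ext('http://example.com/video.mp4/?download')  # -> 'mp4' (via KNOWN_EXTENSIONS)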


def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
    return replace_extension(filename, sub_lang + '.' + sub_format, expected_real_ext)


def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
    R"""
    Return a datetime object from a string.
    Supported format:
        (now|today|yesterday|DATE)([+-]\d+(microsecond|second|minute|hour|day|week|month|year)s?)?

    @param format       strftime format of DATE
    @param precision    Round the datetime object: auto|microsecond|second|minute|hour|day
                        auto: round to the unit provided in date_str (if applicable).
    """
    auto_precision = False
    if precision == 'auto':
        auto_precision = True
        precision = 'microsecond'
    today = datetime_round(datetime.datetime.utcnow(), precision)
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match(
        r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?',
        date_str)
    if match is not None:
        start_time = datetime_from_str(match.group('start'), precision, format)
        time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1)
        unit = match.group('unit')
        if unit == 'month' or unit == 'year':
            new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time)
            unit = 'day'
        else:
            if unit == 'week':
                unit = 'day'
                time *= 7
            delta = datetime.timedelta(**{unit + 's': time})
            new_date = start_time + delta
        if auto_precision:
            return datetime_round(new_date, unit)
        return new_date

    return datetime_round(datetime.datetime.strptime(date_str, format), precision)


def date_from_str(date_str, format='%Y%m%d', strict=False):
    R"""
    Return a date object from a string using datetime_from_str

    @param strict  Restrict allowed patterns to "YYYYMMDD" and
                   (now|today|yesterday)(-\d+(day|week|month|year)s?)?
    """
    if strict and not re.fullmatch(r'\d{8}|(now|today|yesterday)(-\d+(day|week|month|year)s?)?', date_str):
        raise ValueError(f'Invalid date format "{date_str}"')
    return datetime_from_str(date_str, precision='microsecond', format=format).date()
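
# Illustrative usage (not in the original source):
#   date_from_str('20220101')     # -> datetime.date(2022, 1, 1)
#   date_from_str('today-1week')  # -> the date one week before today (UTC)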


def datetime_add_months(dt, months):
    """Increment/Decrement a datetime object by months."""
    month = dt.month + months - 1
    year = dt.year + month // 12
    month = month % 12 + 1
    day = min(dt.day, calendar.monthrange(year, month)[1])
    return dt.replace(year, month, day)
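
# Worked example (not in the original source): the day is clamped to the
# length of the target month:
#   datetime_add_months(datetime.date(2020, 1, 31), 1)   # -> datetime.date(2020, 2, 29)
#   datetime_add_months(datetime.date(2020, 1, 15), -2)  # -> datetime.date(2019, 11, 15)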


def datetime_round(dt, precision='day'):
    """
    Round a datetime object's time to a specific precision
    """
    if precision == 'microsecond':
        return dt

    unit_seconds = {
        'day': 86400,
        'hour': 3600,
        'minute': 60,
        'second': 1,
    }
    roundto = lambda x, n: ((x + n / 2) // n) * n
    timestamp = calendar.timegm(dt.timetuple())
    return datetime.datetime.utcfromtimestamp(roundto(timestamp, unit_seconds[precision]))


def hyphenate_date(date_str):
    """
    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is not None:
        return '-'.join(match.groups())
    else:
        return date_str
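
# Illustrative usage (not in the original source):
#   hyphenate_date('20220131')  # -> '2022-01-31'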
1887 """Represents a time interval between two dates"""
1889 def __init__(self
, start
=None, end
=None):
1890 """start and end must be strings in the format accepted by date"""
1891 if start
is not None:
1892 self
.start
= date_from_str(start
, strict
=True)
1894 self
.start
= datetime
.datetime
.min.date()
1896 self
.end
= date_from_str(end
, strict
=True)
1898 self
.end
= datetime
.datetime
.max.date()
1899 if self
.start
> self
.end
:
1900 raise ValueError('Date range: "%s" , the start date must be before the end date' % self
)
1904 """Returns a range that only contains the given day"""
1905 return cls(day
, day
)
1907 def __contains__(self
, date
):
1908 """Check if the date is in the range"""
1909 if not isinstance(date
, datetime
.date
):
1910 date
= date_from_str(date
)
1911 return self
.start
<= date
<= self
.end
1914 return f
'{self.start.isoformat()} - {self.end.isoformat()}'
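
# Illustrative usage (not in the original source):
#   '20220115' in DateRange('20220101', '20220131')  # -> True
#   '20220215' in DateRange('20220101', '20220131')  # -> False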


def platform_name():
    """ Returns the platform name as a compat_str """
    res = platform.platform()
    if isinstance(res, bytes):
        res = res.decode(preferredencoding())

    assert isinstance(res, compat_str)
    return res


def get_windows_version():
    ''' Get Windows version. returns () if it's not running on Windows '''
    if compat_os_name == 'nt':
        return version_tuple(platform.win32_ver()[1])
    else:
        return ()


def write_string(s, out=None, encoding=None):
    assert isinstance(s, str)
    out = out or sys.stderr

    if compat_os_name == 'nt' and supports_terminal_sequences(out):
        s = re.sub(r'([\r\n]+)', r' \1', s)

    enc, buffer = None, out
    if 'b' in getattr(out, 'mode', ''):
        enc = encoding or preferredencoding()
    elif hasattr(out, 'buffer'):
        buffer = out.buffer
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()

    buffer.write(s.encode(enc, 'ignore') if enc else s)
    out.flush()


def bytes_to_intlist(bs):
    if not bs:
        return []
    if isinstance(bs[0], int):  # Python 3
        return list(bs)
    else:
        return [ord(c) for c in bs]


def intlist_to_bytes(xs):
    if not xs:
        return b''
    return compat_struct_pack('%dB' % len(xs), *xs)
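
# Illustrative usage (not in the original source):
#   bytes_to_intlist(b'\x00\x01A')  # -> [0, 1, 65]
#   intlist_to_bytes([0, 1, 65])    # -> b'\x00\x01A'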


class LockingUnsupportedError(OSError):
    msg = 'File locking is not supported'

    def __init__(self):
        super().__init__(self.msg)


# Cross-platform file locking
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    whole_low = 0xffffffff
    whole_high = 0x7fffffff
2013 def _lock_file(f
, exclusive
, block
):
2014 overlapped
= OVERLAPPED()
2015 overlapped
.Offset
= 0
2016 overlapped
.OffsetHigh
= 0
2017 overlapped
.hEvent
= 0
2018 f
._lock
_file
_overlapped
_p
= ctypes
.pointer(overlapped
)
2020 if not LockFileEx(msvcrt
.get_osfhandle(f
.fileno()),
2021 (0x2 if exclusive
else 0x0) |
(0x0 if block
else 0x1),
2022 0, whole_low
, whole_high
, f
._lock
_file
_overlapped
_p
):
2023 # NB: No argument form of "ctypes.FormatError" does not work on PyPy
2024 raise BlockingIOError(f
'Locking file failed: {ctypes.FormatError(ctypes.GetLastError())!r}')
2026 def _unlock_file(f
):
2027 assert f
._lock
_file
_overlapped
_p
2028 handle
= msvcrt
.get_osfhandle(f
.fileno())
2029 if not UnlockFileEx(handle
, 0, whole_low
, whole_high
, f
._lock
_file
_overlapped
_p
):
2030 raise OSError('Unlocking file failed: %r' % ctypes
.FormatError())
2036 def _lock_file(f
, exclusive
, block
):
2037 flags
= fcntl
.LOCK_EX
if exclusive
else fcntl
.LOCK_SH
2039 flags |
= fcntl
.LOCK_NB
2041 fcntl
.flock(f
, flags
)
2042 except BlockingIOError
:
2044 except OSError: # AOSP does not have flock()
2045 fcntl
.lockf(f
, flags
)
2047 def _unlock_file(f
):
2049 fcntl
.flock(f
, fcntl
.LOCK_UN
)
2051 fcntl
.lockf(f
, fcntl
.LOCK_UN
)
2055 def _lock_file(f
, exclusive
, block
):
2056 raise LockingUnsupportedError()
2058 def _unlock_file(f
):
2059 raise LockingUnsupportedError()
2065 def __init__(self
, filename
, mode
, block
=True, encoding
=None):
2066 if mode
not in {'r', 'rb', 'a', 'ab', 'w', 'wb'}
:
2067 raise NotImplementedError(mode
)
2068 self
.mode
, self
.block
= mode
, block
2070 writable
= any(f
in mode
for f
in 'wax+')
2071 readable
= any(f
in mode
for f
in 'r+')
2072 flags
= functools
.reduce(operator
.ior
, (
2073 getattr(os
, 'O_CLOEXEC', 0), # UNIX only
2074 getattr(os
, 'O_BINARY', 0), # Windows only
2075 getattr(os
, 'O_NOINHERIT', 0), # Windows only
2076 os
.O_CREAT
if writable
else 0, # O_TRUNC only after locking
2077 os
.O_APPEND
if 'a' in mode
else 0,
2078 os
.O_EXCL
if 'x' in mode
else 0,
2079 os
.O_RDONLY
if not writable
else os
.O_RDWR
if readable
else os
.O_WRONLY
,
2082 self
.f
= os
.fdopen(os
.open(filename
, flags
, 0o666), mode
, encoding
=encoding
)
2084 def __enter__(self
):
2085 exclusive
= 'r' not in self
.mode
2087 _lock_file(self
.f
, exclusive
, self
.block
)
2092 if 'w' in self
.mode
:
2095 except OSError as e
:
2097 errno
.ESPIPE
, # Illegal seek - expected for FIFO
2098 errno
.EINVAL
, # Invalid argument - expected for /dev/null
2107 _unlock_file(self
.f
)
2111 def __exit__(self
, *_
):
2120 def __getattr__(self
, attr
):
2121 return getattr(self
.f
, attr
)
2128 def get_filesystem_encoding():
2129 encoding
= sys
.getfilesystemencoding()
2130 return encoding
if encoding
is not None else 'utf-8'
2133 def shell_quote(args
):
2135 encoding
= get_filesystem_encoding()
2137 if isinstance(a
, bytes):
2138 # We may get a filename encoded with 'encodeFilename'
2139 a
= a
.decode(encoding
)
2140 quoted_args
.append(compat_shlex_quote(a
))
2141 return ' '.join(quoted_args
)
2144 def smuggle_url(url
, data
):
2145 """ Pass additional data in a URL for internal use. """
2147 url
, idata
= unsmuggle_url(url
, {})
2149 sdata
= compat_urllib_parse_urlencode(
2150 {'__youtubedl_smuggle': json.dumps(data)}
)
2151 return url
+ '#' + sdata
2154 def unsmuggle_url(smug_url
, default
=None):
2155 if '#__youtubedl_smuggle' not in smug_url
:
2156 return smug_url
, default
2157 url
, _
, sdata
= smug_url
.rpartition('#')
2158 jsond
= compat_parse_qs(sdata
)['__youtubedl_smuggle'][0]
2159 data
= json
.loads(jsond
)
2163 def format_decimal_suffix(num
, fmt
='%d%s', *, factor
=1000):
2164 """ Formats numbers with decimal sufixes like K, M, etc """
2165 num
, factor
= float_or_none(num
), float(factor
)
2166 if num
is None or num
< 0:
2168 POSSIBLE_SUFFIXES
= 'kMGTPEZY'
2169 exponent
= 0 if num
== 0 else min(int(math
.log(num
, factor
)), len(POSSIBLE_SUFFIXES
))
2170 suffix
= ['', *POSSIBLE_SUFFIXES
][exponent
]
2172 suffix
= {'k': 'Ki', '': ''}
.get(suffix
, f
'{suffix}i')
2173 converted
= num
/ (factor
** exponent
)
2174 return fmt
% (converted
, suffix
)
2177 def format_bytes(bytes):
2178 return format_decimal_suffix(bytes, '%.2f%sB', factor
=1024) or 'N/A'
2181 def lookup_unit_table(unit_table
, s
):
2182 units_re
= '|'.join(re
.escape(u
) for u
in unit_table
)
2184 r
'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re
, s
)
2187 num_str
= m
.group('num').replace(',', '.')
2188 mult
= unit_table
[m
.group('unit')]
2189 return int(float(num_str
) * mult
)
2192 def parse_filesize(s
):
2196 # The lower-case forms are of course incorrect and unofficial,
2197 # but we support those too
2214 'megabytes': 1000 ** 2,
2215 'mebibytes': 1024 ** 2,
2221 'gigabytes': 1000 ** 3,
2222 'gibibytes': 1024 ** 3,
2228 'terabytes': 1000 ** 4,
2229 'tebibytes': 1024 ** 4,
2235 'petabytes': 1000 ** 5,
2236 'pebibytes': 1024 ** 5,
2242 'exabytes': 1000 ** 6,
2243 'exbibytes': 1024 ** 6,
2249 'zettabytes': 1000 ** 7,
2250 'zebibytes': 1024 ** 7,
2256 'yottabytes': 1000 ** 8,
2257 'yobibytes': 1024 ** 8,
2260 return lookup_unit_table(_UNIT_TABLE
, s
)
2267 s
= re
.sub(r
'^[^\d]+\s', '', s
).strip()
2269 if re
.match(r
'^[\d,.]+$', s
):
2270 return str_to_int(s
)
2283 ret
= lookup_unit_table(_UNIT_TABLE
, s
)
2287 mobj
= re
.match(r
'([\d,.]+)(?:$|\s)', s
)
2289 return str_to_int(mobj
.group(1))
2292 def parse_resolution(s
, *, lenient
=False):
2297 mobj
= re
.search(r
'(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)', s
)
2299 mobj
= re
.search(r
'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s
)
2302 'width': int(mobj
.group('w')),
2303 'height': int(mobj
.group('h')),
2306 mobj
= re
.search(r
'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s
)
2308 return {'height': int(mobj.group(1))}
2310 mobj
= re
.search(r
'\b([48])[kK]\b', s
)
2312 return {'height': int(mobj.group(1)) * 540}
2317 def parse_bitrate(s
):
2318 if not isinstance(s
, compat_str
):
2320 mobj
= re
.search(r
'\b(\d+)\s*kbps', s
)
2322 return int(mobj
.group(1))
2325 def month_by_name(name
, lang
='en'):
2326 """ Return the number of a month by (locale-independently) English name """
2328 month_names
= MONTH_NAMES
.get(lang
, MONTH_NAMES
['en'])
2331 return month_names
.index(name
) + 1
2336 def month_by_abbreviation(abbrev
):
2337 """ Return the number of a month by (locale-independently) English
2341 return [s
[:3] for s
in ENGLISH_MONTH_NAMES
].index(abbrev
) + 1
2346 def fix_xml_ampersands(xml_str
):
2347 """Replace all the '&' by '&' in XML"""
2349 r
'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
2354 def setproctitle(title
):
2355 assert isinstance(title
, compat_str
)
2357 # ctypes in Jython is not complete
2358 # http://bugs.jython.org/issue2148
2359 if sys
.platform
.startswith('java'):
2363 libc
= ctypes
.cdll
.LoadLibrary('libc.so.6')
2367 # LoadLibrary in Windows Python 2.7.13 only expects
2368 # a bytestring, but since unicode_literals turns
2369 # every string into a unicode string, it fails.
2371 title_bytes
= title
.encode()
2372 buf
= ctypes
.create_string_buffer(len(title_bytes
))
2373 buf
.value
= title_bytes
2375 libc
.prctl(15, buf
, 0, 0, 0)
2376 except AttributeError:
2377 return # Strange libc, just skip this
2380 def remove_start(s
, start
):
2381 return s
[len(start
):] if s
is not None and s
.startswith(start
) else s
2384 def remove_end(s
, end
):
2385 return s
[:-len(end
)] if s
is not None and s
.endswith(end
) else s
2388 def remove_quotes(s
):
2389 if s
is None or len(s
) < 2:
2391 for quote
in ('"', "'", ):
2392 if s
[0] == quote
and s
[-1] == quote
:
2397 def get_domain(url
):
2398 domain
= re
.match(r
'(?:https?:\/\/)?(?:www\.)?(?P<domain>[^\n\/]+\.[^\n\/]+)(?:\/(.*))?', url
)
2399 return domain
.group('domain') if domain
else None
2402 def url_basename(url
):
2403 path
= compat_urlparse
.urlparse(url
).path
2404 return path
.strip('/').split('/')[-1]
2408 return re
.match(r
'https?://[^?#&]+/', url
).group()
2411 def urljoin(base
, path
):
2412 if isinstance(path
, bytes):
2413 path
= path
.decode()
2414 if not isinstance(path
, compat_str
) or not path
:
2416 if re
.match(r
'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path
):
2418 if isinstance(base
, bytes):
2419 base
= base
.decode()
2420 if not isinstance(base
, compat_str
) or not re
.match(
2421 r
'^(?:https?:)?//', base
):
2423 return compat_urlparse
.urljoin(base
, path
)
2426 class HEADRequest(compat_urllib_request
.Request
):
2427 def get_method(self
):
2431 class PUTRequest(compat_urllib_request
.Request
):
2432 def get_method(self
):
2436 def int_or_none(v
, scale
=1, default
=None, get_attr
=None, invscale
=1):
2437 if get_attr
and v
is not None:
2438 v
= getattr(v
, get_attr
, None)
2440 return int(v
) * invscale
// scale
2441 except (ValueError, TypeError, OverflowError):
2445 def str_or_none(v
, default
=None):
2446 return default
if v
is None else compat_str(v
)
2449 def str_to_int(int_str
):
2450 """ A more relaxed version of int_or_none """
2451 if isinstance(int_str
, int):
2453 elif isinstance(int_str
, compat_str
):
2454 int_str
= re
.sub(r
'[,\.\+]', '', int_str
)
2455 return int_or_none(int_str
)
2458 def float_or_none(v
, scale
=1, invscale
=1, default
=None):
2462 return float(v
) * invscale
/ scale
2463 except (ValueError, TypeError):
2467 def bool_or_none(v
, default
=None):
2468 return v
if isinstance(v
, bool) else default
2471 def strip_or_none(v
, default
=None):
2472 return v
.strip() if isinstance(v
, compat_str
) else default
2475 def url_or_none(url
):
2476 if not url
or not isinstance(url
, compat_str
):
2479 return url
if re
.match(r
'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url
) else None
2482 def request_to_url(req
):
2483 if isinstance(req
, compat_urllib_request
.Request
):
2484 return req
.get_full_url()
2489 def strftime_or_none(timestamp
, date_format
, default
=None):
2490 datetime_object
= None
2492 if isinstance(timestamp
, (int, float)): # unix timestamp
2493 datetime_object
= datetime
.datetime
.utcfromtimestamp(timestamp
)
2494 elif isinstance(timestamp
, compat_str
): # assume YYYYMMDD
2495 datetime_object
= datetime
.datetime
.strptime(timestamp
, '%Y%m%d')
2496 return datetime_object
.strftime(date_format
)
2497 except (ValueError, TypeError, AttributeError):
2501 def parse_duration(s
):
2502 if not isinstance(s
, str):
2508 days
, hours
, mins
, secs
, ms
= [None] * 5
2509 m
= re
.match(r
'''(?x)
2511 (?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?
2512 (?P<secs>(?(before_secs)[0-9]{1,2}|[0-9]+))
2513 (?P<ms>[.:][0-9]+)?Z?$
2516 days
, hours
, mins
, secs
, ms
= m
.group('days', 'hours', 'mins', 'secs', 'ms')
2521 [0-9]+\s*y(?:ears?)?,?\s*
2524 [0-9]+\s*m(?:onths?)?,?\s*
2527 [0-9]+\s*w(?:eeks?)?,?\s*
2530 (?P<days>[0-9]+)\s*d(?:ays?)?,?\s*
2534 (?P<hours>[0-9]+)\s*h(?:ours?)?,?\s*
2537 (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?,?\s*
2540 (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
2543 days
, hours
, mins
, secs
, ms
= m
.groups()
2545 m
= re
.match(r
'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s
)
2547 hours
, mins
= m
.groups()
2552 ms
= ms
.replace(':', '.')
2553 return sum(float(part
or 0) * mult
for part
, mult
in (
2554 (days
, 86400), (hours
, 3600), (mins
, 60), (secs
, 1), (ms
, 1)))
2557 def prepend_extension(filename
, ext
, expected_real_ext
=None):
2558 name
, real_ext
= os
.path
.splitext(filename
)
2560 f
'{name}.{ext}{real_ext}'
2561 if not expected_real_ext
or real_ext
[1:] == expected_real_ext
2562 else f
'{filename}.{ext}')
2565 def replace_extension(filename
, ext
, expected_real_ext
=None):
2566 name
, real_ext
= os
.path
.splitext(filename
)
2567 return '{}.{}'.format(
2568 name
if not expected_real_ext
or real_ext
[1:] == expected_real_ext
else filename
,
2572 def check_executable(exe
, args
=[]):
2573 """ Checks if the given binary is installed somewhere in PATH, and returns its name.
2574 args can be a list of arguments for a short output (like -version) """
2576 Popen
.run([exe
] + args
, stdout
=subprocess
.PIPE
, stderr
=subprocess
.PIPE
)
2582 def _get_exe_version_output(exe
, args
, *, to_screen
=None):
2584 to_screen(f
'Checking exe version: {shell_quote([exe] + args)}')
2586 # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
2587 # SIGTTOU if yt-dlp is run in the background.
2588 # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
2589 stdout
, _
, _
= Popen
.run([encodeArgument(exe
)] + args
, text
=True,
2590 stdin
=subprocess
.PIPE
, stdout
=subprocess
.PIPE
, stderr
=subprocess
.STDOUT
)
2596 def detect_exe_version(output
, version_re
=None, unrecognized
='present'):
2597 assert isinstance(output
, compat_str
)
2598 if version_re
is None:
2599 version_re
= r
'version\s+([-0-9._a-zA-Z]+)'
2600 m
= re
.search(version_re
, output
)
2607 def get_exe_version(exe
, args
=['--version'],
2608 version_re
=None, unrecognized
='present'):
2609 """ Returns the version of the specified executable,
2610 or False if the executable is not present """
2611 out
= _get_exe_version_output(exe
, args
)
2612 return detect_exe_version(out
, version_re
, unrecognized
) if out
else False
2615 def frange(start
=0, stop
=None, step
=1):
2618 start
, stop
= 0, start
2619 sign
= [-1, 1][step
> 0] if step
else 0
2620 while sign
* start
< sign
* stop
:
2625 class LazyList(collections
.abc
.Sequence
):
2626 """Lazy immutable list from an iterable
2627 Note that slices of a LazyList are lists and not LazyList"""
2629 class IndexError(IndexError):
2632 def __init__(self
, iterable
, *, reverse
=False, _cache
=None):
2633 self
._iterable
= iter(iterable
)
2634 self
._cache
= [] if _cache
is None else _cache
2635 self
._reversed
= reverse
2639 # We need to consume the entire iterable to iterate in reverse
2640 yield from self
.exhaust()
2642 yield from self
._cache
2643 for item
in self
._iterable
:
2644 self
._cache
.append(item
)
2648 self
._cache
.extend(self
._iterable
)
2649 self
._iterable
= [] # Discard the emptied iterable to make it pickle-able
2653 """Evaluate the entire iterable"""
2654 return self
._exhaust
()[::-1 if self
._reversed
else 1]
2657 def _reverse_index(x
):
2658 return None if x
is None else -(x
+ 1)
2660 def __getitem__(self
, idx
):
2661 if isinstance(idx
, slice):
2663 idx
= slice(self
._reverse
_index
(idx
.start
), self
._reverse
_index
(idx
.stop
), -(idx
.step
or 1))
2664 start
, stop
, step
= idx
.start
, idx
.stop
, idx
.step
or 1
2665 elif isinstance(idx
, int):
2667 idx
= self
._reverse
_index
(idx
)
2668 start
, stop
, step
= idx
, idx
, 0
2670 raise TypeError('indices must be integers or slices')
2671 if ((start
or 0) < 0 or (stop
or 0) < 0
2672 or (start
is None and step
< 0)
2673 or (stop
is None and step
> 0)):
2674 # We need to consume the entire iterable to be able to slice from the end
2675 # Obviously, never use this with infinite iterables
2678 return self
._cache
[idx
]
2679 except IndexError as e
:
2680 raise self
.IndexError(e
) from e
2681 n
= max(start
or 0, stop
or 0) - len(self
._cache
) + 1
2683 self
._cache
.extend(itertools
.islice(self
._iterable
, n
))
2685 return self
._cache
[idx
]
2686 except IndexError as e
:
2687 raise self
.IndexError(e
) from e
2691 self
[-1] if self
._reversed
else self
[0]
2692 except self
.IndexError:
2698 return len(self
._cache
)
2700 def __reversed__(self
):
2701 return type(self
)(self
._iterable
, reverse
=not self
._reversed
, _cache
=self
._cache
)
2704 return type(self
)(self
._iterable
, reverse
=self
._reversed
, _cache
=self
._cache
)
2707 # repr and str should mimic a list. So we exhaust the iterable
2708 return repr(self
.exhaust())
2711 return repr(self
.exhaust())
2716 class IndexError(IndexError):
2720 # This is only useful for tests
2721 return len(self
.getslice())
2723 def __init__(self
, pagefunc
, pagesize
, use_cache
=True):
2724 self
._pagefunc
= pagefunc
2725 self
._pagesize
= pagesize
2726 self
._pagecount
= float('inf')
2727 self
._use
_cache
= use_cache
2730 def getpage(self
, pagenum
):
2731 page_results
= self
._cache
.get(pagenum
)
2732 if page_results
is None:
2733 page_results
= [] if pagenum
> self
._pagecount
else list(self
._pagefunc
(pagenum
))
2735 self
._cache
[pagenum
] = page_results
2738 def getslice(self
, start
=0, end
=None):
2739 return list(self
._getslice
(start
, end
))
2741 def _getslice(self
, start
, end
):
2742 raise NotImplementedError('This method must be implemented by subclasses')
2744 def __getitem__(self
, idx
):
2745 assert self
._use
_cache
, 'Indexing PagedList requires cache'
2746 if not isinstance(idx
, int) or idx
< 0:
2747 raise TypeError('indices must be non-negative integers')
2748 entries
= self
.getslice(idx
, idx
+ 1)
2750 raise self
.IndexError()
2754 class OnDemandPagedList(PagedList
):
2755 """Download pages until a page with less than maximum results"""
2757 def _getslice(self
, start
, end
):
2758 for pagenum
in itertools
.count(start
// self
._pagesize
):
2759 firstid
= pagenum
* self
._pagesize
2760 nextfirstid
= pagenum
* self
._pagesize
+ self
._pagesize
2761 if start
>= nextfirstid
:
2765 start
% self
._pagesize
2766 if firstid
<= start
< nextfirstid
2769 ((end
- 1) % self
._pagesize
) + 1
2770 if (end
is not None and firstid
<= end
<= nextfirstid
)
2774 page_results
= self
.getpage(pagenum
)
2776 self
._pagecount
= pagenum
- 1
2778 if startv
!= 0 or endv
is not None:
2779 page_results
= page_results
[startv
:endv
]
2780 yield from page_results
2782 # A little optimization - if current page is not "full", ie. does
2783 # not contain page_size videos then we can assume that this page
2784 # is the last one - there are no more ids on further pages -
2785 # i.e. no need to query again.
2786 if len(page_results
) + startv
< self
._pagesize
:
2789 # If we got the whole page, but the next page is not interesting,
2790 # break out early as well
2791 if end
== nextfirstid
:
2795 class InAdvancePagedList(PagedList
):
2796 """PagedList with total number of pages known in advance"""
2798 def __init__(self
, pagefunc
, pagecount
, pagesize
):
2799 PagedList
.__init
__(self
, pagefunc
, pagesize
, True)
2800 self
._pagecount
= pagecount
2802 def _getslice(self
, start
, end
):
2803 start_page
= start
// self
._pagesize
2804 end_page
= self
._pagecount
if end
is None else min(self
._pagecount
, end
// self
._pagesize
+ 1)
2805 skip_elems
= start
- start_page
* self
._pagesize
2806 only_more
= None if end
is None else end
- start
2807 for pagenum
in range(start_page
, end_page
):
2808 page_results
= self
.getpage(pagenum
)
2810 page_results
= page_results
[skip_elems
:]
2812 if only_more
is not None:
2813 if len(page_results
) < only_more
:
2814 only_more
-= len(page_results
)
2816 yield from page_results
[:only_more
]
2818 yield from page_results
2821 class PlaylistEntries
:
2822 MissingEntry
= object()
2823 is_exhausted
= False
2825 def __init__(self
, ydl
, info_dict
):
2828 # _entries must be assigned now since infodict can change during iteration
2829 entries
= info_dict
.get('entries')
2831 raise EntryNotInPlaylist('There are no entries')
2832 elif isinstance(entries
, list):
2833 self
.is_exhausted
= True
2835 requested_entries
= info_dict
.get('requested_entries')
2836 self
.is_incomplete
= bool(requested_entries
)
2837 if self
.is_incomplete
:
2838 assert self
.is_exhausted
2839 self
._entries
= [self
.MissingEntry
] * max(requested_entries
)
2840 for i
, entry
in zip(requested_entries
, entries
):
2841 self
._entries
[i
- 1] = entry
2842 elif isinstance(entries
, (list, PagedList
, LazyList
)):
2843 self
._entries
= entries
2845 self
._entries
= LazyList(entries
)
2847 PLAYLIST_ITEMS_RE
= re
.compile(r
'''(?x)
2848 (?P<start>[+-]?\d+)?
2850 (?P<end>[+-]?\d+|inf(?:inite)?)?
2851 (?::(?P<step>[+-]?\d+))?
2855 def parse_playlist_items(cls
, string
):
2856 for segment
in string
.split(','):
2858 raise ValueError('There is two or more consecutive commas')
2859 mobj
= cls
.PLAYLIST_ITEMS_RE
.fullmatch(segment
)
2861 raise ValueError(f
'{segment!r} is not a valid specification')
2862 start
, end
, step
, has_range
= mobj
.group('start', 'end', 'step', 'range')
2863 if int_or_none(step
) == 0:
2864 raise ValueError(f
'Step in {segment!r} cannot be zero')
2865 yield slice(int_or_none(start
), float_or_none(end
), int_or_none(step
)) if has_range
else int(start
)
2867 def get_requested_items(self
):
2868 playlist_items
= self
.ydl
.params
.get('playlist_items')
2869 playlist_start
= self
.ydl
.params
.get('playliststart', 1)
2870 playlist_end
= self
.ydl
.params
.get('playlistend')
2871 # For backwards compatibility, interpret -1 as whole list
2872 if playlist_end
in (-1, None):
2874 if not playlist_items
:
2875 playlist_items
= f
'{playlist_start}:{playlist_end}'
2876 elif playlist_start
!= 1 or playlist_end
:
2877 self
.ydl
.report_warning('Ignoring playliststart and playlistend because playlistitems was given', only_once
=True)
2879 for index
in self
.parse_playlist_items(playlist_items
):
2880 for i
, entry
in self
[index
]:
2883 # TODO: Add auto-generated fields
2884 self
.ydl
._match
_entry
(entry
, incomplete
=True, silent
=True)
2885 except (ExistingVideoReached
, RejectedVideoReached
):
2888 def get_full_count(self
):
2889 if self
.is_exhausted
and not self
.is_incomplete
:
2891 elif isinstance(self
._entries
, InAdvancePagedList
):
2892 if self
._entries
._pagesize
== 1:
2893 return self
._entries
._pagecount
2895 @functools.cached_property
2897 if isinstance(self
._entries
, list):
2900 entry
= self
._entries
[i
]
2902 entry
= self
.MissingEntry
2903 if not self
.is_incomplete
:
2904 raise self
.IndexError()
2905 if entry
is self
.MissingEntry
:
2906 raise EntryNotInPlaylist(f
'Entry {i} cannot be found')
2911 return type(self
.ydl
)._handle
_extraction
_exceptions
(lambda _
, i
: self
._entries
[i
])(self
.ydl
, i
)
2912 except (LazyList
.IndexError, PagedList
.IndexError):
2913 raise self
.IndexError()
2916 def __getitem__(self
, idx
):
2917 if isinstance(idx
, int):
2918 idx
= slice(idx
, idx
)
2920 # NB: PlaylistEntries[1:10] => (0, 1, ... 9)
2921 step
= 1 if idx
.step
is None else idx
.step
2922 if idx
.start
is None:
2923 start
= 0 if step
> 0 else len(self
) - 1
2925 start
= idx
.start
- 1 if idx
.start
>= 0 else len(self
) + idx
.start
2927 # NB: Do not call len(self) when idx == [:]
2928 if idx
.stop
is None:
2929 stop
= 0 if step
< 0 else float('inf')
2931 stop
= idx
.stop
- 1 if idx
.stop
>= 0 else len(self
) + idx
.stop
2932 stop
+= [-1, 1][step
> 0]
2934 for i
in frange(start
, stop
, step
):
2938 entry
= self
._getter
(i
)
2939 except self
.IndexError:
2940 self
.is_exhausted
= True
2947 return len(tuple(self
[:]))
2949 class IndexError(IndexError):
2953 def uppercase_escape(s
):
2954 unicode_escape
= codecs
.getdecoder('unicode_escape')
2956 r
'\\U[0-9a-fA-F]{8}',
2957 lambda m
: unicode_escape(m
.group(0))[0],
2961 def lowercase_escape(s
):
2962 unicode_escape
= codecs
.getdecoder('unicode_escape')
2964 r
'\\u[0-9a-fA-F]{4}',
2965 lambda m
: unicode_escape(m
.group(0))[0],
2969 def escape_rfc3986(s
):
2970 """Escape non-ASCII characters as suggested by RFC 3986"""
2971 return urllib
.parse
.quote(s
, b
"%/;:@&=+$,!~*'()?#[]")
2974 def escape_url(url
):
2975 """Escape URL as suggested by RFC 3986"""
2976 url_parsed
= compat_urllib_parse_urlparse(url
)
2977 return url_parsed
._replace
(
2978 netloc
=url_parsed
.netloc
.encode('idna').decode('ascii'),
2979 path
=escape_rfc3986(url_parsed
.path
),
2980 params
=escape_rfc3986(url_parsed
.params
),
2981 query
=escape_rfc3986(url_parsed
.query
),
2982 fragment
=escape_rfc3986(url_parsed
.fragment
)
2987 return compat_parse_qs(compat_urllib_parse_urlparse(url
).query
)
2990 def read_batch_urls(batch_fd
):
2992 if not isinstance(url
, compat_str
):
2993 url
= url
.decode('utf-8', 'replace')
2994 BOM_UTF8
= ('\xef\xbb\xbf', '\ufeff')
2995 for bom
in BOM_UTF8
:
2996 if url
.startswith(bom
):
2997 url
= url
[len(bom
):]
2999 if not url
or url
.startswith(('#', ';', ']')):
3001 # "#" cannot be stripped out since it is part of the URI
3002 # However, it can be safely stipped out if follwing a whitespace
3003 return re
.split(r
'\s#', url
, 1)[0].rstrip()
3005 with contextlib
.closing(batch_fd
) as fd
:
3006 return [url
for url
in map(fixup
, fd
) if url
]
3009 def urlencode_postdata(*args
, **kargs
):
3010 return compat_urllib_parse_urlencode(*args
, **kargs
).encode('ascii')
3013 def update_url_query(url
, query
):
3016 parsed_url
= compat_urlparse
.urlparse(url
)
3017 qs
= compat_parse_qs(parsed_url
.query
)
3019 return compat_urlparse
.urlunparse(parsed_url
._replace
(
3020 query
=compat_urllib_parse_urlencode(qs
, True)))
3023 def update_Request(req
, url
=None, data
=None, headers
={}, query={}
):
3024 req_headers
= req
.headers
.copy()
3025 req_headers
.update(headers
)
3026 req_data
= data
or req
.data
3027 req_url
= update_url_query(url
or req
.get_full_url(), query
)
3028 req_get_method
= req
.get_method()
3029 if req_get_method
== 'HEAD':
3030 req_type
= HEADRequest
3031 elif req_get_method
== 'PUT':
3032 req_type
= PUTRequest
3034 req_type
= compat_urllib_request
.Request
3036 req_url
, data
=req_data
, headers
=req_headers
,
3037 origin_req_host
=req
.origin_req_host
, unverifiable
=req
.unverifiable
)
3038 if hasattr(req
, 'timeout'):
3039 new_req
.timeout
= req
.timeout
3043 def _multipart_encode_impl(data
, boundary
):
3044 content_type
= 'multipart/form-data; boundary=%s' % boundary
3047 for k
, v
in data
.items():
3048 out
+= b
'--' + boundary
.encode('ascii') + b
'\r\n'
3049 if isinstance(k
, compat_str
):
3051 if isinstance(v
, compat_str
):
3053 # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
3054 # suggests sending UTF-8 directly. Firefox sends UTF-8, too
3055 content
= b
'Content-Disposition: form-data; name="' + k
+ b
'"\r\n\r\n' + v
+ b
'\r\n'
3056 if boundary
.encode('ascii') in content
:
3057 raise ValueError('Boundary overlaps with data')
3060 out
+= b
'--' + boundary
.encode('ascii') + b
'--\r\n'
3062 return out
, content_type
3065 def multipart_encode(data
, boundary
=None):
3067 Encode a dict to RFC 7578-compliant form-data
3070 A dict where keys and values can be either Unicode or bytes-like
3073 If specified a Unicode object, it's used as the boundary. Otherwise
3074 a random boundary is generated.
3076 Reference: https://tools.ietf.org/html/rfc7578
3078 has_specified_boundary
= boundary
is not None
3081 if boundary
is None:
3082 boundary
= '---------------' + str(random
.randrange(0x0fffffff, 0xffffffff))
3085 out
, content_type
= _multipart_encode_impl(data
, boundary
)
3088 if has_specified_boundary
:
3092 return out
, content_type
3095 def dict_get(d
, key_or_keys
, default
=None, skip_false_values
=True):
3096 for val
in map(d
.get
, variadic(key_or_keys
)):
3097 if val
is not None and (val
or not skip_false_values
):
3102 def try_call(*funcs
, expected_type
=None, args
=[], kwargs
={}):
3105 val
= f(*args
, **kwargs
)
3106 except (AttributeError, KeyError, TypeError, IndexError, ZeroDivisionError):
3109 if expected_type
is None or isinstance(val
, expected_type
):
3113 def try_get(src
, getter
, expected_type
=None):
3114 return try_call(*variadic(getter
), args
=(src
,), expected_type
=expected_type
)
3117 def filter_dict(dct
, cndn
=lambda _
, v
: v
is not None):
3118 return {k: v for k, v in dct.items() if cndn(k, v)}
3121 def merge_dicts(*dicts
):
3123 for a_dict
in dicts
:
3124 for k
, v
in a_dict
.items():
3125 if (v
is not None and k
not in merged
3126 or isinstance(v
, str) and merged
[k
] == ''):
3131 def encode_compat_str(string
, encoding
=preferredencoding(), errors
='strict'):
3132 return string
if isinstance(string
, compat_str
) else compat_str(string
, encoding
, errors
)
3144 TV_PARENTAL_GUIDELINES
= {
3154 def parse_age_limit(s
):
3155 # isinstance(False, int) is True. So type() must be used instead
3156 if type(s
) is int: # noqa: E721
3157 return s
if 0 <= s
<= 21 else None
3158 elif not isinstance(s
, str):
3160 m
= re
.match(r
'^(?P<age>\d{1,2})\+?$', s
)
3162 return int(m
.group('age'))
3165 return US_RATINGS
[s
]
3166 m
= re
.match(r
'^TV[_-]?(%s)$' % '|'.join(k
[3:] for k
in TV_PARENTAL_GUIDELINES
), s
)
3168 return TV_PARENTAL_GUIDELINES
['TV-' + m
.group(1)]
3172 def strip_jsonp(code
):
3175 (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
3176 (?:\s*&&\s*(?P=func_name))?
3177 \s*\(\s*(?P<callback_data>.*)\);?
3178 \s*?(?://[^\n]*)*$''',
3179 r
'\g<callback_data>', code
)
3182 def js_to_json(code
, vars={}):
3183 # vars is a dict of var, val pairs to substitute
3184 COMMENT_RE
= r
'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
3185 SKIP_RE
= fr
'\s*(?:{COMMENT_RE})?\s*'
3187 (fr
'(?s)^(0[xX][0-9a-fA-F]+){SKIP_RE}:?$', 16),
3188 (fr
'(?s)^(0+[0-7]+){SKIP_RE}:?$', 8),
3193 if v
in ('true', 'false', 'null'):
3195 elif v
in ('undefined', 'void 0'):
3197 elif v
.startswith('/*') or v
.startswith('//') or v
.startswith('!') or v
== ',':
3200 if v
[0] in ("'", '"'):
3201 v
= re
.sub(r
'(?s)\\.|"', lambda m
: {
3206 }.get(m
.group(0), m
.group(0)), v
[1:-1])
3208 for regex
, base
in INTEGER_TABLE
:
3209 im
= re
.match(regex
, v
)
3211 i
= int(im
.group(1), base
)
3212 return '"%d":' % i
if v
.endswith(':') else '%d' % i
3219 code
= re
.sub(r
'new Date\((".+")\)', r
'\g<1>', code
)
3221 return re
.sub(r
'''(?sx)
3222 "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
3223 '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
3224 {comment}|,(?={skip}[\]}}])|
3225 void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
3226 \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
3229 '''.format(comment
=COMMENT_RE
, skip
=SKIP_RE
), fix_kv
, code
)
3232 def qualities(quality_ids
):
3233 """ Get a numeric quality value out of a list of possible values """
3236 return quality_ids
.index(qid
)
3242 POSTPROCESS_WHEN
= ('pre_process', 'after_filter', 'before_dl', 'after_move', 'post_process', 'after_video', 'playlist')
3246 'default': '%(title)s [%(id)s].%(ext)s',
3247 'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
3253 'description': 'description',
3254 'annotation': 'annotations.xml',
3255 'infojson': 'info.json',
3258 'pl_thumbnail': None,
3259 'pl_description': 'description',
3260 'pl_infojson': 'info.json',
3263 # As of [1] format syntax is:
3264 # %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
3265 # 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
3266 STR_FORMAT_RE_TMPL
= r
'''(?x)
3267 (?<!%)(?P<prefix>(?:%%)*)
3269 (?P<has_key>\((?P<key>{0})\))?
3271 (?P<conversion>[#0\-+ ]+)?
3273 (?P<precision>\.\d+)?
3274 (?P<len_mod>[hlL])? # unused in python
3275 {1} # conversion type
3280 STR_FORMAT_TYPES
= 'diouxXeEfFgGcrs'
3283 def limit_length(s
, length
):
3284 """ Add ellipses to overly long strings """
3289 return s
[:length
- len(ELLIPSES
)] + ELLIPSES
3293 def version_tuple(v
):
3294 return tuple(int(e
) for e
in re
.split(r
'[-.]', v
))
3297 def is_outdated_version(version
, limit
, assume_new
=True):
3299 return not assume_new
3301 return version_tuple(version
) < version_tuple(limit
)
3303 return not assume_new
3306 def ytdl_is_updateable():
3307 """ Returns if yt-dlp can be updated with -U """
3309 from .update
import is_non_updateable
3311 return not is_non_updateable()
3314 def args_to_str(args
):
3315 # Get a short string representation for a subprocess command
3316 return ' '.join(compat_shlex_quote(a
) for a
in args
)
3319 def error_to_compat_str(err
):
3323 def error_to_str(err
):
3324 return f
'{type(err).__name__}: {err}'
3327 def mimetype2ext(mt
):
3331 mt
, _
, params
= mt
.partition(';')
3336 # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
3337 # it's the most popular one
3338 'audio/mpeg': 'mp3',
3339 'audio/x-wav': 'wav',
3341 'audio/wave': 'wav',
3344 ext
= FULL_MAP
.get(mt
)
3350 'smptett+xml': 'tt',
3354 'x-mp4-fragmented': 'mp4',
3355 'x-ms-sami': 'sami',
3358 'x-mpegurl': 'm3u8',
3359 'vnd.apple.mpegurl': 'm3u8',
3363 'vnd.ms-sstr+xml': 'ism',
3367 'filmstrip+json': 'fs',
3371 _
, _
, subtype
= mt
.rpartition('/')
3372 ext
= SUBTYPE_MAP
.get(subtype
.lower())
3383 _
, _
, suffix
= subtype
.partition('+')
3384 ext
= SUFFIX_MAP
.get(suffix
)
3388 return subtype
.replace('+', '.')
3391 def ext2mimetype(ext_or_url
):
3394 if '.' not in ext_or_url
:
3395 ext_or_url
= f
'file.{ext_or_url}'
3396 return mimetypes
.guess_type(ext_or_url
)[0]
3399 def parse_codecs(codecs_str
):
3400 # http://tools.ietf.org/html/rfc6381
3403 split_codecs
= list(filter(None, map(
3404 str.strip
, codecs_str
.strip().strip(',').split(','))))
3405 vcodec
, acodec
, scodec
, hdr
= None, None, None, None
3406 for full_codec
in split_codecs
:
3407 parts
= full_codec
.split('.')
3408 codec
= parts
[0].replace('0', '')
3409 if codec
in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
3410 'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
3412 vcodec
= '.'.join(parts
[:4]) if codec
in ('vp9', 'av1', 'hvc1') else full_codec
3413 if codec
in ('dvh1', 'dvhe'):
3415 elif codec
== 'av1' and len(parts
) > 3 and parts
[3] == '10':
3417 elif full_codec
.replace('0', '').startswith('vp9.2'):
3419 elif codec
in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
3422 elif codec
in ('stpp', 'wvtt',):
3426 write_string(f
'WARNING: Unknown codec {full_codec}\n')
3427 if vcodec
or acodec
or scodec
:
3429 'vcodec': vcodec
or 'none',
3430 'acodec': acodec
or 'none',
3431 'dynamic_range': hdr
,
3432 **({'scodec': scodec}
if scodec
is not None else {}),
3434 elif len(split_codecs
) == 2:
3436 'vcodec': split_codecs
[0],
3437 'acodec': split_codecs
[1],
3442 def urlhandle_detect_ext(url_handle
):
3443 getheader
= url_handle
.headers
.get
3445 cd
= getheader('Content-Disposition')
3447 m
= re
.match(r
'attachment;\s*filename="(?P<filename>[^"]+)"', cd
)
3449 e
= determine_ext(m
.group('filename'), default_ext
=None)
3453 return mimetype2ext(getheader('Content-Type'))
3456 def encode_data_uri(data
, mime_type
):
3457 return 'data:%s;base64,%s' % (mime_type
, base64
.b64encode(data
).decode('ascii'))
3460 def age_restricted(content_limit
, age_limit
):
3461 """ Returns True iff the content should be blocked """
3463 if age_limit
is None: # No limit set
3465 if content_limit
is None:
3466 return False # Content available for everyone
3467 return age_limit
< content_limit
3470 def is_html(first_bytes
):
3471 """ Detect whether a file contains HTML by examining its first bytes. """
3474 (b
'\xef\xbb\xbf', 'utf-8'),
3475 (b
'\x00\x00\xfe\xff', 'utf-32-be'),
3476 (b
'\xff\xfe\x00\x00', 'utf-32-le'),
3477 (b
'\xff\xfe', 'utf-16-le'),
3478 (b
'\xfe\xff', 'utf-16-be'),
3482 for bom
, enc
in BOMS
:
3483 while first_bytes
.startswith(bom
):
3484 encoding
, first_bytes
= enc
, first_bytes
[len(bom
):]
3486 return re
.match(r
'^\s*<', first_bytes
.decode(encoding
, 'replace'))
3489 def determine_protocol(info_dict
):
3490 protocol
= info_dict
.get('protocol')
3491 if protocol
is not None:
3494 url
= sanitize_url(info_dict
['url'])
3495 if url
.startswith('rtmp'):
3497 elif url
.startswith('mms'):
3499 elif url
.startswith('rtsp'):
3502 ext
= determine_ext(url
)
3508 return compat_urllib_parse_urlparse(url
).scheme
3511 def render_table(header_row
, data
, delim
=False, extra_gap
=0, hide_empty
=False):
3512 """ Render a list of rows, each as a list of values.
3513 Text after a \t will be right aligned """
3515 return len(remove_terminal_sequences(string
).replace('\t', ''))
3517 def get_max_lens(table
):
3518 return [max(width(str(v
)) for v
in col
) for col
in zip(*table
)]
3520 def filter_using_list(row
, filterArray
):
3521 return [col
for take
, col
in itertools
.zip_longest(filterArray
, row
, fillvalue
=True) if take
]
3523 max_lens
= get_max_lens(data
) if hide_empty
else []
3524 header_row
= filter_using_list(header_row
, max_lens
)
3525 data
= [filter_using_list(row
, max_lens
) for row
in data
]
3527 table
= [header_row
] + data
3528 max_lens
= get_max_lens(table
)
3531 table
= [header_row
, [delim
* (ml
+ extra_gap
) for ml
in max_lens
]] + data
3532 table
[1][-1] = table
[1][-1][:-extra_gap
* len(delim
)] # Remove extra_gap from end of delimiter
3534 for pos
, text
in enumerate(map(str, row
)):
3536 row
[pos
] = text
.replace('\t', ' ' * (max_lens
[pos
] - width(text
))) + ' ' * extra_gap
3538 row
[pos
] = text
+ ' ' * (max_lens
[pos
] - width(text
) + extra_gap
)
3539 ret
= '\n'.join(''.join(row
).rstrip() for row
in table
)
3543 def _match_one(filter_part
, dct
, incomplete
):
3544 # TODO: Generalize code with YoutubeDL._build_format_filter
3545 STRING_OPERATORS
= {
3546 '*=': operator
.contains
,
3547 '^=': lambda attr
, value
: attr
.startswith(value
),
3548 '$=': lambda attr
, value
: attr
.endswith(value
),
3549 '~=': lambda attr
, value
: re
.search(value
, attr
),
3551 COMPARISON_OPERATORS
= {
3553 '<=': operator
.le
, # "<=" must be defined above "<"
3560 if isinstance(incomplete
, bool):
3561 is_incomplete
= lambda _
: incomplete
3563 is_incomplete
= lambda k
: k
in incomplete
3565 operator_rex
= re
.compile(r
'''(?x)
3567 \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
3569 (?P<quote>["\'])(?P
<quotedstrval
>.+?
)(?P
=quote
)|
3572 ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
3573 m = operator_rex.fullmatch(filter_part.strip())
3576 unnegated_op = COMPARISON_OPERATORS[m['op']]
3578 op = lambda attr, value: not unnegated_op(attr, value)
3581 comparison_value = m['quotedstrval'] or m['strval'] or m['intval']
3583 comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote'])
3584 actual_value = dct.get(m['key'])
3585 numeric_comparison = None
3586 if isinstance(actual_value, (int, float)):
3587 # If the original field is a string and matching comparisonvalue is
3588 # a number we should respect the origin of the original field
3589 # and process comparison value as a string (see
3590 # https://github.com/ytdl-org/youtube-dl/issues/11082)
3592 numeric_comparison = int(comparison_value)
3594 numeric_comparison = parse_filesize(comparison_value)
3595 if numeric_comparison is None:
3596 numeric_comparison = parse_filesize(f'{comparison_value}B')
3597 if numeric_comparison is None:
3598 numeric_comparison = parse_duration(comparison_value)
3599 if numeric_comparison is not None and m['op'] in STRING_OPERATORS:
3600 raise ValueError('Operator %s only supports string values!' % m['op'])
3601 if actual_value is None:
3602 return is_incomplete(m['key']) or m['none_inclusive']
3603 return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)
3606 '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
3607 '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
3609 operator_rex = re.compile(r'''(?x
)
3610 (?P
<op
>%s)\s
*(?P
<key
>[a
-z_
]+)
3611 ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
3612 m = operator_rex.fullmatch(filter_part.strip())
3614 op = UNARY_OPERATORS[m.group('op')]
3615 actual_value = dct.get(m.group('key'))
3616 if is_incomplete(m.group('key')) and actual_value is None:
3618 return op(actual_value)
3620 raise ValueError('Invalid filter part %r' % filter_part)
3623 def match_str(filter_str, dct, incomplete=False):
3624 """ Filter a dictionary with a simple string syntax.
3625 @returns Whether the filter passes
3626 @param incomplete Set of keys that is expected to be missing from dct.
3627 Can be True/False to indicate all/none of the keys may be missing.
3628 All conditions on incomplete keys pass if the key is missing
3631 _match_one(filter_part.replace(r'\&', '&'), dct, incomplete)
3632 for filter_part in re.split(r'(?<!\\)&', filter_str))
3635 def match_filter_func(filters):
3638 filters = set(variadic(filters))
3640 interactive = '-' in filters
3644 def _match_func(info_dict, incomplete=False):
3645 if not filters or any(match_str(f, info_dict, incomplete) for f in filters):
3646 return NO_DEFAULT if interactive and not incomplete else None
3648 video_title = info_dict.get('title') or info_dict.get('id') or 'video'
3649 filter_str = ') | ('.join(map(str.strip, filters))
3650 return f'{video_title} does not pass filter ({filter_str}), skipping ..'
3654 def download_range_func(chapters, ranges):
3655 def inner(info_dict, ydl):
3656 warning = ('There are no chapters matching the regex' if info_dict.get('chapters')
3657 else 'Cannot match chapters since chapter information is unavailable')
3658 for regex in chapters or []:
3659 for i, chapter in enumerate(info_dict.get('chapters') or []):
3660 if re.search(regex, chapter['title']):
3662 yield {**chapter, 'index': i}
3663 if chapters and warning:
3664 ydl.to_screen(f'[info] {info_dict["id"]}: {warning}')
3666 yield from ({'start_time': start, 'end_time': end} for start, end in ranges or [])
3671 def parse_dfxp_time_expr(time_expr):
3675 mobj = re.match(rf'^(?P<time_offset>{NUMBER_RE})s?$', time_expr)
3677 return float(mobj.group('time_offset'))
3679 mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
3681 return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
3684 def srt_subtitles_timecode(seconds):
3685 return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)
3688 def ass_subtitles_timecode(seconds):
3689 time = timetuple_from_msec(seconds * 1000)
3690 return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)
3693 def dfxp2srt(dfxp_data):
3695 @param dfxp_data A
bytes-like
object containing DFXP data
3696 @returns A
unicode object containing converted SRT data
3698 LEGACY_NAMESPACES = (
3699 (b'http://www.w3.org/ns/ttml', [
3700 b'http://www.w3.org/2004/11/ttaf1',
3701 b'http://www.w3.org/2006/04/ttaf1',
3702 b'http://www.w3.org/2006/10/ttaf1',
3704 (b'http://www.w3.org/ns/ttml#styling', [
3705 b'http://www.w3.org/ns/ttml#style',
3709 SUPPORTED_STYLING = [
3718 _x = functools.partial(xpath_with_ns, ns_map={
3719 'xml': 'http://www.w3.org/XML/1998/namespace',
3720 'ttml': 'http://www.w3.org/ns/ttml',
3721 'tts': 'http://www.w3.org/ns/ttml#styling',
3727 class TTMLPElementParser:
3729 _unclosed_elements = []
3730 _applied_styles = []
3732 def start(self, tag, attrib):
3733 if tag in (_x('ttml:br'), 'br'):
3736 unclosed_elements = []
3738 element_style_id = attrib.get('style')
3740 style.update(default_style)
3741 if element_style_id:
3742 style.update(styles.get(element_style_id, {}))
3743 for prop in SUPPORTED_STYLING:
3744 prop_val = attrib.get(_x('tts:' + prop))
3746 style[prop] = prop_val
3749 for k, v in sorted(style.items()):
3750 if self._applied_styles and self._applied_styles[-1].get(k) == v:
3753 font += ' color="%s"' % v
3754 elif k == 'fontSize':
3755 font += ' size="%s"' % v
3756 elif k == 'fontFamily':
3757 font += ' face="%s"' % v
3758 elif k == 'fontWeight' and v == 'bold':
3760 unclosed_elements.append('b')
3761 elif k == 'fontStyle' and v == 'italic':
3763 unclosed_elements.append('i')
3764 elif k == 'textDecoration' and v == 'underline':
3766 unclosed_elements.append('u')
3768 self._out += '<font' + font + '>'
3769 unclosed_elements.append('font')
3771 if self._applied_styles:
3772 applied_style.update(self._applied_styles[-1])
3773 applied_style.update(style)
3774 self._applied_styles.append(applied_style)
3775 self._unclosed_elements.append(unclosed_elements)
3778 if tag not in (_x('ttml:br'), 'br'):
3779 unclosed_elements = self._unclosed_elements.pop()
3780 for element in reversed(unclosed_elements):
3781 self._out += '</%s>' % element
3782 if unclosed_elements and self._applied_styles:
3783 self._applied_styles.pop()
3785 def data(self, data):
3789 return self._out.strip()
3791 def parse_node(node):
3792 target = TTMLPElementParser()
3793 parser = xml.etree.ElementTree.XMLParser(target=target)
3794 parser.feed(xml.etree.ElementTree.tostring(node))
3795 return parser.close()
3797 for k, v in LEGACY_NAMESPACES:
3799 dfxp_data = dfxp_data.replace(ns, k)
3801 dfxp = compat_etree_fromstring(dfxp_data)
3803 paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
3806 raise ValueError('Invalid dfxp/TTML subtitle')
3810 for style in dfxp.findall(_x('.//ttml:style')):
3811 style_id = style.get('id') or style.get(_x('xml:id'))
3814 parent_style_id = style.get('style')
3816 if parent_style_id not in styles:
3819 styles[style_id] = styles[parent_style_id].copy()
3820 for prop in SUPPORTED_STYLING:
3821 prop_val = style.get(_x('tts:' + prop))
3823 styles.setdefault(style_id, {})[prop] = prop_val
3829 for p in ('body', 'div'):
3830 ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
3833 style = styles.get(ele.get('style'))
3836 default_style.update(style)
3838 for para, index in zip(paras, itertools.count(1)):
3839 begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
3840 end_time = parse_dfxp_time_expr(para.attrib.get('end'))
3841 dur = parse_dfxp_time_expr(para.attrib.get('dur'))
3842 if begin_time is None:
3847 end_time = begin_time + dur
3848 out.append('%d\n%s --> %s\n%s\n\n' % (
3850 srt_subtitles_timecode(begin_time),
3851 srt_subtitles_timecode(end_time),
3857 def cli_option(params, command_option, param, separator=None):
3858 param = params.get(param)
3859 return ([] if param is None
3860 else [command_option, str(param)] if separator is None
3861 else [f'{command_option}{separator}{param}'])
3864 def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
3865 param = params.get(param)
3866 assert param in (True, False, None)
3867 return cli_option({True: true_value, False: false_value}, command_option, param, separator)
3870 def cli_valueless_option(params, command_option, param, expected_value=True):
3871 return [command_option] if params.get(param) == expected_value else []
3874 def cli_configuration_args(argdict, keys, default=[], use_compat=True):
3875 if isinstance(argdict, (list, tuple)): # for backward compatibility
3882 assert isinstance(argdict, dict)
3884 assert isinstance(keys, (list, tuple))
3885 for key_list in keys:
3886 arg_list = list(filter(
3887 lambda x: x is not None,
3888 [argdict.get(key.lower()) for key in variadic(key_list)]))
3890 return [arg for args in arg_list for arg in args]
3894 def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compat=True):
3895 main_key, exe = main_key.lower(), exe.lower()
3896 root_key = exe if main_key == exe else f'{main_key}+{exe}'
3897 keys = [f'{root_key}{k}' for k in (keys or [''])]
3898 if root_key in keys:
3900 keys.append((main_key, exe))
3901 keys.append('default')
3904 return cli_configuration_args(argdict, keys, default, use_compat)
3908 # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
3967 'iw': 'heb', # Replaced by he in 1989 revision
3977 'in': 'ind', # Replaced by id in 1989 revision
4092 'ji': 'yid', # Replaced by yi in 1989 revision
4100 def short2long(cls, code):
4101 """Convert language code from ISO 639-1 to ISO 639-2/T"""
4102 return cls._lang_map.get(code[:2])
4105 def long2short(cls, code):
4106 """Convert language code from ISO 639-2/T to ISO 639-1"""
4107 for short_name, long_name in cls._lang_map.items():
4108 if long_name == code:
4113 # From http://data.okfn.org/data/core/country-list
4115 'AF': 'Afghanistan',
4116 'AX': 'Åland Islands',
4119 'AS': 'American Samoa',
4124 'AG': 'Antigua and Barbuda',
4141 'BO': 'Bolivia, Plurinational State of',
4142 'BQ': 'Bonaire, Sint Eustatius and Saba',
4143 'BA': 'Bosnia and Herzegovina',
4145 'BV': 'Bouvet Island',
4147 'IO': 'British Indian Ocean Territory',
4148 'BN': 'Brunei Darussalam',
4150 'BF': 'Burkina Faso',
4156 'KY': 'Cayman Islands',
4157 'CF': 'Central African Republic',
4161 'CX': 'Christmas Island',
4162 'CC': 'Cocos (Keeling) Islands',
4166 'CD': 'Congo, the Democratic Republic of the',
4167 'CK': 'Cook Islands',
4169 'CI': 'Côte d\'Ivoire',
4174 'CZ': 'Czech Republic',
4178 'DO': 'Dominican Republic',
4181 'SV': 'El Salvador',
4182 'GQ': 'Equatorial Guinea',
4186 'FK': 'Falkland Islands (Malvinas)',
4187 'FO': 'Faroe Islands',
4191 'GF': 'French Guiana',
4192 'PF': 'French Polynesia',
4193 'TF': 'French Southern Territories',
4208 'GW': 'Guinea-Bissau',
4211 'HM': 'Heard Island and McDonald Islands',
4212 'VA': 'Holy See (Vatican City State)',
4219 'IR': 'Iran, Islamic Republic of',
4222 'IM': 'Isle of Man',
4232 'KP': 'Korea, Democratic People\'s Republic of',
4233 'KR': 'Korea, Republic of',
4236 'LA': 'Lao People\'s Democratic Republic',
4242 'LI': 'Liechtenstein',
4246 'MK': 'Macedonia, the Former Yugoslav Republic of',
4253 'MH': 'Marshall Islands',
4259 'FM': 'Micronesia, Federated States of',
4260 'MD': 'Moldova, Republic of',
4271 'NL': 'Netherlands',
4272 'NC': 'New Caledonia',
4273 'NZ': 'New Zealand',
4278 'NF': 'Norfolk Island',
4279 'MP': 'Northern Mariana Islands',
4284 'PS': 'Palestine, State of',
4286 'PG': 'Papua New Guinea',
4289 'PH': 'Philippines',
4293 'PR': 'Puerto Rico',
4297 'RU': 'Russian Federation',
4299 'BL': 'Saint Barthélemy',
4300 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
4301 'KN': 'Saint Kitts and Nevis',
4302 'LC': 'Saint Lucia',
4303 'MF': 'Saint Martin (French part)',
4304 'PM': 'Saint Pierre and Miquelon',
4305 'VC': 'Saint Vincent and the Grenadines',
4308 'ST': 'Sao Tome and Principe',
4309 'SA': 'Saudi Arabia',
4313 'SL': 'Sierra Leone',
4315 'SX': 'Sint Maarten (Dutch part)',
4318 'SB': 'Solomon Islands',
4320 'ZA': 'South Africa',
4321 'GS': 'South Georgia and the South Sandwich Islands',
4322 'SS': 'South Sudan',
4327 'SJ': 'Svalbard and Jan Mayen',
4330 'CH': 'Switzerland',
4331 'SY': 'Syrian Arab Republic',
4332 'TW': 'Taiwan, Province of China',
4334 'TZ': 'Tanzania, United Republic of',
4336 'TL': 'Timor-Leste',
4340 'TT': 'Trinidad and Tobago',
4343 'TM': 'Turkmenistan',
4344 'TC': 'Turks and Caicos Islands',
4348 'AE': 'United Arab Emirates',
4349 'GB': 'United Kingdom',
4350 'US': 'United States',
4351 'UM': 'United States Minor Outlying Islands',
4355 'VE': 'Venezuela, Bolivarian Republic of',
4357 'VG': 'Virgin Islands, British',
4358 'VI': 'Virgin Islands, U.S.',
4359 'WF': 'Wallis and Futuna',
4360 'EH': 'Western Sahara',
4364 # Not ISO 3166 codes, but used for IP blocks
4365 'AP': 'Asia/Pacific Region',
4370 def short2full(cls, code):
4371 """Convert an ISO 3166-2 country code to the corresponding full name"""
4372 return cls._country_map.get(code.upper())
4376 # Major IPv4 address blocks per country
4378 'AD': '46.172.224.0/19',
4379 'AE': '94.200.0.0/13',
4380 'AF': '149.54.0.0/17',
4381 'AG': '209.59.64.0/18',
4382 'AI': '204.14.248.0/21',
4383 'AL': '46.99.0.0/16',
4384 'AM': '46.70.0.0/15',
4385 'AO': '105.168.0.0/13',
4386 'AP': '182.50.184.0/21',
4387 'AQ': '23.154.160.0/24',
4388 'AR': '181.0.0.0/12',
4389 'AS': '202.70.112.0/20',
4390 'AT': '77.116.0.0/14',
4391 'AU': '1.128.0.0/11',
4392 'AW': '181.41.0.0/18',
4393 'AX': '185.217.4.0/22',
4394 'AZ': '5.197.0.0/16',
4395 'BA': '31.176.128.0/17',
4396 'BB': '65.48.128.0/17',
4397 'BD': '114.130.0.0/16',
4399 'BF': '102.178.0.0/15',
4400 'BG': '95.42.0.0/15',
4401 'BH': '37.131.0.0/17',
4402 'BI': '154.117.192.0/18',
4403 'BJ': '137.255.0.0/16',
4404 'BL': '185.212.72.0/23',
4405 'BM': '196.12.64.0/18',
4406 'BN': '156.31.0.0/16',
4407 'BO': '161.56.0.0/16',
4408 'BQ': '161.0.80.0/20',
4409 'BR': '191.128.0.0/12',
4410 'BS': '24.51.64.0/18',
4411 'BT': '119.2.96.0/19',
4412 'BW': '168.167.0.0/16',
4413 'BY': '178.120.0.0/13',
4414 'BZ': '179.42.192.0/18',
4415 'CA': '99.224.0.0/11',
4416 'CD': '41.243.0.0/16',
4417 'CF': '197.242.176.0/21',
4418 'CG': '160.113.0.0/16',
4419 'CH': '85.0.0.0/13',
4420 'CI': '102.136.0.0/14',
4421 'CK': '202.65.32.0/19',
4422 'CL': '152.172.0.0/14',
4423 'CM': '102.244.0.0/14',
4424 'CN': '36.128.0.0/10',
4425 'CO': '181.240.0.0/12',
4426 'CR': '201.192.0.0/12',
4427 'CU': '152.206.0.0/15',
4428 'CV': '165.90.96.0/19',
4429 'CW': '190.88.128.0/17',
4430 'CY': '31.153.0.0/16',
4431 'CZ': '88.100.0.0/14',
4433 'DJ': '197.241.0.0/17',
4434 'DK': '87.48.0.0/12',
4435 'DM': '192.243.48.0/20',
4436 'DO': '152.166.0.0/15',
4437 'DZ': '41.96.0.0/12',
4438 'EC': '186.68.0.0/15',
4439 'EE': '90.190.0.0/15',
4440 'EG': '156.160.0.0/11',
4441 'ER': '196.200.96.0/20',
4442 'ES': '88.0.0.0/11',
4443 'ET': '196.188.0.0/14',
4444 'EU': '2.16.0.0/13',
4445 'FI': '91.152.0.0/13',
4446 'FJ': '144.120.0.0/16',
        'FK': '80.73.208.0/21',
        'FM': '119.252.112.0/20',
        'FO': '88.85.32.0/19',
        'GA': '41.158.0.0/15',
        'GD': '74.122.88.0/21',
        'GE': '31.146.0.0/16',
        'GF': '161.22.64.0/18',
        'GG': '62.68.160.0/19',
        'GH': '154.160.0.0/12',
        'GI': '95.164.0.0/16',
        'GL': '88.83.0.0/19',
        'GM': '160.182.0.0/15',
        'GN': '197.149.192.0/18',
        'GP': '104.250.0.0/19',
        'GQ': '105.235.224.0/20',
        'GR': '94.64.0.0/13',
        'GT': '168.234.0.0/16',
        'GU': '168.123.0.0/16',
        'GW': '197.214.80.0/20',
        'GY': '181.41.64.0/18',
        'HK': '113.252.0.0/14',
        'HN': '181.210.0.0/16',
        'HR': '93.136.0.0/13',
        'HT': '148.102.128.0/17',
        'HU': '84.0.0.0/14',
        'ID': '39.192.0.0/10',
        'IE': '87.32.0.0/12',
        'IL': '79.176.0.0/13',
        'IM': '5.62.80.0/20',
        'IN': '117.192.0.0/10',
        'IO': '203.83.48.0/21',
        'IQ': '37.236.0.0/14',
        'IR': '2.176.0.0/12',
        'IS': '82.221.0.0/16',
        'IT': '79.0.0.0/10',
        'JE': '87.244.64.0/18',
        'JM': '72.27.0.0/17',
        'JO': '176.29.0.0/16',
        'JP': '133.0.0.0/8',
        'KE': '105.48.0.0/12',
        'KG': '158.181.128.0/17',
        'KH': '36.37.128.0/17',
        'KI': '103.25.140.0/22',
        'KM': '197.255.224.0/20',
        'KN': '198.167.192.0/19',
        'KP': '175.45.176.0/22',
        'KR': '175.192.0.0/10',
        'KW': '37.36.0.0/14',
        'KY': '64.96.0.0/15',
        'KZ': '2.72.0.0/13',
        'LA': '115.84.64.0/18',
        'LB': '178.135.0.0/16',
        'LC': '24.92.144.0/20',
        'LI': '82.117.0.0/19',
        'LK': '112.134.0.0/15',
        'LR': '102.183.0.0/16',
        'LS': '129.232.0.0/17',
        'LT': '78.56.0.0/13',
        'LU': '188.42.0.0/16',
        'LV': '46.109.0.0/16',
        'LY': '41.252.0.0/14',
        'MA': '105.128.0.0/11',
        'MC': '88.209.64.0/18',
        'MD': '37.246.0.0/16',
        'ME': '178.175.0.0/17',
        'MF': '74.112.232.0/21',
        'MG': '154.126.0.0/17',
        'MH': '117.103.88.0/21',
        'MK': '77.28.0.0/15',
        'ML': '154.118.128.0/18',
        'MM': '37.111.0.0/17',
        'MN': '49.0.128.0/17',
        'MO': '60.246.0.0/16',
        'MP': '202.88.64.0/20',
        'MQ': '109.203.224.0/19',
        'MR': '41.188.64.0/18',
        'MS': '208.90.112.0/22',
        'MT': '46.11.0.0/16',
        'MU': '105.16.0.0/12',
        'MV': '27.114.128.0/18',
        'MW': '102.70.0.0/15',
        'MX': '187.192.0.0/11',
        'MY': '175.136.0.0/13',
        'MZ': '197.218.0.0/15',
        'NA': '41.182.0.0/16',
        'NC': '101.101.0.0/18',
        'NE': '197.214.0.0/18',
        'NF': '203.17.240.0/22',
        'NG': '105.112.0.0/12',
        'NI': '186.76.0.0/15',
        'NL': '145.96.0.0/11',
        'NO': '84.208.0.0/13',
        'NP': '36.252.0.0/15',
        'NR': '203.98.224.0/19',
        'NU': '49.156.48.0/22',
        'NZ': '49.224.0.0/14',
        'OM': '5.36.0.0/15',
        'PA': '186.72.0.0/15',
        'PE': '186.160.0.0/14',
        'PF': '123.50.64.0/18',
        'PG': '124.240.192.0/19',
        'PH': '49.144.0.0/13',
        'PK': '39.32.0.0/11',
        'PL': '83.0.0.0/11',
        'PM': '70.36.0.0/20',
        'PR': '66.50.0.0/16',
        'PS': '188.161.0.0/16',
        'PT': '85.240.0.0/13',
        'PW': '202.124.224.0/20',
        'PY': '181.120.0.0/14',
        'QA': '37.210.0.0/15',
        'RE': '102.35.0.0/16',
        'RO': '79.112.0.0/13',
        'RS': '93.86.0.0/15',
        'RU': '5.136.0.0/13',
        'RW': '41.186.0.0/16',
        'SA': '188.48.0.0/13',
        'SB': '202.1.160.0/19',
        'SC': '154.192.0.0/11',
        'SD': '102.120.0.0/13',
        'SE': '78.64.0.0/12',
        'SG': '8.128.0.0/10',
        'SI': '188.196.0.0/14',
        'SK': '78.98.0.0/15',
        'SL': '102.143.0.0/17',
        'SM': '89.186.32.0/19',
        'SN': '41.82.0.0/15',
        'SO': '154.115.192.0/18',
        'SR': '186.179.128.0/17',
        'SS': '105.235.208.0/21',
        'ST': '197.159.160.0/19',
        'SV': '168.243.0.0/16',
        'SX': '190.102.0.0/20',
        'SZ': '41.84.224.0/19',
        'TC': '65.255.48.0/20',
        'TD': '154.68.128.0/19',
        'TG': '196.168.0.0/14',
        'TH': '171.96.0.0/13',
        'TJ': '85.9.128.0/18',
        'TK': '27.96.24.0/21',
        'TL': '180.189.160.0/20',
        'TM': '95.85.96.0/19',
        'TN': '197.0.0.0/11',
        'TO': '175.176.144.0/21',
        'TR': '78.160.0.0/11',
        'TT': '186.44.0.0/15',
        'TV': '202.2.96.0/19',
        'TW': '120.96.0.0/11',
        'TZ': '156.156.0.0/14',
        'UA': '37.52.0.0/14',
        'UG': '102.80.0.0/13',
        'UY': '167.56.0.0/13',
        'UZ': '84.54.64.0/18',
        'VA': '212.77.0.0/19',
        'VC': '207.191.240.0/21',
        'VE': '186.88.0.0/13',
        'VG': '66.81.192.0/20',
        'VI': '146.226.0.0/16',
        'VN': '14.160.0.0/11',
        'VU': '202.80.32.0/20',
        'WF': '117.20.32.0/21',
        'WS': '202.4.32.0/19',
        'YE': '134.35.0.0/16',
        'YT': '41.242.116.0/22',
        'ZA': '41.0.0.0/11',
        'ZM': '102.144.0.0/13',
        'ZW': '102.177.192.0/18',
    }

    @classmethod
    def random_ipv4(cls, code_or_block):
        if len(code_or_block) == 2:
            block = cls._country_ip_map.get(code_or_block.upper())
            if not block:
                return None
        else:
            block = code_or_block
        addr, preflen = block.split('/')
        addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
        addr_max = addr_min | (0xffffffff >> int(preflen))
        return compat_str(socket.inet_ntoa(
            compat_struct_pack('!L', random.randint(addr_min, addr_max))))
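
    # Usage sketch (hedged): the output is random, so the addresses below are
    # just one possibility each. An explicit 'addr/preflen' block also works.
    #   >>> GeoUtils.random_ipv4('JP')            # 'JP' maps to 133.0.0.0/8 above
    #   '133.45.67.89'
    #   >>> GeoUtils.random_ipv4('10.0.0.0/24')
    #   '10.0.0.213'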


class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
    def __init__(self, proxies=None):
        # Set default handlers
        for type in ('http', 'https'):
            setattr(self, '%s_open' % type,
                    lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
                        meth(r, proxy, type))
        compat_urllib_request.ProxyHandler.__init__(self, proxies)

    def proxy_open(self, req, proxy, type):
        req_proxy = req.headers.get('Ytdl-request-proxy')
        if req_proxy is not None:
            proxy = req_proxy
            del req.headers['Ytdl-request-proxy']

        if proxy == '__noproxy__':
            return None  # No Proxy
        if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
            req.add_header('Ytdl-socks-proxy', proxy)
            # yt-dlp's http/https handlers wrap the socket with socks themselves
            return None
        return compat_urllib_request.ProxyHandler.proxy_open(
            self, req, proxy, type)


# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
# released into Public Domain
# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387

def long_to_bytes(n, blocksize=0):
    """long_to_bytes(n:long, blocksize:int) : string
    Convert a long integer to a byte string.

    If optional blocksize is given and greater than zero, pad the front of the
    byte string with binary zeros so that the length is a multiple of
    blocksize.
    """
    # after much testing, this algorithm was deemed to be the fastest
    s = b''
    n = int(n)
    while n > 0:
        s = compat_struct_pack('>I', n & 0xffffffff) + s
        n = n >> 32
    # strip off leading zeros
    for i in range(len(s)):
        if s[i] != b'\000'[0]:
            break
    else:
        # only happens when n == 0
        s = b'\000'
        i = 0
    s = s[i:]
    # add back some pad bytes. this could be done more efficiently w.r.t. the
    # de-padding being done above, but sigh...
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * b'\000' + s
    return s


def bytes_to_long(s):
    """bytes_to_long(string) : long
    Convert a byte string to a long integer.

    This is (essentially) the inverse of long_to_bytes().
    """
    acc = 0
    length = len(s)
    if length % 4:
        extra = (4 - length % 4)
        s = b'\000' * extra + s
        length = length + extra
    for i in range(0, length, 4):
        acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
    return acc
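
# A minimal illustration of the pair above (values verifiable by hand):
#   >>> long_to_bytes(256)
#   b'\x01\x00'
#   >>> bytes_to_long(b'\x01\x00')
#   256
#   >>> long_to_bytes(256, blocksize=4)   # front-padded to a multiple of 4
#   b'\x00\x00\x01\x00'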


def ohdave_rsa_encrypt(data, exponent, modulus):
    '''
    Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/

    Input:
        data: data to encrypt, bytes-like object
        exponent, modulus: parameter e and N of RSA algorithm, both integer
    Output: hex string of encrypted data

    Limitation: supports one block encryption only
    '''

    payload = int(binascii.hexlify(data[::-1]), 16)
    encrypted = pow(payload, exponent, modulus)
    return '%x' % encrypted
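
# A toy sketch with textbook-sized RSA parameters (e=3, N=33; purely
# illustrative, far too small for real use): the single byte 0x02 becomes
# the integer 2, and pow(2, 3, 33) == 8, formatted as lowercase hex.
#   >>> ohdave_rsa_encrypt(b'\x02', 3, 33)
#   '8'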


def pkcs1pad(data, length):
    """
    Padding input data with PKCS#1 scheme

    @param {int[]} data        input data
    @param {int}   length      target length
    @returns {int[]}           padded data
    """
    if len(data) > length - 11:
        raise ValueError('Input data too long for PKCS#1 padding')

    pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
    return [0, 2] + pseudo_random + [0] + data
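
# Shape of the padded output (a sketch; the ten middle bytes are random):
#   >>> padded = pkcs1pad(list(b'abc'), 16)
#   >>> len(padded), padded[:2], padded[-4:]
#   (16, [0, 2], [0, 97, 98, 99])   # [0] separator, then the data bytes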


def encode_base_n(num, n, table=None):
    FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if not table:
        table = FULL_TABLE[:n]

    if n > len(table):
        raise ValueError('base %d exceeds table length %d' % (n, len(table)))

    if num == 0:
        return table[0]

    ret = ''
    while num:
        ret = table[num % n] + ret
        num = num // n
    return ret
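
# Quick check of the encoder (hand-verifiable):
#   >>> encode_base_n(255, 16)
#   'ff'
#   >>> encode_base_n(0, 16)
#   '0'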


def decode_packed_codes(code):
    mobj = re.search(PACKED_CODES_RE, code)
    obfuscated_code, base, count, symbols = mobj.groups()
    base = int(base)
    count = int(count)
    symbols = symbols.split('|')
    symbol_table = {}

    while count:
        count -= 1
        base_n_count = encode_base_n(count, base)
        symbol_table[base_n_count] = symbols[count] or base_n_count

    return re.sub(
        r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
        obfuscated_code)
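
# A hand-rolled P.A.C.K.E.R.-style input for illustration (crafted to match
# PACKED_CODES_RE): base 2, 2 symbols; token '0' maps to 'hello', '1' to 'world'.
#   >>> decode_packed_codes("}('0 1',2,2,'hello|world'.split('|')")
#   'hello world'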


def caesar(s, alphabet, shift):
    if shift == 0:
        return s
    l = len(alphabet)
    return ''.join(
        alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
        for c in s)


def rot47(s):
    return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)
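
# rot47 shifts within the 94 printable ASCII characters; applying it twice
# (2 * 47 == 94) restores the input:
#   >>> rot47('abc')
#   '234'
#   >>> rot47(rot47('abc'))
#   'abc'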


def parse_m3u8_attributes(attrib):
    info = {}
    for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
        if val.startswith('"'):
            val = val[1:-1]
        info[key] = val
    return info
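
# Example on a typical EXT-X-STREAM-INF attribute list (quoted values may
# contain commas; the surrounding quotes are stripped):
#   >>> parse_m3u8_attributes('BANDWIDTH=1280000,CODECS="mp4a.40.2,avc1.4d401f"')
#   {'BANDWIDTH': '1280000', 'CODECS': 'mp4a.40.2,avc1.4d401f'}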


def urshift(val, n):
    return val >> n if val >= 0 else (val + 0x100000000) >> n
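
# Emulates JavaScript's unsigned right shift (>>>) for 32-bit values:
#   >>> urshift(16, 4)
#   1
#   >>> urshift(-1, 4)        # -1 is treated as 0xffffffff
#   268435455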


# Based on png2str() written by @gdkchan and improved by @yokrysty
# Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
def decode_png(png_data):
    # Reference: https://www.w3.org/TR/PNG/
    header = png_data[8:]

    if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
        raise OSError('Not a valid PNG file.')

    int_map = {1: '>B', 2: '>H', 4: '>I'}
    unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]

    chunks = []

    while header:
        length = unpack_integer(header[:4])
        header = header[4:]

        chunk_type = header[:4]
        header = header[4:]

        chunk_data = header[:length]
        header = header[length:]

        header = header[4:]  # Skip CRC

        chunks.append({
            'type': chunk_type,
            'length': length,
            'data': chunk_data,
        })

    ihdr = chunks[0]['data']

    width = unpack_integer(ihdr[:4])
    height = unpack_integer(ihdr[4:8])

    idat = b''

    for chunk in chunks:
        if chunk['type'] == b'IDAT':
            idat += chunk['data']

    if not idat:
        raise OSError('Unable to read PNG data.')

    decompressed_data = bytearray(zlib.decompress(idat))

    stride = width * 3
    pixels = []

    def _get_pixel(idx):
        x = idx % stride
        y = idx // stride
        return pixels[y][x]

    for y in range(height):
        basePos = y * (1 + stride)
        filter_type = decompressed_data[basePos]

        current_row = []

        pixels.append(current_row)

        for x in range(stride):
            color = decompressed_data[1 + basePos + x]
            basex = y * stride + x
            left = 0
            up = 0

            if x > 2:
                left = _get_pixel(basex - 3)
            if y > 0:
                up = _get_pixel(basex - stride)

            if filter_type == 1:  # Sub
                color = (color + left) & 0xff
            elif filter_type == 2:  # Up
                color = (color + up) & 0xff
            elif filter_type == 3:  # Average
                color = (color + ((left + up) >> 1)) & 0xff
            elif filter_type == 4:  # Paeth
                a = left
                b = up
                c = 0

                if x > 2 and y > 0:
                    c = _get_pixel(basex - stride - 3)

                p = a + b - c

                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)

                if pa <= pb and pa <= pc:
                    color = (color + a) & 0xff
                elif pb <= pc:
                    color = (color + b) & 0xff
                else:
                    color = (color + c) & 0xff

            current_row.append(color)

    return width, height, pixels


def write_xattr(path, key, value):
    # Windows: Write xattrs to NTFS Alternate Data Streams:
    # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
    if compat_os_name == 'nt':
        assert ':' not in key
        assert os.path.exists(path)

        try:
            with open(f'{path}:{key}', 'wb') as f:
                f.write(value)
        except OSError as e:
            raise XAttrMetadataError(e.errno, e.strerror)
        return

    # UNIX Method 1. Use xattrs/pyxattrs modules
    from .dependencies import xattr

    if xattr:
        if getattr(xattr, '_yt_dlp__identifier', None) == 'pyxattr':
            # Unicode arguments are not supported in pyxattr until version 0.5.0
            # See https://github.com/ytdl-org/youtube-dl/issues/5498
            if version_tuple(xattr.__version__) >= (0, 5, 0):
                setxattr = xattr.set
            else:
                setxattr = xattr.setxattr
        else:  # xattr
            setxattr = xattr.setxattr

        try:
            setxattr(path, key, value)
        except OSError as e:
            raise XAttrMetadataError(e.errno, e.strerror)
        return

    # UNIX Method 2. Use setfattr/xattr executables
    exe = ('setfattr' if check_executable('setfattr', ['--version'])
           else 'xattr' if check_executable('xattr', ['-h']) else None)
    if not exe:
        raise XAttrUnavailableError(
            'Couldn\'t find a tool to set the xattrs. Install either the python "xattr" or "pyxattr" modules or the '
            + ('"xattr" binary' if sys.platform != 'linux' else 'GNU "attr" package (which contains the "setfattr" tool)'))

    value = value.decode()
    try:
        _, stderr, returncode = Popen.run(
            [exe, '-w', key, value, path] if exe == 'xattr' else [exe, '-n', key, '-v', value, path],
            text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    except OSError as e:
        raise XAttrMetadataError(e.errno, e.strerror)
    if returncode:
        raise XAttrMetadataError(returncode, stderr)


def random_birthday(year_field, month_field, day_field):
    start_date = datetime.date(1950, 1, 1)
    end_date = datetime.date(1995, 12, 31)
    offset = random.randint(0, (end_date - start_date).days)
    random_date = start_date + datetime.timedelta(offset)
    return {
        year_field: str(random_date.year),
        month_field: str(random_date.month),
        day_field: str(random_date.day),
    }


# Templates for internet shortcut files, which are plain text files.
DOT_URL_LINK_TEMPLATE = '''\
[InternetShortcut]
URL=%(url)s
'''

DOT_WEBLOC_LINK_TEMPLATE = '''\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
\t<key>URL</key>
\t<string>%(url)s</string>
</dict>
</plist>
'''

DOT_DESKTOP_LINK_TEMPLATE = '''\
[Desktop Entry]
Encoding=UTF-8
Name=%(filename)s
Type=Link
URL=%(url)s
Icon=text-html
'''

LINK_TEMPLATES = {
    'url': DOT_URL_LINK_TEMPLATE,
    'desktop': DOT_DESKTOP_LINK_TEMPLATE,
    'webloc': DOT_WEBLOC_LINK_TEMPLATE,
}


def iri_to_uri(iri):
    """
    Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).

    The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact.
    """

    iri_parts = compat_urllib_parse_urlparse(iri)

    if '[' in iri_parts.netloc:
        raise ValueError('IPv6 URIs are not, yet, supported.')
        # Querying `.netloc`, when there's only one bracket, also raises a ValueError.

    # The `safe` argument values, that the following code uses, contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.

    net_location = ''
    if iri_parts.username:
        net_location += urllib.parse.quote(iri_parts.username, safe=r"!$%&'()*+,~")
        if iri_parts.password is not None:
            net_location += ':' + urllib.parse.quote(iri_parts.password, safe=r"!$%&'()*+,~")
        net_location += '@'

    net_location += iri_parts.hostname.encode('idna').decode()  # Punycode for Unicode hostnames.
    # The 'idna' encoding produces ASCII text.
    if iri_parts.port is not None and iri_parts.port != 80:
        net_location += ':' + str(iri_parts.port)

    return urllib.parse.urlunparse(
        (iri_parts.scheme,
            net_location,

            urllib.parse.quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),

            # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
            urllib.parse.quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),

            # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
            urllib.parse.quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),

            urllib.parse.quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))

    # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.


def to_high_limit_path(path):
    if sys.platform in ['win32', 'cygwin']:
        # Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited.
        return '\\\\?\\' + os.path.abspath(path)

    return path


def format_field(obj, field=None, template='%s', ignore=NO_DEFAULT, default='', func=None):
    val = traverse_obj(obj, *variadic(field))
    if (not val and val != 0) if ignore is NO_DEFAULT else val in ignore:
        return default
    return template % (func(val) if func else val)
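
# Typical uses (illustrative values):
#   >>> format_field({'width': 1920}, 'width', '%dpx')
#   '1920px'
#   >>> format_field({}, 'width', '%dpx', default='unknown')   # missing field
#   'unknown'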


def clean_podcast_url(url):
    return re.sub(r'''(?x)
        (?:
            (?:
                chtbl\.com/track|
                media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
                play\.podtrac\.com
            )/[^/]+|
            (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
            flex\.acast\.com|
            pd(?:
                cn\.co| # https://podcorn.com/analytics-prefix/
                st\.fm # https://podsights.com/docs/
            )/e
        )/''', '', url)


_HEX_TABLE = '0123456789abcdef'


def random_uuidv4():
    return re.sub(r'[xy]', lambda x: _HEX_TABLE[random.randint(0, 15)], 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')


def make_dir(path, to_screen=None):
    try:
        dn = os.path.dirname(path)
        if dn and not os.path.exists(dn):
            os.makedirs(dn)
        return True
    except OSError as err:
        if callable(to_screen):
            to_screen('unable to create directory ' + error_to_compat_str(err))
        return False


def get_executable_path():
    from .update import _get_variant_and_executable_path

    return os.path.dirname(os.path.abspath(_get_variant_and_executable_path()[1]))


def load_plugins(name, suffix, namespace):
    classes = {}
    with contextlib.suppress(FileNotFoundError):
        plugins_spec = importlib.util.spec_from_file_location(
            name, os.path.join(get_executable_path(), 'ytdlp_plugins', name, '__init__.py'))
        plugins = importlib.util.module_from_spec(plugins_spec)
        sys.modules[plugins_spec.name] = plugins
        plugins_spec.loader.exec_module(plugins)
        for name in dir(plugins):
            if name in namespace:
                continue
            if not name.endswith(suffix):
                continue
            klass = getattr(plugins, name)
            classes[name] = namespace[name] = klass
    return classes


def traverse_obj(
        obj, *path_list, default=None, expected_type=None, get_all=True,
        casesense=True, is_user_input=False, traverse_string=False):
    ''' Traverse nested list/dict/tuple
    @param path_list        A list of paths which are checked one by one.
                            Each path is a list of keys where each key is a:
                              - None:     Do nothing
                              - string:   A dictionary key
                              - int:      An index into a list
                              - tuple:    A list of keys all of which will be traversed
                              - Ellipsis: Fetch all values in the object
                              - Function: Takes the key and value as arguments
                                          and returns whether the key matches or not
    @param default          Default value to return
    @param expected_type    Only accept final value of this type (Can also be any callable)
    @param get_all          Return all the values obtained from a path or only the first one
    @param casesense        Whether to consider dictionary keys as case sensitive
    @param is_user_input    Whether the keys are generated from user input. If True,
                            strings are converted to int/slice if necessary
    @param traverse_string  Whether to traverse inside strings. If True, any
                            non-compatible object will also be converted into a string
    '''
    if not casesense:
        _lower = lambda k: (k.lower() if isinstance(k, str) else k)
        path_list = (map(_lower, variadic(path)) for path in path_list)

    def _traverse_obj(obj, path, _current_depth=0):
        nonlocal depth
        path = tuple(variadic(path))
        for i, key in enumerate(path):
            if None in (key, obj):
                return obj
            if isinstance(key, (list, tuple)):
                obj = [_traverse_obj(obj, sub_key, _current_depth) for sub_key in key]
                key = ...
            if key is ...:
                obj = (obj.values() if isinstance(obj, dict)
                       else obj if isinstance(obj, (list, tuple, LazyList))
                       else str(obj) if traverse_string else [])
                _current_depth += 1
                depth = max(depth, _current_depth)
                return [_traverse_obj(inner_obj, path[i + 1:], _current_depth) for inner_obj in obj]
            elif callable(key):
                if isinstance(obj, (list, tuple, LazyList)):
                    obj = enumerate(obj)
                elif isinstance(obj, dict):
                    obj = obj.items()
                else:
                    if not traverse_string:
                        return None
                    obj = str(obj)
                _current_depth += 1
                depth = max(depth, _current_depth)
                return [_traverse_obj(v, path[i + 1:], _current_depth) for k, v in obj if try_call(key, args=(k, v))]
            elif isinstance(obj, dict) and not (is_user_input and key == ':'):
                obj = (obj.get(key) if casesense or (key in obj)
                       else next((v for k, v in obj.items() if _lower(k) == key), None))
            else:
                if is_user_input:
                    key = (int_or_none(key) if ':' not in key
                           else slice(*map(int_or_none, key.split(':'))))
                    if key == slice(None):
                        return _traverse_obj(obj, (..., *path[i + 1:]), _current_depth)
                if not isinstance(key, (int, slice)):
                    return None
                if not isinstance(obj, (list, tuple, LazyList)):
                    if not traverse_string:
                        return None
                    obj = str(obj)
                try:
                    obj = obj[key]
                except IndexError:
                    return None
        return obj

    if isinstance(expected_type, type):
        type_test = lambda val: val if isinstance(val, expected_type) else None
    elif expected_type is not None:
        type_test = expected_type
    else:
        type_test = lambda val: val

    for path in path_list:
        depth = 0
        val = _traverse_obj(obj, path)
        if val is not None:
            if depth:
                for _ in range(depth - 1):
                    val = itertools.chain.from_iterable(v for v in val if v is not None)
                val = [v for v in map(type_test, val) if v is not None]
                if val:
                    return val if get_all else val[0]
            else:
                val = type_test(val)
                if val is not None:
                    return val
    return default
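
# A few representative traversals (illustrative data):
#   >>> d = {'formats': [{'url': 'https://a'}, {'height': 720, 'url': 'https://b'}]}
#   >>> traverse_obj(d, ('formats', 0, 'url'))
#   'https://a'
#   >>> traverse_obj(d, ('formats', ..., 'url'))    # Ellipsis fans out over the list
#   ['https://a', 'https://b']
#   >>> traverse_obj(d, 'missing', ('formats', 1, 'height'))   # first path that hits wins
#   720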


def traverse_dict(dictn, keys, casesense=True):
    write_string('DeprecationWarning: yt_dlp.utils.traverse_dict is deprecated '
                 'and may be removed in a future version. Use yt_dlp.utils.traverse_obj instead')
    return traverse_obj(dictn, keys, casesense=casesense, is_user_input=True, traverse_string=True)


def get_first(obj, keys, **kwargs):
    return traverse_obj(obj, (..., *variadic(keys)), **kwargs, get_all=False)


def variadic(x, allowed_types=(str, bytes, dict)):
    return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,)


def decode_base(value, digits):
    # This will convert given base-x string to scalar (long or int)
    table = {char: index for index, char in enumerate(digits)}
    result = 0
    base = len(digits)
    for chr in value:
        result *= base
        result += table[chr]
    return result
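
# The inverse direction of encode_base_n for a matching digit table:
#   >>> decode_base('ff', '0123456789abcdef')
#   255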


def time_seconds(**kwargs):
    # Returns the current time as a Unix timestamp; kwargs are passed to
    # datetime.timedelta to form the timezone offset (e.g. hours=9 for UTC+9)
    t = datetime.datetime.now(datetime.timezone(datetime.timedelta(**kwargs)))
    return t.timestamp()


# create a JSON Web Signature (jws) with HS256 algorithm
# the resulting format is in JWS Compact Serialization
# implemented following JWT https://www.rfc-editor.org/rfc/rfc7519.html
# implemented following JWS https://www.rfc-editor.org/rfc/rfc7515.html
def jwt_encode_hs256(payload_data, key, headers={}):
    header_data = {
        'alg': 'HS256',
        'typ': 'JWT',
    }
    if headers:
        header_data.update(headers)
    header_b64 = base64.b64encode(json.dumps(header_data).encode())
    payload_b64 = base64.b64encode(json.dumps(payload_data).encode())
    h = hmac.new(key.encode(), header_b64 + b'.' + payload_b64, hashlib.sha256)
    signature_b64 = base64.b64encode(h.digest())
    token = header_b64 + b'.' + payload_b64 + b'.' + signature_b64
    return token
5278 def jwt_decode_hs256(jwt
):
5279 header_b64
, payload_b64
, signature_b64
= jwt
.split('.')
5280 payload_data
= json
.loads(base64
.urlsafe_b64decode(payload_b64
))
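
# Round trip of the two helpers above (note: jwt_encode_hs256 returns bytes,
# while jwt_decode_hs256 expects the str form and does not verify the signature):
#   >>> token = jwt_encode_hs256({'uid': 123}, 'secret')
#   >>> jwt_decode_hs256(token.decode())
#   {'uid': 123}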


WINDOWS_VT_MODE = False if compat_os_name == 'nt' else None


@functools.cache
def supports_terminal_sequences(stream):
    if compat_os_name == 'nt':
        if not WINDOWS_VT_MODE:
            return False
    elif not os.getenv('TERM'):
        return False
    try:
        return stream.isatty()
    except BaseException:
        return False


def windows_enable_vt_mode():  # TODO: Do this the proper way https://bugs.python.org/issue30075
    if get_windows_version() < (10, 0, 10586):
        return
    global WINDOWS_VT_MODE
    try:
        Popen.run('', shell=True)
    except Exception:
        return

    WINDOWS_VT_MODE = True
    supports_terminal_sequences.cache_clear()


_terminal_sequences_re = re.compile('\033\\[[^m]+m')


def remove_terminal_sequences(string):
    return _terminal_sequences_re.sub('', string)


def number_of_digits(number):
    return len('%d' % number)


def join_nonempty(*values, delim='-', from_dict=None):
    if from_dict is not None:
        values = map(from_dict.get, values)
    return delim.join(map(str, filter(None, values)))
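
# Falsy values (None, '', 0) are dropped before joining:
#   >>> join_nonempty('1080p', None, 'mp4', delim='.')
#   '1080p.mp4'
#   >>> join_nonempty('width', 'height', from_dict={'width': 1920, 'height': 1080}, delim='x')
#   '1920x1080'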


def scale_thumbnails_to_max_format_width(formats, thumbnails, url_width_re):
    """
    Find the largest format dimensions in terms of video width and, for each thumbnail:
    * Modify the URL: Match the width with the provided regex and replace with the former width
    * Update dimensions

    This function is useful with video services that scale the provided thumbnails on demand
    """
    _keys = ('width', 'height')
    max_dimensions = max(
        (tuple(format.get(k) or 0 for k in _keys) for format in formats),
        default=(0, 0))
    if not max_dimensions[0]:
        return thumbnails
    return [
        merge_dicts(
            {'url': re.sub(url_width_re, str(max_dimensions[0]), thumbnail['url'])},
            dict(zip(_keys, max_dimensions)), thumbnail)
        for thumbnail in thumbnails
    ]
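
# A hedged sketch of the rescaling (the URL and width-matching pattern are
# illustrative; merge_dicts keeps the first non-empty value per key):
#   >>> scale_thumbnails_to_max_format_width(
#   ...     [{'width': 1920, 'height': 1080}],
#   ...     [{'url': 'https://example.com/thumb_320.jpg'}],
#   ...     r'\d+(?=\.jpg)')
#   [{'url': 'https://example.com/thumb_1920.jpg', 'width': 1920, 'height': 1080}]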


def parse_http_range(range):
    """ Parse value of "Range" or "Content-Range" HTTP header into tuple. """
    if not range:
        return None, None, None
    crg = re.search(r'bytes[ =](\d+)-(\d+)?(?:/(\d+))?', range)
    if not crg:
        return None, None, None
    return int(crg.group(1)), int_or_none(crg.group(2)), int_or_none(crg.group(3))
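
# Accepts both request and response header forms:
#   >>> parse_http_range('bytes=0-499')
#   (0, 499, None)
#   >>> parse_http_range('bytes 0-499/1234')   # Content-Range includes the total size
#   (0, 499, 1234)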


def read_stdin(what):
    eof = 'Ctrl+Z' if compat_os_name == 'nt' else 'Ctrl+D'
    write_string(f'Reading {what} from STDIN - EOF ({eof}) to end:\n')
    return sys.stdin


class Config:
    own_args = None
    parsed_args = None
    filename = None
    __initialized = False

    def __init__(self, parser, label=None):
        self.parser, self.label = parser, label
        self._loaded_paths, self.configs = set(), []

    def init(self, args=None, filename=None):
        assert not self.__initialized
        directory = ''
        if filename:
            location = os.path.realpath(filename)
            directory = os.path.dirname(location)
            if location in self._loaded_paths:
                return False
            self._loaded_paths.add(location)

        self.own_args, self.__initialized = args, True
        opts, _ = self.parser.parse_known_args(args)
        self.parsed_args, self.filename = args, filename

        for location in opts.config_locations or []:
            if location == '-':
                self.append_config(shlex.split(read_stdin('options'), comments=True), label='stdin')
                continue
            location = os.path.join(directory, expand_path(location))
            if os.path.isdir(location):
                location = os.path.join(location, 'yt-dlp.conf')
            if not os.path.exists(location):
                self.parser.error(f'config location {location} does not exist')
            self.append_config(self.read_file(location), location)
        return True

    def __str__(self):
        label = join_nonempty(
            self.label, 'config', f'"{self.filename}"' if self.filename else '',
            delim=' ')
        return join_nonempty(
            self.own_args is not None and f'{label[0].upper()}{label[1:]}: {self.hide_login_info(self.own_args)}',
            *(f'\n{c}'.replace('\n', '\n| ')[1:] for c in self.configs),
            delim='\n')

    @staticmethod
    def read_file(filename, default=[]):
        try:
            optionf = open(filename)
        except OSError:
            return default  # silently skip if file is not present
        try:
            # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
            contents = optionf.read()
            res = shlex.split(contents, comments=True)
        finally:
            optionf.close()
        return res

    @staticmethod
    def hide_login_info(opts):
        PRIVATE_OPTS = {'-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'}
        eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')

        def _scrub_eq(o):
            m = eqre.match(o)
            if m:
                return m.group('key') + '=PRIVATE'
            else:
                return o

        opts = list(map(_scrub_eq, opts))
        for idx, opt in enumerate(opts):
            if opt in PRIVATE_OPTS and idx + 1 < len(opts):
                opts[idx + 1] = 'PRIVATE'
        return opts

    def append_config(self, *args, label=None):
        config = type(self)(self.parser, label)
        config._loaded_paths = self._loaded_paths
        if config.init(*args):
            self.configs.append(config)

    @property
    def all_args(self):
        for config in reversed(self.configs):
            yield from config.all_args
        yield from self.parsed_args or []

    def parse_known_args(self, **kwargs):
        return self.parser.parse_known_args(self.all_args, **kwargs)

    def parse_args(self):
        return self.parser.parse_args(self.all_args)


class WebSocketsWrapper():
    """Wraps websockets module to use in non-async scopes"""
    pool = None

    def __init__(self, url, headers=None, connect=True):
        self.loop = asyncio.new_event_loop()
        # XXX: "loop" is deprecated
        self.conn = websockets.connect(
            url, extra_headers=headers, ping_interval=None,
            close_timeout=float('inf'), loop=self.loop, ping_timeout=float('inf'))
        if connect:
            self.__enter__()
        atexit.register(self.__exit__, None, None, None)

    def __enter__(self):
        if not self.pool:
            self.pool = self.run_with_loop(self.conn.__aenter__(), self.loop)
        return self

    def send(self, *args):
        self.run_with_loop(self.pool.send(*args), self.loop)

    def recv(self, *args):
        return self.run_with_loop(self.pool.recv(*args), self.loop)

    def __exit__(self, type, value, traceback):
        try:
            return self.run_with_loop(self.conn.__aexit__(type, value, traceback), self.loop)
        finally:
            self.loop.close()
            self._cancel_all_tasks(self.loop)

    # taken from https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py with modifications
    # for contributors: if a new library that uses asyncio needs to run in non-async code, move these functions out of this class
    @staticmethod
    def run_with_loop(main, loop):
        if not asyncio.iscoroutine(main):
            raise ValueError(f'a coroutine was expected, got {main!r}')

        try:
            return loop.run_until_complete(main)
        finally:
            loop.run_until_complete(loop.shutdown_asyncgens())
            if hasattr(loop, 'shutdown_default_executor'):
                loop.run_until_complete(loop.shutdown_default_executor())

    @staticmethod
    def _cancel_all_tasks(loop):
        to_cancel = asyncio.all_tasks(loop)

        if not to_cancel:
            return

        for task in to_cancel:
            task.cancel()

        # XXX: "loop" is removed in python 3.10+
        loop.run_until_complete(
            asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))

        for task in to_cancel:
            if task.cancelled():
                continue
            if task.exception() is not None:
                loop.call_exception_handler({
                    'message': 'unhandled exception during asyncio.run() shutdown',
                    'exception': task.exception(),
                    'task': task,
                })


def merge_headers(*dicts):
    """Merge dicts of http headers case insensitively, prioritizing the latter ones"""
    return {k.title(): v for k, v in itertools.chain.from_iterable(map(dict.items, dicts))}
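
# Later dicts win; keys are normalized via str.title():
#   >>> merge_headers({'user-agent': 'A', 'Accept': '*/*'}, {'User-Agent': 'B'})
#   {'User-Agent': 'B', 'Accept': '*/*'}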


class classproperty:
    """classmethod(property(func)) that works in py < 3.9"""

    def __init__(self, func):
        functools.update_wrapper(self, func)
        self.func = func

    def __get__(self, _, cls):
        return self.func(cls)


class Namespace(types.SimpleNamespace):
    """Immutable namespace"""

    def __iter__(self):
        return iter(self.__dict__.values())

    @property
    def items_(self):
        return self.__dict__.items()


has_certifi = bool(certifi)
has_websockets = bool(websockets)