from __future__ import unicode_literals

import xml.etree.ElementTree

from .compat import (
    compat_HTMLParseError,
    compat_ctypes_WINFUNCTYPE,
    compat_etree_fromstring,
    compat_html_entities_html5,
    compat_urllib_parse_urlencode,
    compat_urllib_parse_urlparse,
    compat_urllib_parse_urlunparse,
    compat_urllib_parse_quote,
    compat_urllib_parse_quote_plus,
    compat_urllib_parse_unquote_plus,
    compat_urllib_request,
)

def register_socks_protocols():
    # "Register" SOCKS protocols
    # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
    # URLs with protocols not in urlparse.uses_netloc are not handled correctly
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in compat_urlparse.uses_netloc:
            compat_urlparse.uses_netloc.append(scheme)


# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))

def random_user_agent():
    _USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
    return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)

SUPPORTED_ENCODINGS = [
    'gzip', 'deflate',
]
if compat_brotli:
    SUPPORTED_ENCODINGS.append('br')

std_headers = {
    'User-Agent': random_user_agent(),
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': ', '.join(SUPPORTED_ENCODINGS),
    'Accept-Language': 'en-us,en;q=0.5',
    'Sec-Fetch-Mode': 'navigate',
}

USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}

NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}

KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'f4f', 'f4m', 'm3u8', 'smil')

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))

DATE_FORMATS = (
    '%Y-%m-%d %H:%M:%S.%f',
    '%Y-%m-%d %H:%M:%S:%f',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%b %d %Y at %H:%M:%S',
    '%B %d %Y at %H:%M:%S',
)

DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
])

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])

PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'

def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    pref = locale.getpreferredencoding()

def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non ascii characters unless we
        # use a unicode object
        path_basename = lambda f: os.path.basename(fn).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    args = {
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'

    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))

    try:
        with tf:
            json.dump(obj, tf, ensure_ascii=False)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        mask = os.umask(0)
        os.umask(mask)
        os.chmod(tf.name, 0o666 & ~mask)
        os.rename(tf.name, fn)
    except Exception:
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise

if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] """
        assert re.match(r'^[a-zA-Z_-]+$', key)
        expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
        return node.find(expr)
else:
    def find_xpath_attr(node, xpath, key, val=None):
        for f in node.findall(compat_xpath(xpath)):
            if key not in f.attrib:
                continue
            if val is None or f.attrib.get(key) == val:
                return f
        return None

# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter


def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)
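
# Editor's note (illustrative, not part of the original module): xpath_with_ns
# expands "prefix:tag" path components using the given namespace map, e.g.
#   >>> xpath_with_ns('./media:song/media:title', {'media': 'http://example.com/'})
#   './{http://example.com/}song/{http://example.com/}title'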

def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(compat_xpath(xpath))

    if isinstance(xpath, (str, compat_str)):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n


def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text

def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = '%s[@%s]' % (xpath, key) if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]

def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html)


def get_element_html_by_id(id, html):
    """Return the html of the tag with the specified ID in the passed HTML document"""
    return get_element_html_by_attribute('id', id, html)


def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_html_by_class(class_name, html):
    """Return the html of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_html_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_by_attribute(attribute, value, html, escape_value=True):
    retval = get_elements_by_attribute(attribute, value, html, escape_value)
    return retval[0] if retval else None


def get_element_html_by_attribute(attribute, value, html, escape_value=True):
    retval = get_elements_html_by_attribute(attribute, value, html, escape_value)
    return retval[0] if retval else None


def get_elements_by_class(class_name, html):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_html_by_class(class_name, html):
    """Return the html of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_html_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_by_attribute(*args, **kwargs):
    """Return the content of the tag with the specified attribute in the passed HTML document"""
    return [content for content, _ in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_html_by_attribute(*args, **kwargs):
    """Return the html of the tag with the specified attribute in the passed HTML document"""
    return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]
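
# Editor's note (illustrative, not part of the original module): the class helpers
# match elements whose class attribute contains the given word, e.g.
#   >>> get_element_by_class('foo', '<span class="foo bar">nice</span>')
#   'nice'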

def get_elements_text_and_html_by_attribute(attribute, value, html, escape_value=True):
    """
    Return the text (content) and the html (whole) of the tag with the specified
    attribute in the passed HTML document
    """

    value_quote_optional = '' if re.match(r'''[\s"'`=<>]''', value) else '?'

    value = re.escape(value) if escape_value else value

    partial_element_re = r'''(?x)
        <(?P<tag>[a-zA-Z0-9:._-]+)
         (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
         \s%(attribute)s\s*=\s*(?P<_q>['"]%(vqo)s)(?-x:%(value)s)(?P=_q)
        ''' % {'attribute': re.escape(attribute), 'value': value, 'vqo': value_quote_optional}

    for m in re.finditer(partial_element_re, html):
        content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():])

        yield (
            unescapeHTML(re.sub(r'^(?P<q>["\'])(?P<content>.*)(?P=q)$', r'\g<content>', content, flags=re.DOTALL)),
            whole,
        )

class HTMLBreakOnClosingTagParser(compat_HTMLParser):
    """
    HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
    closing tag for the first opening tag it has encountered, and can be used
    as a context manager
    """

    class HTMLBreakOnClosingTagException(Exception):
        pass

    def __init__(self):
        self.tagstack = collections.deque()
        compat_HTMLParser.__init__(self)

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def close(self):
        # handle_endtag does not return upon raising HTMLBreakOnClosingTagException,
        # so data remains buffered; we no longer have any interest in it, thus
        # override this method to discard it
        pass

    def handle_starttag(self, tag, _):
        self.tagstack.append(tag)

    def handle_endtag(self, tag):
        if not self.tagstack:
            raise compat_HTMLParseError('no tags in the stack')
        while self.tagstack:
            inner_tag = self.tagstack.pop()
            if inner_tag == tag:
                break
        else:
            raise compat_HTMLParseError(f'matching opening tag for closing {tag} tag not found')
        if not self.tagstack:
            raise self.HTMLBreakOnClosingTagException()

def get_element_text_and_html_by_tag(tag, html):
    """
    For the first element with the specified tag in the passed HTML document
    return its content (text) and the whole element (html)
    """
    def find_or_raise(haystack, needle, exc):
        try:
            return haystack.index(needle)
        except ValueError:
            raise exc

    closing_tag = f'</{tag}>'
    whole_start = find_or_raise(
        html, f'<{tag}', compat_HTMLParseError(f'opening {tag} tag not found'))
    content_start = find_or_raise(
        html[whole_start:], '>', compat_HTMLParseError(f'malformed opening {tag} tag'))
    content_start += whole_start + 1
    with HTMLBreakOnClosingTagParser() as parser:
        parser.feed(html[whole_start:content_start])
        if not parser.tagstack or parser.tagstack[0] != tag:
            raise compat_HTMLParseError(f'parser did not match opening {tag} tag')
        offset = content_start
        while offset < len(html):
            next_closing_tag_start = find_or_raise(
                html[offset:], closing_tag,
                compat_HTMLParseError(f'closing {tag} tag not found'))
            next_closing_tag_end = next_closing_tag_start + len(closing_tag)
            try:
                parser.feed(html[offset:offset + next_closing_tag_end])
                offset += next_closing_tag_end
            except HTMLBreakOnClosingTagParser.HTMLBreakOnClosingTagException:
                return html[content_start:offset + next_closing_tag_start], \
                    html[whole_start:offset + next_closing_tag_end]
        raise compat_HTMLParseError('unexpected end of html')

class HTMLAttributeParser(compat_HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""

    def __init__(self):
        self.attrs = {}
        compat_HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)


class HTMLListAttrsParser(compat_HTMLParser):
    """HTML parser to gather the attributes for the elements of a list"""

    def __init__(self):
        compat_HTMLParser.__init__(self)
        self.items = []
        self._level = 0

    def handle_starttag(self, tag, attrs):
        if tag == 'li' and self._level == 0:
            self.items.append(dict(attrs))
        self._level += 1

    def handle_endtag(self, tag):
        self._level -= 1

def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
    but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
    """
    parser = HTMLAttributeParser()
    try:
        parser.feed(html_element)
        parser.close()
    # Older Python may throw HTMLParseError in case of malformed HTML
    except compat_HTMLParseError:
        pass
    return parser.attrs

def parse_list(webpage):
    """Given a string for a series of HTML <li> elements,
    return a list of dictionaries of their attributes"""
    parser = HTMLListAttrsParser()
    parser.feed(webpage)
    parser.close()
    return parser.items

def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    html = re.sub(r'\s+', ' ', html)
    html = re.sub(r'(?u)\s?<\s?br\s?/?\s?>\s?', '\n', html)
    html = re.sub(r'(?u)<\s?/\s?p\s?>\s?<\s?p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()
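
# Editor's note (illustrative, not part of the original module): clean_html strips
# tags and turns <br> into newlines, e.g.
#   >>> clean_html('line one<br/>line two')
#   'line one\nline two'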

def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = locked_file(filename, open_mode, block=False).open()
        return (stream, filename)
    except (IOError, OSError) as err:
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars
        alt_filename = sanitize_path(filename)
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller
            stream = locked_file(filename, open_mode, block=False).open()
            return (stream, alt_filename)

def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp
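
# Editor's note (illustrative, not part of the original module): for an RFC 2822 date,
#   >>> timeconvert('Wed, 14 Oct 2015 07:28:00 GMT')   # expected UNIX timestamp
#   1444807680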

def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept
    if possible.
    """
    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        elif not restricted and char == '\n':
            return ' '
        elif char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '_-' if restricted else ' -'
        elif char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and ord(char) > 127:
            return '_'
        return char

    # Handle timestamps
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
    result = ''.join(map(replace_insane, s))
    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
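
# Editor's note (illustrative, not part of the original module): timestamps keep their
# digits but lose the colons, and restricted mode transliterates accents, e.g.
#   >>> sanitize_filename('New World record at 0:12:34')
#   'New World record at 0_12_34'
#   >>> sanitize_filename('ÄöÜ', restricted=True)
#   'AoU'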

def sanitize_path(s, force=False):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform == 'win32':
        drive_or_unc, _ = os.path.splitdrive(s)
        if sys.version_info < (2, 7) and not drive_or_unc:
            drive_or_unc, _ = os.path.splitunc(s)
    elif force:
        drive_or_unc = ''
    else:
        return s

    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    elif force and s[0] == os.path.sep:
        sanitized_path.insert(0, os.path.sep)
    return os.path.join(*sanitized_path)
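
# Editor's note (illustrative, not part of the original module): on Windows the
# forbidden characters are replaced per path component, e.g.
#   >>> sanitize_path('abc|def')   # win32 only; other platforms return the input unchanged
#   'abc#def'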

def sanitize_url(url):
    # Prepend protocol-less URLs with `http:` scheme in order to mitigate
    # the number of unwanted failures due to missing protocol
    if url.startswith('//'):
        return 'http:%s' % url
    # Fix some common typos seen so far
    COMMON_TYPOS = (
        # https://github.com/ytdl-org/youtube-dl/issues/15649
        (r'^httpss://', r'https://'),
        # https://bx1.be/lives/direct-tv/
        (r'^rmtp([es]?)://', r'rtmp\1://'),
    )
    for mistake, fixup in COMMON_TYPOS:
        if re.match(mistake, url):
            return re.sub(mistake, fixup, url)
    return url
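
# Editor's note (illustrative, not part of the original module):
#   >>> sanitize_url('//example.com/playlist')
#   'http://example.com/playlist'
#   >>> sanitize_url('httpss://example.com/video')
#   'https://example.com/video'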

def extract_basic_auth(url):
    parts = compat_urlparse.urlsplit(url)
    if parts.username is None:
        return url, None
    url = compat_urlparse.urlunsplit(parts._replace(netloc=(
        parts.hostname if parts.port is None
        else '%s:%d' % (parts.hostname, parts.port))))
    auth_payload = base64.b64encode(
        ('%s:%s' % (parts.username, parts.password or '')).encode('utf-8'))
    return url, 'Basic ' + auth_payload.decode('utf-8')
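
# Editor's note (illustrative, not part of the original module): credentials are moved
# out of the netloc and returned as an Authorization header value, e.g.
#   >>> extract_basic_auth('http://user:pass@example.com/x')
#   ('http://example.com/x', 'Basic dXNlcjpwYXNz')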

def sanitized_Request(url, *args, **kwargs):
    url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
    if auth_header is not None:
        headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
        headers['Authorization'] = auth_header
    return compat_urllib_request.Request(url, *args, **kwargs)

def expand_path(s):
    """Expand shell variables and ~"""
    return os.path.expandvars(compat_expanduser(s))


def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    res = []
    for el in iterable:
        if el not in res:
            res.append(el)
    return res

def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in compat_html_entities_html5:
        return compat_html_entities_html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/ytdl-org/youtube-dl/issues/7518
        try:
            return compat_chr(int(numstr, base))
        except ValueError:
            pass

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert type(s) == compat_str

    return re.sub(
        r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
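
# Editor's note (illustrative, not part of the original module):
#   >>> unescapeHTML('&amp;')
#   '&'
#   >>> unescapeHTML('&#39;')
#   "'"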

def escapeHTML(text):
    return (
        text
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;')
        .replace('"', '&quot;')
        .replace("'", '&#39;')
    )


def process_communicate_or_kill(p, *args, **kwargs):
    try:
        return p.communicate(*args, **kwargs)
    except BaseException:  # Including KeyboardInterrupt
        p.kill()
        p.wait()
        raise


class Popen(subprocess.Popen):
    if sys.platform == 'win32':
        _startupinfo = subprocess.STARTUPINFO()
        _startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    else:
        _startupinfo = None

    def __init__(self, *args, **kwargs):
        super(Popen, self).__init__(*args, **kwargs, startupinfo=self._startupinfo)

    def communicate_or_kill(self, *args, **kwargs):
        return process_communicate_or_kill(self, *args, **kwargs)

def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding


def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """

    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    # Pass '' directly to use Unicode APIs on Windows 2000 and up
    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        return s

    # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
    if sys.platform.startswith('java'):
        return s

    return s.encode(get_subprocess_encoding(), 'ignore')


def decodeFilename(b, for_subprocess=False):
    if sys.version_info >= (3, 0):
        return b
    if not isinstance(b, bytes):
        return b
    return b.decode(get_subprocess_encoding(), 'ignore')


def encodeArgument(s):
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)


def decodeArgument(b):
    return decodeFilename(b, True)


def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval

_timetuple = collections.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))


def timetuple_from_msec(msec):
    secs, msec = divmod(msec, 1000)
    mins, secs = divmod(secs, 60)
    hrs, mins = divmod(mins, 60)
    return _timetuple(hrs, mins, secs, msec)
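
# Editor's note (illustrative, not part of the original module):
#   >>> timetuple_from_msec(345244)
#   Time(hours=0, minutes=5, seconds=45, milliseconds=244)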

def formatSeconds(secs, delim=':', msec=False):
    time = timetuple_from_msec(secs * 1000)
    if time.hours:
        ret = '%d%s%02d%s%02d' % (time.hours, delim, time.minutes, delim, time.seconds)
    elif time.minutes:
        ret = '%d%s%02d' % (time.minutes, delim, time.seconds)
    else:
        ret = '%d' % time.seconds
    return '%s.%03d' % (ret, time.milliseconds) if msec else ret


def _ssl_load_windows_store_certs(ssl_context, storename):
    # Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
    try:
        certs = [cert for cert, encoding, trust in ssl.enum_certificates(storename)
                 if encoding == 'x509_asn' and (
                     trust is True or ssl.Purpose.SERVER_AUTH.oid in trust)]
    except PermissionError:
        return
    for cert in certs:
        try:
            ssl_context.load_verify_locations(cadata=cert)
        except ssl.SSLError:
            pass

def make_HTTPS_handler(params, **kwargs):
    opts_check_certificate = not params.get('nocheckcertificate')
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = opts_check_certificate
    if params.get('legacyserverconnect'):
        context.options |= 4  # SSL_OP_LEGACY_SERVER_CONNECT
    context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
    if opts_check_certificate:
        try:
            context.load_default_certs()
            # Work around the issue in load_default_certs when there are bad certificates. See:
            # https://github.com/yt-dlp/yt-dlp/issues/1060,
            # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
        except ssl.SSLError:
            # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
            if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
                # Create a new context to discard any certificates that were already loaded
                context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
                context.check_hostname, context.verify_mode = True, ssl.CERT_REQUIRED
                for storename in ('CA', 'ROOT'):
                    _ssl_load_windows_store_certs(context, storename)
            context.set_default_verify_paths()
    return YoutubeDLHTTPSHandler(params, context=context, **kwargs)


def bug_reports_message(before=';'):
    msg = ('please report this issue on https://github.com/yt-dlp/yt-dlp , '
           'filling out the appropriate issue template. '
           'Confirm you are on the latest version using yt-dlp -U')

    before = before.rstrip()
    if not before or before.endswith(('.', '!', '?')):
        msg = msg[0].title() + msg[1:]

    return (before + ' ' if before else '') + msg


class YoutubeDLError(Exception):
    """Base exception for YoutubeDL errors."""
    msg = None

    def __init__(self, msg=None):
        if msg is not None:
            self.msg = msg
        elif self.msg is None:
            self.msg = type(self).__name__
        super().__init__(self.msg)


network_exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
    network_exceptions.append(ssl.CertificateError)
network_exceptions = tuple(network_exceptions)


class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
        """
        if sys.exc_info()[0] in network_exceptions:
            expected = True

        self.orig_msg = str(msg)
        self.traceback = tb
        self.expected = expected
        self.cause = cause
        self.video_id = video_id
        self.ie = ie
        self.exc_info = sys.exc_info()  # preserve original exception

        super(ExtractorError, self).__init__(''.join((
            format_field(ie, template='[%s] '),
            format_field(video_id, template='%s: '),
            msg,
            format_field(cause, template=' (caused by %r)'),
            '' if expected else bug_reports_message())))

    def format_traceback(self):
        return join_nonempty(
            self.traceback and ''.join(traceback.format_tb(self.traceback)),
            self.cause and ''.join(traceback.format_exception(None, self.cause, self.cause.__traceback__)[1:]),
            delim='\n') or None

class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super(UnsupportedError, self).__init__(
            'Unsupported URL: %s' % url, expected=True)


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class GeoRestrictedError(ExtractorError):
    """Geographic restriction Error exception.

    This exception may be thrown when a video is not available from your
    geographic location due to geographic restrictions imposed by a website.
    """

    def __init__(self, msg, countries=None, **kwargs):
        kwargs['expected'] = True
        super(GeoRestrictedError, self).__init__(msg, **kwargs)
        self.countries = countries


class DownloadError(YoutubeDLError):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info


class EntryNotInPlaylist(YoutubeDLError):
    """Entry not in playlist exception.

    This exception will be thrown by YoutubeDL when a requested entry
    is not found in the playlist info_dict
    """
    msg = 'Entry not found in info'


class SameFileError(YoutubeDLError):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    msg = 'Fixed output name but more than one file to download'

    def __init__(self, filename=None):
        if filename is not None:
            self.msg += f': {filename}'
        super().__init__(self.msg)


class PostProcessingError(YoutubeDLError):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """


class DownloadCancelled(YoutubeDLError):
    """ Exception raised when the download queue should be interrupted """
    msg = 'The download was cancelled'


class ExistingVideoReached(DownloadCancelled):
    """ --break-on-existing triggered """
    msg = 'Encountered a video that is already in the archive, stopping due to --break-on-existing'


class RejectedVideoReached(DownloadCancelled):
    """ --break-on-reject triggered """
    msg = 'Encountered a video that did not match filter, stopping due to --break-on-reject'


class MaxDownloadsReached(DownloadCancelled):
    """ --max-downloads limit has been reached. """
    msg = 'Maximum number of downloads reached, stopping due to --max-downloads'


class ReExtractInfo(YoutubeDLError):
    """ Video info needs to be re-extracted. """

    def __init__(self, msg, expected=False):
        super().__init__(msg)
        self.expected = expected


class ThrottledDownload(ReExtractInfo):
    """ Download speed below --throttled-rate. """
    msg = 'The download speed is below throttle limit'

    def __init__(self):
        super().__init__(self.msg, expected=False)


class UnavailableVideoError(YoutubeDLError):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    msg = 'Unable to download video'

    def __init__(self, err=None):
        if err is not None:
            self.msg += f': {err}'
        super().__init__(self.msg)


class ContentTooShortError(YoutubeDLError):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        super(ContentTooShortError, self).__init__(
            'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
        )
        self.downloaded = downloaded
        self.expected = expected


class XAttrMetadataError(YoutubeDLError):
    def __init__(self, code=None, msg='Unknown error'):
        super(XAttrMetadataError, self).__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        if (self.code in (errno.ENOSPC, errno.EDQUOT)
                or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'


class XAttrUnavailableError(YoutubeDLError):
    pass

def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
    # expected HTTP responses to meet HTTP/1.0 or later (see also
    # https://github.com/ytdl-org/youtube-dl/issues/6727)
    if sys.version_info < (3, 0):
        kwargs['strict'] = True
    hc = http_class(*args, **compat_kwargs(kwargs))
    source_address = ydl_handler._params.get('source_address')

    if source_address is not None:
        # This is to workaround _create_connection() from socket where it will try all
        # address data from getaddrinfo() including IPv6. This filters the result from
        # getaddrinfo() based on the source_address value.
        # This is based on the cpython socket.create_connection() function.
        # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
        def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
            host, port = address
            err = None
            addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
            af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
            ip_addrs = [addr for addr in addrs if addr[0] == af]
            if addrs and not ip_addrs:
                ip_version = 'v4' if af == socket.AF_INET else 'v6'
                raise socket.error(
                    "No remote IP%s addresses available for connect, can't use '%s' as source address"
                    % (ip_version, source_address[0]))
            for res in ip_addrs:
                af, socktype, proto, canonname, sa = res
                sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                        sock.settimeout(timeout)
                    sock.bind(source_address)
                    sock.connect(sa)
                    err = None  # Explicitly break reference cycle
                    return sock
                except socket.error as _:
                    err = _
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            else:
                raise socket.error('getaddrinfo returns an empty list')
        if hasattr(hc, '_create_connection'):
            hc._create_connection = _create_connection
        sa = (source_address, 0)
        if hasattr(hc, 'source_address'):  # Python 2.7+
            hc.source_address = sa
        else:  # Python 2.6
            def _hc_connect(self, *args, **kwargs):
                sock = _create_connection(
                    (self.host, self.port), self.timeout, sa)
                if is_https:
                    self.sock = ssl.wrap_socket(
                        sock, self.key_file, self.cert_file,
                        ssl_version=ssl.PROTOCOL_TLSv1)
                else:
                    self.sock = sock
            hc.connect = functools.partial(_hc_connect, hc)

    return hc

def handle_youtubedl_headers(headers):
    filtered_headers = headers

    if 'Youtubedl-no-compression' in filtered_headers:
        filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
        del filtered_headers['Youtubedl-no-compression']

    return filtered_headers
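
# Editor's note (illustrative, not part of the original module): when the marker header
# is present, both it and any Accept-Encoding header are dropped, e.g.
#   >>> handle_youtubedl_headers({'User-Agent': 'UA', 'Accept-Encoding': 'gzip', 'Youtubedl-no-compression': '1'})
#   {'User-Agent': 'UA'}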

class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        conn_class = compat_http_client.HTTPConnection

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        if not data:
            return data
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def brotli(data):
        if not data:
            return data
        return compat_brotli.decompress(data)

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in self._params.get('http_headers', std_headers).items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # brotli
        if resp.headers.get('Content-encoding', '') == 'br':
            resp = compat_urllib_request.addinfourl(
                io.BytesIO(self.brotli(resp.read())), old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/ytdl-org/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                if sys.version_info >= (3, 0):
                    location = location.encode('iso-8859-1').decode('utf-8')
                else:
                    location = location.decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    if sys.version_info < (3, 0):
                        location_escaped = location_escaped.encode('utf-8')
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response

def make_socks_conn_class(base_class, socks_proxy):
    assert issubclass(base_class, (
        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))

    url_components = compat_urlparse.urlparse(socks_proxy)
    if url_components.scheme.lower() == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif url_components.scheme.lower() in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif url_components.scheme.lower() == 'socks4a':
        socks_type = ProxyType.SOCKS4A

    def unquote_if_non_empty(s):
        if not s:
            return s
        return compat_urllib_parse_unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if type(self.timeout) in (int, float):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            if isinstance(self, compat_http_client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection


class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        conn_class = self._https_conn_class

        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, True),
            req, **kwargs)

class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
    """
    See [1] for cookie file format.

    1. https://curl.haxx.se/docs/http-cookies.html
    """
    _HTTPONLY_PREFIX = '#HttpOnly_'
    _ENTRY_LEN = 7
    _HEADER = '''# Netscape HTTP Cookie File
# This file is generated by yt-dlp. Do not edit.

'''
    _CookieFileEntry = collections.namedtuple(
        'CookieFileEntry',
        ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))

    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
        """
        Save cookies to a file.

        Most of the code is taken from CPython 3.8 and slightly adapted
        to support cookie files with UTF-8 in both python 2 and 3.
        """
        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)

        # Store session cookies with `expires` set to 0 instead of an empty
        # string
        for cookie in self:
            if cookie.expires is None:
                cookie.expires = 0

        with io.open(filename, 'w', encoding='utf-8') as f:
            f.write(self._HEADER)
            now = time.time()
            for cookie in self:
                if not ignore_discard and cookie.discard:
                    continue
                if not ignore_expires and cookie.is_expired(now):
                    continue
                secure = 'TRUE' if cookie.secure else 'FALSE'
                if cookie.domain.startswith('.'):
                    initial_dot = 'TRUE'
                else:
                    initial_dot = 'FALSE'
                if cookie.expires is not None:
                    expires = compat_str(cookie.expires)
                else:
                    expires = ''
                if cookie.value is None:
                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
                    # with no name, whereas http.cookiejar regards it as a
                    # cookie with no value.
                    name = ''
                    value = cookie.name
                else:
                    name = cookie.name
                    value = cookie.value
                f.write(
                    '\t'.join([cookie.domain, initial_dot, cookie.path,
                               secure, expires, name, value]) + '\n')

    def load(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Load cookies from a file."""
        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)

        def prepare_line(line):
            if line.startswith(self._HTTPONLY_PREFIX):
                line = line[len(self._HTTPONLY_PREFIX):]
            # comments and empty lines are fine
            if line.startswith('#') or not line.strip():
                return line
            cookie_list = line.split('\t')
            if len(cookie_list) != self._ENTRY_LEN:
                raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list))
            cookie = self._CookieFileEntry(*cookie_list)
            if cookie.expires_at and not cookie.expires_at.isdigit():
                raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
            return line

        cf = io.StringIO()
        with io.open(filename, encoding='utf-8') as f:
            for line in f:
                try:
                    cf.write(prepare_line(line))
                except compat_cookiejar.LoadError as e:
                    write_string(
                        'WARNING: skipping cookie file entry due to %s: %r\n'
                        % (e, line), sys.stderr)
                    continue
        cf.seek(0)
        self._really_load(cf, filename, ignore_discard, ignore_expires)
        # Session cookies are denoted by either `expires` field set to
        # an empty string or 0. MozillaCookieJar only recognizes the former
        # (see [1]). So we need to force the latter to be recognized as session
        # cookies on our own.
        # Session cookies may be important for cookies-based authentication,
        # e.g. usually, when user does not check 'Remember me' check box while
        # logging in on a site, some important cookies are stored as session
        # cookies so that not recognizing them will result in failed login.
        # 1. https://bugs.python.org/issue17164
        for cookie in self:
            # Treat `expires=0` cookies as session cookies
            if cookie.expires == 0:
                cookie.expires = None
                cookie.discard = True

class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        # Python 2 will choke on next HTTP request in row if there are non-ASCII
        # characters in Set-Cookie HTTP header of last response (see
        # https://github.com/ytdl-org/youtube-dl/issues/6769).
        # In order to at least prevent crashing we will percent encode Set-Cookie
        # header before HTTPCookieProcessor starts processing it.
        # if sys.version_info < (3, 0) and response.headers:
        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
        #         set_cookie = response.headers.get(set_cookie_header)
        #         if set_cookie:
        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
        #             if set_cookie != set_cookie_escaped:
        #                 del response.headers[set_cookie_header]
        #                 response.headers[set_cookie_header] = set_cookie_escaped
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response

class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
    """YoutubeDL redirect handler

    The code is based on HTTPRedirectHandler implementation from CPython [1].

    This redirect handler solves two issues:
     - ensures redirect URL is always unicode under python 2
     - introduces support for experimental HTTP response status code
       308 Permanent Redirect [2] used by some sites [3]

    1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
    2. https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308
    3. https://github.com/ytdl-org/youtube-dl/issues/28768
    """

    http_error_301 = http_error_303 = http_error_307 = http_error_308 = compat_urllib_request.HTTPRedirectHandler.http_error_302

    def redirect_request(self, req, fp, code, msg, headers, newurl):
        """Return a Request or None in response to a redirect.

        This is called by the http_error_30x methods when a
        redirection response is received. If a redirection should
        take place, return a new Request to allow http_error_30x to
        perform the redirect. Otherwise, raise HTTPError if no-one
        else should try to handle this url. Return None if you can't
        but another Handler might.
        """
        m = req.get_method()
        if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
                 or code in (301, 302, 303) and m == "POST")):
            raise compat_HTTPError(req.full_url, code, msg, headers, fp)
        # Strictly (according to RFC 2616), 301 or 302 in response to
        # a POST MUST NOT cause a redirection without confirmation
        # from the user (of urllib.request, in this case). In practice,
        # essentially all clients do redirect in this case, so we do
        # the same.

        # On python 2 urlh.geturl() may sometimes return redirect URL
        # as byte string instead of unicode. This workaround allows
        # to force it always return unicode.
        if sys.version_info[0] < 3:
            newurl = compat_str(newurl)

        # Be conciliant with URIs containing a space. This is mainly
        # redundant with the more complete encoding done in http_error_302(),
        # but it is kept for compatibility with other callers.
        newurl = newurl.replace(' ', '%20')

        CONTENT_HEADERS = ("content-length", "content-type")
        # NB: don't use dict comprehension for python 2.6 compatibility
        newheaders = dict((k, v) for k, v in req.headers.items()
                          if k.lower() not in CONTENT_HEADERS)
        return compat_urllib_request.Request(
            newurl, headers=newheaders, origin_req_host=req.origin_req_host,
            unverifiable=True)


def extract_timezone(date_str):
    m = re.search(
        r'''(?x)
            ^.{8,}?                                          # >=8 char non-TZ prefix, if present
            (?P<tz>Z|                                        # just the UTC Z, or
                (?:(?<=.\b\d{4}|\b\d{2}:\d\d)|               # preceded by 4 digits or hh:mm or
                   (?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d)) # not preceded by 3 alpha word or >= 4 alpha or 2 digits
                [ ]?                                         # optional space
                (?P<sign>\+|-)                               # +/-
                (?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})   # hh[:]mm
            $)
        ''', date_str)
    if not m:
        timezone = datetime.timedelta()
    else:
        date_str = date_str[:-len(m.group('tz'))]
        if not m.group('sign'):
            timezone = datetime.timedelta()
        else:
            sign = 1 if m.group('sign') == '+' else -1
            timezone = datetime.timedelta(
                hours=sign * int(m.group('hours')),
                minutes=sign * int(m.group('minutes')))
    return timezone, date_str


def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        timezone, date_str = extract_timezone(date_str)

    try:
        date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
        dt = datetime.datetime.strptime(date_str, date_format) - timezone
        return calendar.timegm(dt.timetuple())
    except ValueError:
        pass


def date_formats(day_first=True):
    return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST


def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None
    upload_date = None
    # Replace commas
    date_str = date_str.replace(',', ' ')
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
    _, date_str = extract_timezone(date_str)

    for expression in date_formats(day_first):
        try:
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
        except ValueError:
            pass
    if upload_date is None:
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            try:
                upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
            except ValueError:
                pass
    if upload_date is not None:
        return compat_str(upload_date)


def unified_timestamp(date_str, day_first=True):
    if date_str is None:
        return None

    date_str = re.sub(r'[,|]', '', date_str)

    pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
    timezone, date_str = extract_timezone(date_str)

    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    # Remove unrecognized timezones from ISO 8601 alike timestamps
    m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
    if m:
        date_str = date_str[:-len(m.group('tz'))]

    # Python only supports microseconds, so remove nanoseconds
    m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
    if m:
        date_str = m.group(1)

    for expression in date_formats(day_first):
        try:
            dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
            return calendar.timegm(dt.timetuple())
        except ValueError:
            pass
    timetuple = email.utils.parsedate_tz(date_str)
    if timetuple:
        return calendar.timegm(timetuple) + pm_delta * 3600


def determine_ext(url, default_ext='unknown_video'):
    if url is None or '.' not in url:
        return default_ext
    guess = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', guess):
        return guess
    # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
    elif guess.rstrip('/') in KNOWN_EXTENSIONS:
        return guess.rstrip('/')
    else:
        return default_ext
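
# Editor's note (illustrative, not part of the original module): some expected results
# of the date/extension helpers above, based on the implementations shown:
#   >>> parse_iso8601('2014-03-23T23:04:26+0100')
#   1395612266
#   >>> unified_strdate('December 21, 2010')
#   '20101221'
#   >>> determine_ext('http://example.com/foo/bar.mp4/?download')
#   'mp4'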

def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
    return replace_extension(filename, sub_lang + '.' + sub_format, expected_real_ext)


def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today|yesterday|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?

    format: string date format used to return datetime object from
    precision: round the time portion of a datetime object.
                auto|microsecond|second|minute|hour|day.
                auto: round to the unit provided in date_str (if applicable).
    """
    auto_precision = False
    if precision == 'auto':
        auto_precision = True
        precision = 'microsecond'
    today = datetime_round(datetime.datetime.utcnow(), precision)
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match(
        r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)(s)?',
        date_str)
    if match is not None:
        start_time = datetime_from_str(match.group('start'), precision, format)
        time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1)
        unit = match.group('unit')
        if unit == 'month' or unit == 'year':
            new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time)
            unit = 'day'
        else:
            if unit == 'week':
                unit = 'day'
                time *= 7
            delta = datetime.timedelta(**{unit + 's': time})
            new_date = start_time + delta
        if auto_precision:
            return datetime_round(new_date, unit)
        return new_date

    return datetime_round(datetime.datetime.strptime(date_str, format), precision)


def date_from_str(date_str, format='%Y%m%d', strict=False):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today|yesterday|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?

    If "strict", only (now|today)[+-][0-9](day|week|month|year)(s)? is allowed

    format: string date format used to return datetime object from
    """
    if strict and not re.fullmatch(r'\d{8}|(now|today)[+-]\d+(day|week|month|year)(s)?', date_str):
        raise ValueError(f'Invalid date format {date_str}')
    return datetime_from_str(date_str, precision='microsecond', format=format).date()


def datetime_add_months(dt, months):
    """Increment/Decrement a datetime object by months."""
    month = dt.month + months - 1
    year = dt.year + month // 12
    month = month % 12 + 1
    day = min(dt.day, calendar.monthrange(year, month)[1])
    return dt.replace(year, month, day)


def datetime_round(dt, precision='day'):
    """
    Round a datetime object's time to a specific precision
    """
    if precision == 'microsecond':
        return dt

    unit_seconds = {
        'day': 86400,
        'hour': 3600,
        'minute': 60,
        'second': 1,
    }
    roundto = lambda x, n: ((x + n / 2) // n) * n
    timestamp = calendar.timegm(dt.timetuple())
    return datetime.datetime.utcfromtimestamp(roundto(timestamp, unit_seconds[precision]))


def hyphenate_date(date_str):
    """
    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is not None:
        return '-'.join(match.groups())
    else:
        return date_str


class DateRange(object):
    """Represents a time interval between two dates"""

    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        if start is not None:
            self.start = date_from_str(start, strict=True)
        else:
            self.start = datetime.datetime.min.date()
        if end is not None:
            self.end = date_from_str(end, strict=True)
        else:
            self.end = datetime.datetime.max.date()
        if self.start > self.end:
            raise ValueError('Date range: "%s" , the start date must be before the end date' % self)

    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)

    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end

    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
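
# Editor's note (illustrative, not part of the original module):
#   >>> datetime_add_months(datetime.date(2024, 1, 31), 1)   # day clamps to the month's last valid day
#   datetime.date(2024, 2, 29)
#   >>> '20200415' in DateRange('20200101', '20200630')
#   True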
1979 def platform_name():
1980 """ Returns the platform name as a compat_str """
1981 res
= platform
.platform()
1982 if isinstance(res
, bytes):
1983 res
= res
.decode(preferredencoding())
1985 assert isinstance(res
, compat_str
)
1989 def get_windows_version():
1990 ''' Get Windows version. None if it's not running on Windows '''
1991 if compat_os_name
== 'nt':
1992 return version_tuple(platform
.win32_ver()[1])
def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes
    import ctypes.wintypes

    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False

    GetStdHandle = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        ('GetStdHandle', ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    WriteConsoleW = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        ('GetConsoleMode', ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
                or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):
        return False

    def next_nonbmp_pos(s):
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)

    while s:
        count = min(next_nonbmp_pos(s), 1024)
        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True
def write_string(s, out=None, encoding=None):
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        if _windows_write_string(s, out):
            return

    if ('b' in getattr(out, 'mode', '')
            or sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        byt = s.encode(encoding or preferredencoding(), 'ignore')
        out.write(byt)
    elif hasattr(out, 'buffer'):
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
        byt = s.encode(enc, 'ignore')
        out.buffer.write(byt)
    else:
        out.write(s)
    out.flush()
def bytes_to_intlist(bs):
    if not bs:
        return []
    if isinstance(bs[0], int):  # Python 3
        return list(bs)
    else:
        return [ord(c) for c in bs]


def intlist_to_bytes(xs):
    if not xs:
        return b''
    return compat_struct_pack('%dB' % len(xs), *xs)
# Cross-platform file locking
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive, block):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)

        if not LockFileEx(msvcrt.get_osfhandle(f.fileno()),
                          (0x2 if exclusive else 0x0) | (0x0 if block else 0x1),
                          0, whole_low, whole_high, f._lock_file_overlapped_p):
            raise BlockingIOError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0, whole_low, whole_high,
                            f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
else:
    try:
        import fcntl

        def _lock_file(f, exclusive, block):
            try:
                fcntl.flock(f,
                            fcntl.LOCK_SH if not exclusive
                            else fcntl.LOCK_EX if block
                            else fcntl.LOCK_EX | fcntl.LOCK_NB)
            except BlockingIOError:
                raise
            except OSError:  # AOSP does not have flock()
                fcntl.lockf(f,
                            fcntl.LOCK_SH if not exclusive
                            else fcntl.LOCK_EX if block
                            else fcntl.LOCK_EX | fcntl.LOCK_NB)

        def _unlock_file(f):
            try:
                fcntl.flock(f, fcntl.LOCK_UN)
            except OSError:
                fcntl.lockf(f, fcntl.LOCK_UN)

    except ImportError:
        UNSUPPORTED_MSG = 'file locking is not supported on this platform'

        def _lock_file(f, exclusive, block):
            raise IOError(UNSUPPORTED_MSG)

        def _unlock_file(f):
            raise IOError(UNSUPPORTED_MSG)
class locked_file(object):
    _closed = False

    def __init__(self, filename, mode, block=True, encoding=None):
        assert mode in ['r', 'rb', 'a', 'ab', 'w', 'wb']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode
        self.block = block

    def __enter__(self):
        exclusive = 'r' not in self.mode
        try:
            _lock_file(self.f, exclusive, self.block)
        except IOError:
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            if not self._closed:
                _unlock_file(self.f)
        finally:
            self.f.close()
            self._closed = True

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)

    def open(self):
        return self.__enter__()

    def close(self, *args):
        self.__exit__(self, *args, value=False, traceback=False)
def get_filesystem_encoding():
    encoding = sys.getfilesystemencoding()
    return encoding if encoding is not None else 'utf-8'
def shell_quote(args):
    quoted_args = []
    encoding = get_filesystem_encoding()
    for a in args:
        if isinstance(a, bytes):
            # We may get a filename encoded with 'encodeFilename'
            a = a.decode(encoding)
        quoted_args.append(compat_shlex_quote(a))
    return ' '.join(quoted_args)
def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """

    url, idata = unsmuggle_url(url, {})
    data.update(idata)
    sdata = compat_urllib_parse_urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return url + '#' + sdata
def unsmuggle_url(smug_url, default=None):
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, sdata = smug_url.rpartition('#')
    jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
    data = json.loads(jsond)
    return url, data
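# Illustrative round-trip (doctest-style; the URL and payload are examples only):
#   >>> url = smuggle_url('https://example.com/video', {'referer': 'https://example.com'})
#   >>> unsmuggle_url(url)
#   ('https://example.com/video', {'referer': 'https://example.com'})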
def format_decimal_suffix(num, fmt='%d%s', *, factor=1000):
    """ Formats numbers with decimal suffixes like K, M, etc """
    num, factor = float_or_none(num), float(factor)
    if num is None or num < 0:
        return None
    POSSIBLE_SUFFIXES = 'kMGTPEZY'
    exponent = 0 if num == 0 else min(int(math.log(num, factor)), len(POSSIBLE_SUFFIXES))
    suffix = ['', *POSSIBLE_SUFFIXES][exponent]
    if factor == 1024:
        suffix = {'k': 'Ki', '': ''}.get(suffix, f'{suffix}i')
    converted = num / (factor ** exponent)
    return fmt % (converted, suffix)


def format_bytes(bytes):
    return format_decimal_suffix(bytes, '%.2f%sB', factor=1024) or 'N/A'
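# Illustrative usage (doctest-style):
#   >>> format_bytes(1536)
#   '1.50KiB'
#   >>> format_bytes(None)
#   'N/A'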
def lookup_unit_table(unit_table, s):
    units_re = '|'.join(re.escape(u) for u in unit_table)
    m = re.match(
        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
    if not m:
        return None
    num_str = m.group('num').replace(',', '.')
    mult = unit_table[m.group('unit')]
    return int(float(num_str) * mult)
def parse_filesize(s):
    if s is None:
        return None

    # The lower-case forms are of course incorrect and unofficial,
    # but we support those too
    _UNIT_TABLE = {
        'megabytes': 1000 ** 2,
        'mebibytes': 1024 ** 2,
        'gigabytes': 1000 ** 3,
        'gibibytes': 1024 ** 3,
        'terabytes': 1000 ** 4,
        'tebibytes': 1024 ** 4,
        'petabytes': 1000 ** 5,
        'pebibytes': 1024 ** 5,
        'exabytes': 1000 ** 6,
        'exbibytes': 1024 ** 6,
        'zettabytes': 1000 ** 7,
        'zebibytes': 1024 ** 7,
        'yottabytes': 1000 ** 8,
        'yobibytes': 1024 ** 8,
    }

    return lookup_unit_table(_UNIT_TABLE, s)
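# Illustrative usage (doctest-style; assumes the unit table above):
#   >>> parse_filesize('3 megabytes')
#   3000000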
def parse_count(s):
    if s is None:
        return None

    s = re.sub(r'^[^\d]+\s', '', s).strip()

    if re.match(r'^[\d,.]+$', s):
        return str_to_int(s)

    _UNIT_TABLE = {
        'k': 1000,
        'K': 1000,
        'm': 1000 ** 2,
        'M': 1000 ** 2,
        'b': 1000 ** 3,
        'B': 1000 ** 3,
    }

    ret = lookup_unit_table(_UNIT_TABLE, s)
    if ret is not None:
        return ret

    mobj = re.match(r'([\d,.]+)(?:$|\s)', s)
    if mobj:
        return str_to_int(mobj.group(1))
def parse_resolution(s):
    if s is None:
        return {}

    mobj = re.search(r'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s)
    if mobj:
        return {
            'width': int(mobj.group('w')),
            'height': int(mobj.group('h')),
        }

    mobj = re.search(r'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s)
    if mobj:
        return {'height': int(mobj.group(1))}

    mobj = re.search(r'\b([48])[kK]\b', s)
    if mobj:
        return {'height': int(mobj.group(1)) * 540}

    return {}
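# Illustrative usage (doctest-style):
#   >>> parse_resolution('1920x1080')
#   {'width': 1920, 'height': 1080}
#   >>> parse_resolution('720p')
#   {'height': 720}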
def parse_bitrate(s):
    if not isinstance(s, compat_str):
        return None
    mobj = re.search(r'\b(\d+)\s*kbps', s)
    if mobj:
        return int(mobj.group(1))
def month_by_name(name, lang='en'):
    """ Return the number of a month by (locale-independently) English name """

    month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])

    try:
        return month_names.index(name) + 1
    except ValueError:
        return None


def month_by_abbreviation(abbrev):
    """ Return the number of a month by (locale-independently) English
        abbreviation """

    try:
        return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
    except ValueError:
        return None
def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML"""
    return re.sub(
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;',
        xml_str)
def setproctitle(title):
    assert isinstance(title, compat_str)

    # ctypes in Jython is not complete
    # http://bugs.jython.org/issue2148
    if sys.platform.startswith('java'):
        return

    try:
        libc = ctypes.cdll.LoadLibrary('libc.so.6')
    except OSError:
        return
    except TypeError:
        # LoadLibrary in Windows Python 2.7.13 only expects
        # a bytestring, but since unicode_literals turns
        # every string into a unicode string, it fails.
        return
    title_bytes = title.encode('utf-8')
    buf = ctypes.create_string_buffer(len(title_bytes))
    buf.value = title_bytes
    try:
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this
def remove_start(s, start):
    return s[len(start):] if s is not None and s.startswith(start) else s


def remove_end(s, end):
    return s[:-len(end)] if s is not None and s.endswith(end) else s


def remove_quotes(s):
    if s is None or len(s) < 2:
        return s
    for quote in ('"', "'", ):
        if s[0] == quote and s[-1] == quote:
            return s[1:-1]
    return s
def get_domain(url):
    domain = re.match(r'(?:https?:\/\/)?(?:www\.)?(?P<domain>[^\n\/]+\.[^\n\/]+)(?:\/(.*))?', url)
    return domain.group('domain') if domain else None
def url_basename(url):
    path = compat_urlparse.urlparse(url).path
    return path.strip('/').split('/')[-1]


def base_url(url):
    return re.match(r'https?://[^?#&]+/', url).group()
def urljoin(base, path):
    if isinstance(path, bytes):
        path = path.decode('utf-8')
    if not isinstance(path, compat_str) or not path:
        return None
    if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
        return path
    if isinstance(base, bytes):
        base = base.decode('utf-8')
    if not isinstance(base, compat_str) or not re.match(
            r'^(?:https?:)?//', base):
        return None
    return compat_urlparse.urljoin(base, path)
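# Illustrative usage (doctest-style; URLs are examples only):
#   >>> urljoin('https://example.com/a/', 'b/c.mp4')
#   'https://example.com/a/b/c.mp4'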
class HEADRequest(compat_urllib_request.Request):
    def get_method(self):
        return 'HEAD'


class PUTRequest(compat_urllib_request.Request):
    def get_method(self):
        return 'PUT'
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    if get_attr and v is not None:
        v = getattr(v, get_attr, None)
    try:
        return int(v) * invscale // scale
    except (ValueError, TypeError, OverflowError):
        return default
def str_or_none(v, default=None):
    return default if v is None else compat_str(v)


def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    if isinstance(int_str, compat_integer_types):
        return int_str
    elif isinstance(int_str, compat_str):
        int_str = re.sub(r'[,\.\+]', '', int_str)
        return int_or_none(int_str)
def float_or_none(v, scale=1, invscale=1, default=None):
    if v is None:
        return default
    try:
        return float(v) * invscale / scale
    except (ValueError, TypeError):
        return default


def bool_or_none(v, default=None):
    return v if isinstance(v, bool) else default


def strip_or_none(v, default=None):
    return v.strip() if isinstance(v, compat_str) else default


def url_or_none(url):
    if not url or not isinstance(url, compat_str):
        return None
    url = url.strip()
    return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None
def request_to_url(req):
    if isinstance(req, compat_urllib_request.Request):
        return req.get_full_url()
    else:
        return req


def strftime_or_none(timestamp, date_format, default=None):
    datetime_object = None
    try:
        if isinstance(timestamp, compat_numeric_types):  # unix timestamp
            datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
        elif isinstance(timestamp, compat_str):  # assume YYYYMMDD
            datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
        return datetime_object.strftime(date_format)
    except (ValueError, TypeError, AttributeError):
        return default
def parse_duration(s):
    if not isinstance(s, compat_basestring):
        return None

    s = s.strip()
    if not s:
        return None

    days, hours, mins, secs, ms = [None] * 5
    m = re.match(r'''(?x)
            (?P<before_secs>
                (?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?
            (?P<secs>(?(before_secs)[0-9]{1,2}|[0-9]+))
            (?P<ms>[.:][0-9]+)?Z?$
        ''', s)
    if m:
        days, hours, mins, secs, ms = m.group('days', 'hours', 'mins', 'secs', 'ms')
    else:
        m = re.match(
            r'''(?ix)(?:P?
                (?:
                    [0-9]+\s*y(?:ears?)?\s*
                )?
                (?:
                    [0-9]+\s*m(?:onths?)?\s*
                )?
                (?:
                    [0-9]+\s*w(?:eeks?)?\s*
                )?
                (?:
                    (?P<days>[0-9]+)\s*d(?:ays?)?\s*
                )?
                T)?
                (?:
                    (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
                )?
                (?:
                    (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
                )?
                (?:
                    (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
                )?Z?$''', s)
        if m:
            days, hours, mins, secs, ms = m.groups()
        else:
            m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
            if m:
                hours, mins = m.groups()
            else:
                return None

    duration = 0
    if secs:
        duration += float(secs)
    if mins:
        duration += float(mins) * 60
    if hours:
        duration += float(hours) * 60 * 60
    if days:
        duration += float(days) * 24 * 60 * 60
    if ms:
        duration += float(ms.replace(':', '.'))
    return duration
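# Illustrative usage (doctest-style):
#   >>> parse_duration('1:23:45')
#   5025.0
#   >>> parse_duration('23.5s')
#   23.5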
def prepend_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return (
        '{0}.{1}{2}'.format(name, ext, real_ext)
        if not expected_real_ext or real_ext[1:] == expected_real_ext
        else '{0}.{1}'.format(filename, ext))


def replace_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return '{0}.{1}'.format(
        name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
        ext)
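# Illustrative usage (doctest-style):
#   >>> prepend_extension('video.mp4', 'temp')
#   'video.temp.mp4'
#   >>> replace_extension('video.mp4', 'mkv')
#   'video.mkv'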
def check_executable(exe, args=[]):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
    try:
        Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate_or_kill()
    except OSError:
        return False
    return exe


def _get_exe_version_output(exe, args):
    try:
        # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
        # SIGTTOU if yt-dlp is run in the background.
        # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
        out, _ = Popen(
            [encodeArgument(exe)] + args, stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate_or_kill()
    except OSError:
        return False
    if isinstance(out, bytes):  # Python 2.x
        out = out.decode('ascii', 'ignore')
    return out
def detect_exe_version(output, version_re=None, unrecognized='present'):
    assert isinstance(output, compat_str)
    if version_re is None:
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
    if m:
        return m.group(1)
    else:
        return unrecognized


def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized='present'):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    out = _get_exe_version_output(exe, args)
    return detect_exe_version(out, version_re, unrecognized) if out else False
class LazyList(collections.abc.Sequence):
    ''' Lazy immutable list from an iterable
    Note that slices of a LazyList are lists and not LazyList'''

    class IndexError(IndexError):
        pass

    def __init__(self, iterable, *, reverse=False, _cache=None):
        self.__iterable = iter(iterable)
        self.__cache = [] if _cache is None else _cache
        self.__reversed = reverse

    def __iter__(self):
        if self.__reversed:
            # We need to consume the entire iterable to iterate in reverse
            yield from self.exhaust()
            return
        yield from self.__cache
        for item in self.__iterable:
            self.__cache.append(item)
            yield item

    def __exhaust(self):
        self.__cache.extend(self.__iterable)
        # Discard the emptied iterable to make it pickle-able
        self.__iterable = []
        return self.__cache

    def exhaust(self):
        ''' Evaluate the entire iterable '''
        return self.__exhaust()[::-1 if self.__reversed else 1]

    @staticmethod
    def __reverse_index(x):
        return None if x is None else -(x + 1)

    def __getitem__(self, idx):
        if isinstance(idx, slice):
            if self.__reversed:
                idx = slice(self.__reverse_index(idx.start), self.__reverse_index(idx.stop), -(idx.step or 1))
            start, stop, step = idx.start, idx.stop, idx.step or 1
        elif isinstance(idx, int):
            if self.__reversed:
                idx = self.__reverse_index(idx)
            start, stop, step = idx, idx, 0
        else:
            raise TypeError('indices must be integers or slices')
        if ((start or 0) < 0 or (stop or 0) < 0
                or (start is None and step < 0)
                or (stop is None and step > 0)):
            # We need to consume the entire iterable to be able to slice from the end
            # Obviously, never use this with infinite iterables
            self.__exhaust()
            try:
                return self.__cache[idx]
            except IndexError as e:
                raise self.IndexError(e) from e
        n = max(start or 0, stop or 0) - len(self.__cache) + 1
        if n > 0:
            self.__cache.extend(itertools.islice(self.__iterable, n))
        try:
            return self.__cache[idx]
        except IndexError as e:
            raise self.IndexError(e) from e

    def __bool__(self):
        try:
            self[-1] if self.__reversed else self[0]
        except self.IndexError:
            return False
        return True

    def __len__(self):
        self.__exhaust()
        return len(self.__cache)

    def __reversed__(self):
        return type(self)(self.__iterable, reverse=not self.__reversed, _cache=self.__cache)

    def __copy__(self):
        return type(self)(self.__iterable, reverse=self.__reversed, _cache=self.__cache)

    def __repr__(self):
        # repr and str should mimic a list. So we exhaust the iterable
        return repr(self.exhaust())

    def __str__(self):
        return repr(self.exhaust())
class PagedList:
    class IndexError(IndexError):
        pass

    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())

    def __init__(self, pagefunc, pagesize, use_cache=True):
        self._pagefunc = pagefunc
        self._pagesize = pagesize
        self._pagecount = float('inf')
        self._use_cache = use_cache
        self._cache = {}

    def getpage(self, pagenum):
        page_results = self._cache.get(pagenum)
        if page_results is None:
            page_results = [] if pagenum > self._pagecount else list(self._pagefunc(pagenum))
        if self._use_cache:
            self._cache[pagenum] = page_results
        return page_results

    def getslice(self, start=0, end=None):
        return list(self._getslice(start, end))

    def _getslice(self, start, end):
        raise NotImplementedError('This method must be implemented by subclasses')

    def __getitem__(self, idx):
        assert self._use_cache, 'Indexing PagedList requires cache'
        if not isinstance(idx, int) or idx < 0:
            raise TypeError('indices must be non-negative integers')
        entries = self.getslice(idx, idx + 1)
        if not entries:
            raise self.IndexError()
        return entries[0]
class OnDemandPagedList(PagedList):
    def _getslice(self, start, end):
        for pagenum in itertools.count(start // self._pagesize):
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:
                continue

            startv = (
                start % self._pagesize
                if firstid <= start < nextfirstid
                else 0)
            endv = (
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
                else None)

            try:
                page_results = self.getpage(pagenum)
            except Exception:
                self._pagecount = pagenum - 1
                raise
            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            yield from page_results

            # A little optimization - if current page is not "full", ie. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page_results) + startv < self._pagesize:
                break

            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
                break
class InAdvancePagedList(PagedList):
    def __init__(self, pagefunc, pagecount, pagesize):
        PagedList.__init__(self, pagefunc, pagesize, True)
        self._pagecount = pagecount

    def _getslice(self, start, end):
        start_page = start // self._pagesize
        end_page = self._pagecount if end is None else min(self._pagecount, end // self._pagesize + 1)
        skip_elems = start - start_page * self._pagesize
        only_more = None if end is None else end - start
        for pagenum in range(start_page, end_page):
            page_results = self.getpage(pagenum)
            if skip_elems:
                page_results = page_results[skip_elems:]
                skip_elems = None
            if only_more is not None:
                if len(page_results) < only_more:
                    only_more -= len(page_results)
                else:
                    yield from page_results[:only_more]
                    break
            yield from page_results
def uppercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def lowercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    if sys.version_info < (3, 0) and isinstance(s, compat_str):
        s = s.encode('utf-8')
    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    url_parsed = compat_urllib_parse_urlparse(url)
    return url_parsed._replace(
        netloc=url_parsed.netloc.encode('idna').decode('ascii'),
        path=escape_rfc3986(url_parsed.path),
        params=escape_rfc3986(url_parsed.params),
        query=escape_rfc3986(url_parsed.query),
        fragment=escape_rfc3986(url_parsed.fragment)
    ).geturl()


def parse_qs(url):
    return compat_parse_qs(compat_urllib_parse_urlparse(url).query)
def read_batch_urls(batch_fd):
    def fixup(url):
        if not isinstance(url, compat_str):
            url = url.decode('utf-8', 'replace')
        BOM_UTF8 = ('\xef\xbb\xbf', '\ufeff')
        for bom in BOM_UTF8:
            if url.startswith(bom):
                url = url[len(bom):]
        url = url.strip()
        if not url or url.startswith(('#', ';', ']')):
            return False
        # "#" cannot be stripped out since it is part of the URI
        # However, it can be safely stripped out if following a whitespace
        return re.split(r'\s#', url, 1)[0].rstrip()

    with contextlib.closing(batch_fd) as fd:
        return [url for url in map(fixup, fd) if url]
def urlencode_postdata(*args, **kargs):
    return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
def update_url_query(url, query):
    if not query:
        return url
    parsed_url = compat_urlparse.urlparse(url)
    qs = compat_parse_qs(parsed_url.query)
    qs.update(query)
    return compat_urlparse.urlunparse(parsed_url._replace(
        query=compat_urllib_parse_urlencode(qs, True)))
def update_Request(req, url=None, data=None, headers={}, query={}):
    req_headers = req.headers.copy()
    req_headers.update(headers)
    req_data = data or req.data
    req_url = update_url_query(url or req.get_full_url(), query)
    req_get_method = req.get_method()
    if req_get_method == 'HEAD':
        req_type = HEADRequest
    elif req_get_method == 'PUT':
        req_type = PUTRequest
    else:
        req_type = compat_urllib_request.Request
    new_req = req_type(
        req_url, data=req_data, headers=req_headers,
        origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
    if hasattr(req, 'timeout'):
        new_req.timeout = req.timeout
    return new_req
def _multipart_encode_impl(data, boundary):
    content_type = 'multipart/form-data; boundary=%s' % boundary

    out = b''
    for k, v in data.items():
        out += b'--' + boundary.encode('ascii') + b'\r\n'
        if isinstance(k, compat_str):
            k = k.encode('utf-8')
        if isinstance(v, compat_str):
            v = v.encode('utf-8')
        # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
        # suggests sending UTF-8 directly. Firefox sends UTF-8, too
        content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
        if boundary.encode('ascii') in content:
            raise ValueError('Boundary overlaps with data')
        out += content

    out += b'--' + boundary.encode('ascii') + b'--\r\n'

    return out, content_type
def multipart_encode(data, boundary=None):
    '''
    Encode a dict to RFC 7578-compliant form-data

    data:
        A dict where keys and values can be either Unicode or bytes-like
        objects.
    boundary:
        If specified a Unicode object, it's used as the boundary. Otherwise
        a random boundary is generated.

    Reference: https://tools.ietf.org/html/rfc7578
    '''
    has_specified_boundary = boundary is not None

    while True:
        if boundary is None:
            boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))

        try:
            out, content_type = _multipart_encode_impl(data, boundary)
            break
        except ValueError:
            if has_specified_boundary:
                raise
            boundary = None

    return out, content_type
def dict_get(d, key_or_keys, default=None, skip_false_values=True):
    if isinstance(key_or_keys, (list, tuple)):
        for key in key_or_keys:
            if key not in d or d[key] is None or skip_false_values and not d[key]:
                continue
            return d[key]
        return default
    return d.get(key_or_keys, default)
def try_get(src, getter, expected_type=None):
    for get in variadic(getter):
        try:
            v = get(src)
        except (AttributeError, KeyError, TypeError, IndexError):
            pass
        else:
            if expected_type is None or isinstance(v, expected_type):
                return v
def merge_dicts(*dicts):
    merged = {}
    for a_dict in dicts:
        for k, v in a_dict.items():
            if v is None:
                continue
            if (k not in merged
                    or (isinstance(v, compat_str) and v
                        and isinstance(merged[k], compat_str)
                        and not merged[k])):
                merged[k] = v
    return merged
def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
    return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
TV_PARENTAL_GUIDELINES = {
    'TV-Y': 0,
    'TV-Y7': 7,
    'TV-G': 0,
    'TV-PG': 0,
    'TV-14': 14,
    'TV-MA': 17,
}


def parse_age_limit(s):
    # isinstance(False, int) is True, so the exact type must be checked
    if type(s) is int:
        return s if 0 <= s <= 21 else None
    if not isinstance(s, compat_basestring):
        return None
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    if m:
        return int(m.group('age'))
    s = s.upper()
    if s in US_RATINGS:
        return US_RATINGS[s]
    m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
    if m:
        return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
    return None


def strip_jsonp(code):
    return re.sub(
        r'''(?sx)^
            (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
            (?:\s*&&\s*(?P=func_name))?
            \s*\(\s*(?P<callback_data>.*)\);?
            \s*?(?://[^\n]*)*$''',
        r'\g<callback_data>', code)


def js_to_json(code, vars={}):
    # vars is a dict of var, val pairs to substitute
    COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
    SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
    INTEGER_TABLE = (
        (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
        (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
    )

    def fix_kv(m):
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        elif v in ('undefined', 'void 0'):
            return 'null'
        elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
            return ''

        if v[0] in ("'", '"'):
            v = re.sub(r'(?s)\\.|"', lambda m: {
                '"': '\\"',
                "\\'": "'",
                '\\\n': '',
                '\\x': '\\u00',
            }.get(m.group(0), m.group(0)), v[1:-1])
        else:
            for regex, base in INTEGER_TABLE:
                im = re.match(regex, v)
                if im:
                    i = int(im.group(1), base)
                    return '"%d":' % i if v.endswith(':') else '%d' % i

            if v in vars:
                return vars[v]

        return '"%s"' % v

    code = re.sub(r'new Date\((".+")\)', r'\g<1>', code)

    return re.sub(r'''(?sx)
        "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
        {comment}|,(?={skip}[\]}}])|
        void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
        \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
        [0-9]+(?={skip}:)|
        !+
        '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q


POSTPROCESS_WHEN = {'pre_process', 'after_filter', 'before_dl', 'after_move', 'post_process', 'after_video', 'playlist'}


DEFAULT_OUTTMPL = {
    'default': '%(title)s [%(id)s].%(ext)s',
    'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
}
OUTTMPL_TYPES = {
    'description': 'description',
    'annotation': 'annotations.xml',
    'infojson': 'info.json',
    'pl_thumbnail': None,
    'pl_description': 'description',
    'pl_infojson': 'info.json',
}

# As of [1] format syntax is:
#  %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
# 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
STR_FORMAT_RE_TMPL = r'''(?x)
    (?<!%)(?P<prefix>(?:%%)*)
    %
    (?P<has_key>\((?P<key>{0})\))?
    (?P<format>
        (?P<conversion>[#0\-+ ]+)?
        (?P<min_width>\d+)?
        (?P<precision>\.\d+)?
        (?P<len_mod>[hlL])?  # unused in python
        {1}  # conversion type
    )
'''

STR_FORMAT_TYPES = 'diouxXeEfFgGcrs'


def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) > length:
        return s[:length - len(ELLIPSES)] + ELLIPSES
    return s


def version_tuple(v):
    return tuple(int(e) for e in re.split(r'[-.]', v))


def is_outdated_version(version, limit, assume_new=True):
    if not version:
        return not assume_new
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return not assume_new
3282 """ Returns if yt-dlp can be updated with -U """
3284 from .update
import is_non_updateable
3286 return not is_non_updateable()
3289 def args_to_str(args
):
3290 # Get a short string representation for a subprocess command
3291 return ' '.join(compat_shlex_quote(a
) for a
in args
)
3294 def error_to_compat_str(err
):
3296 # On python 2 error byte string must be decoded with proper
3297 # encoding rather than ascii
3298 if sys
.version_info
[0] < 3:
3299 err_str
= err_str
.decode(preferredencoding())
def mimetype2ext(mt):
    if mt is None:
        return None

    mt, _, params = mt.partition(';')
    mt = mt.strip()

    FULL_MAP = {
        # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
        # it's the most popular one
        'audio/mpeg': 'mp3',
        'audio/x-wav': 'wav',
        'audio/wave': 'wav',
    }

    ext = FULL_MAP.get(mt)
    if ext is not None:
        return ext

    SUBTYPE_MAP = {
        'smptett+xml': 'tt',
        'x-mp4-fragmented': 'mp4',
        'x-ms-sami': 'sami',
        'x-mpegurl': 'm3u8',
        'vnd.apple.mpegurl': 'm3u8',
        'vnd.ms-sstr+xml': 'ism',
        'filmstrip+json': 'fs',
    }

    _, _, subtype = mt.rpartition('/')
    ext = SUBTYPE_MAP.get(subtype.lower())
    if ext is not None:
        return ext

    SUFFIX_MAP = {
        'json': 'json',
        'xml': 'xml',
    }

    _, _, suffix = subtype.partition('+')
    ext = SUFFIX_MAP.get(suffix)
    if ext is not None:
        return ext

    return subtype.replace('+', '.')
def ext2mimetype(ext_or_url):
    if not ext_or_url:
        return None
    if '.' not in ext_or_url:
        ext_or_url = f'file.{ext_or_url}'
    return mimetypes.guess_type(ext_or_url)[0]
def parse_codecs(codecs_str):
    # http://tools.ietf.org/html/rfc6381
    if not codecs_str:
        return {}
    split_codecs = list(filter(None, map(
        str.strip, codecs_str.strip().strip(',').split(','))))
    vcodec, acodec, tcodec, hdr = None, None, None, None
    for full_codec in split_codecs:
        parts = full_codec.split('.')
        codec = parts[0].replace('0', '')
        if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
                     'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
            if not vcodec:
                vcodec = '.'.join(parts[:4]) if codec in ('vp9', 'av1', 'hvc1') else full_codec
                if codec in ('dvh1', 'dvhe'):
                    hdr = 'DV'
                elif codec == 'av1' and len(parts) > 3 and parts[3] == '10':
                    hdr = 'HDR10'
                elif full_codec.replace('0', '').startswith('vp9.2'):
                    hdr = 'HDR10'
        elif codec in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
            if not acodec:
                acodec = full_codec
        elif codec in ('stpp', 'wvtt',):
            if not tcodec:
                tcodec = full_codec
        else:
            write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
    if vcodec or acodec or tcodec:
        return {
            'vcodec': vcodec or 'none',
            'acodec': acodec or 'none',
            'dynamic_range': hdr,
            **({'tcodec': tcodec} if tcodec is not None else {}),
        }
    elif len(split_codecs) == 2:
        return {
            'vcodec': split_codecs[0],
            'acodec': split_codecs[1],
        }
    return {}
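# Illustrative usage (doctest-style; codec strings are examples only):
#   >>> parse_codecs('avc1.64001f, mp4a.40.2')
#   {'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2', 'dynamic_range': None}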
def urlhandle_detect_ext(url_handle):
    getheader = url_handle.headers.get

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(getheader('Content-Type'))


def encode_data_uri(data, mime_type):
    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))


def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """

    if age_limit is None:  # No limit set
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit
def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes. """

    BOMS = [
        (b'\xef\xbb\xbf', 'utf-8'),
        (b'\x00\x00\xfe\xff', 'utf-32-be'),
        (b'\xff\xfe\x00\x00', 'utf-32-le'),
        (b'\xff\xfe', 'utf-16-le'),
        (b'\xfe\xff', 'utf-16-be'),
    ]
    for bom, enc in BOMS:
        if first_bytes.startswith(bom):
            s = first_bytes[len(bom):].decode(enc, 'replace')
            break
    else:
        s = first_bytes.decode('utf-8', 'replace')

    return re.match(r'^\s*<', s)
def determine_protocol(info_dict):
    protocol = info_dict.get('protocol')
    if protocol is not None:
        return protocol

    url = sanitize_url(info_dict['url'])
    if url.startswith('rtmp'):
        return 'rtmp'
    elif url.startswith('mms'):
        return 'mms'
    elif url.startswith('rtsp'):
        return 'rtsp'

    ext = determine_ext(url)
    if ext == 'm3u8':
        return 'm3u8'
    elif ext == 'f4m':
        return 'f4m'

    return compat_urllib_parse_urlparse(url).scheme
def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False):
    """ Render a list of rows, each as a list of values.
    Text after a \t will be right aligned """
    def width(string):
        return len(remove_terminal_sequences(string).replace('\t', ''))

    def get_max_lens(table):
        return [max(width(str(v)) for v in col) for col in zip(*table)]

    def filter_using_list(row, filterArray):
        return [col for take, col in itertools.zip_longest(filterArray, row, fillvalue=True) if take]

    max_lens = get_max_lens(data) if hide_empty else []
    header_row = filter_using_list(header_row, max_lens)
    data = [filter_using_list(row, max_lens) for row in data]

    table = [header_row] + data
    max_lens = get_max_lens(table)
    extra_gap += 1
    if delim:
        table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data
        table[1][-1] = table[1][-1][:-extra_gap * len(delim)]  # Remove extra_gap from end of delimiter
    for row in table:
        for pos, text in enumerate(map(str, row)):
            if '\t' in text:
                row[pos] = text.replace('\t', ' ' * (max_lens[pos] - width(text))) + ' ' * extra_gap
            else:
                row[pos] = text + ' ' * (max_lens[pos] - width(text) + extra_gap)
    ret = '\n'.join(''.join(row).rstrip() for row in table)
    return ret
def _match_one(filter_part, dct, incomplete):
    # TODO: Generalize code with YoutubeDL._build_format_filter
    STRING_OPERATORS = {
        '*=': operator.contains,
        '^=': lambda attr, value: attr.startswith(value),
        '$=': lambda attr, value: attr.endswith(value),
        '~=': lambda attr, value: re.search(value, attr),
    }
    COMPARISON_OPERATORS = {
        **STRING_OPERATORS,
        '<=': operator.le,  # "<=" must be defined above "<"
        '<': operator.lt,
        '>=': operator.ge,
        '>': operator.gt,
        '=': operator.eq,
    }

    operator_rex = re.compile(r'''(?x)\s*
        (?P<key>[a-z_]+)
        \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
        (?:
            (?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
            (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
            (?P<strval>.+?)
        )
        \s*$
        ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        unnegated_op = COMPARISON_OPERATORS[m['op']]
        if m['negation']:
            op = lambda attr, value: not unnegated_op(attr, value)
        else:
            op = unnegated_op
        comparison_value = m['quotedstrval'] or m['strval'] or m['intval']
        if m['quote']:
            comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote'])
        actual_value = dct.get(m['key'])
        numeric_comparison = None
        if isinstance(actual_value, compat_numeric_types):
            # If the original field is a string and the matching comparison value is
            # a number we should respect the origin of the original field
            # and process comparison value as a string (see
            # https://github.com/ytdl-org/youtube-dl/issues/11082)
            try:
                numeric_comparison = int(comparison_value)
            except ValueError:
                numeric_comparison = parse_filesize(comparison_value)
                if numeric_comparison is None:
                    numeric_comparison = parse_filesize(f'{comparison_value}B')
                if numeric_comparison is None:
                    numeric_comparison = parse_duration(comparison_value)
        if numeric_comparison is not None and m['op'] in STRING_OPERATORS:
            raise ValueError('Operator %s only supports string values!' % m['op'])
        if actual_value is None:
            return incomplete or m['none_inclusive']
        return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)

    UNARY_OPERATORS = {
        '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
        '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
    }
    operator_rex = re.compile(r'''(?x)\s*
        (?P<op>%s)\s*(?P<key>[a-z_]+)
        \s*$
        ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = UNARY_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        if incomplete and actual_value is None:
            return True
        return op(actual_value)

    raise ValueError('Invalid filter part %r' % filter_part)
def match_str(filter_str, dct, incomplete=False):
    """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or False.
        When incomplete, all conditions pass on missing fields
    """
    return all(
        _match_one(filter_part.replace(r'\&', '&'), dct, incomplete)
        for filter_part in re.split(r'(?<!\\)&', filter_str))


def match_filter_func(filter_str):
    if filter_str is None:
        return None

    def _match_func(info_dict, *args, **kwargs):
        if match_str(filter_str, info_dict, *args, **kwargs):
            return None
        else:
            video_title = info_dict.get('title', info_dict.get('id', 'video'))
            return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
    return _match_func
def parse_dfxp_time_expr(time_expr):
    if not time_expr:
        return

    mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
    if mobj:
        return float(mobj.group('time_offset'))

    mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
    if mobj:
        return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
def srt_subtitles_timecode(seconds):
    return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)


def ass_subtitles_timecode(seconds):
    time = timetuple_from_msec(seconds * 1000)
    return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)
def dfxp2srt(dfxp_data):
    '''
    @param dfxp_data A bytes-like object containing DFXP data
    @returns A unicode object containing converted SRT data
    '''
3646 LEGACY_NAMESPACES = (
3647 (b'http://www.w3.org/ns/ttml', [
3648 b'http://www.w3.org/2004/11/ttaf1',
3649 b'http://www.w3.org/2006/04/ttaf1',
3650 b'http://www.w3.org/2006/10/ttaf1',
3652 (b'http://www.w3.org/ns/ttml#styling', [
3653 b'http://www.w3.org/ns/ttml#style',
3657 SUPPORTED_STYLING = [
3666 _x = functools.partial(xpath_with_ns, ns_map={
3667 'xml': 'http://www.w3.org/XML/1998/namespace',
3668 'ttml': 'http://www.w3.org/ns/ttml',
3669 'tts': 'http://www.w3.org/ns/ttml#styling',
3675 class TTMLPElementParser(object):
3677 _unclosed_elements = []
3678 _applied_styles = []
3680 def start(self, tag, attrib):
3681 if tag in (_x('ttml:br'), 'br'):
3684 unclosed_elements = []
3686 element_style_id = attrib.get('style')
3688 style.update(default_style)
3689 if element_style_id:
3690 style.update(styles.get(element_style_id, {}))
3691 for prop in SUPPORTED_STYLING:
3692 prop_val = attrib.get(_x('tts:' + prop))
3694 style[prop] = prop_val
3697 for k, v in sorted(style.items()):
3698 if self._applied_styles and self._applied_styles[-1].get(k) == v:
3701 font += ' color="%s"' % v
3702 elif k == 'fontSize':
3703 font += ' size="%s"' % v
3704 elif k == 'fontFamily':
3705 font += ' face="%s"' % v
3706 elif k == 'fontWeight' and v == 'bold':
3708 unclosed_elements.append('b')
3709 elif k == 'fontStyle' and v == 'italic':
3711 unclosed_elements.append('i')
3712 elif k == 'textDecoration' and v == 'underline':
3714 unclosed_elements.append('u')
3716 self._out += '<font' + font + '>'
3717 unclosed_elements.append('font')
3719 if self._applied_styles:
3720 applied_style.update(self._applied_styles[-1])
3721 applied_style.update(style)
3722 self._applied_styles.append(applied_style)
3723 self._unclosed_elements.append(unclosed_elements)
3726 if tag not in (_x('ttml:br'), 'br'):
3727 unclosed_elements = self._unclosed_elements.pop()
3728 for element in reversed(unclosed_elements):
3729 self._out += '</%s>' % element
3730 if unclosed_elements and self._applied_styles:
3731 self._applied_styles.pop()
3733 def data(self, data):
3737 return self._out.strip()
3739 def parse_node(node):
3740 target = TTMLPElementParser()
3741 parser = xml.etree.ElementTree.XMLParser(target=target)
3742 parser.feed(xml.etree.ElementTree.tostring(node))
3743 return parser.close()
3745 for k, v in LEGACY_NAMESPACES:
3747 dfxp_data = dfxp_data.replace(ns, k)
3749 dfxp = compat_etree_fromstring(dfxp_data)
3751 paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
3754 raise ValueError('Invalid dfxp/TTML subtitle')
3758 for style in dfxp.findall(_x('.//ttml:style')):
3759 style_id = style.get('id') or style.get(_x('xml:id'))
3762 parent_style_id = style.get('style')
3764 if parent_style_id not in styles:
3767 styles[style_id] = styles[parent_style_id].copy()
3768 for prop in SUPPORTED_STYLING:
3769 prop_val = style.get(_x('tts:' + prop))
3771 styles.setdefault(style_id, {})[prop] = prop_val
3777 for p in ('body', 'div'):
3778 ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
3781 style = styles.get(ele.get('style'))
3784 default_style.update(style)
3786 for para, index in zip(paras, itertools.count(1)):
3787 begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
3788 end_time = parse_dfxp_time_expr(para.attrib.get('end'))
3789 dur = parse_dfxp_time_expr(para.attrib.get('dur'))
3790 if begin_time is None:
3795 end_time = begin_time + dur
3796 out.append('%d\n%s --> %s\n%s\n\n' % (
3798 srt_subtitles_timecode(begin_time),
3799 srt_subtitles_timecode(end_time),
3805 def cli_option(params, command_option, param):
3806 param = params.get(param)
3808 param = compat_str(param)
3809 return [command_option, param] if param is not None else []
3812 def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
3813 param = params.get(param)
3816 assert isinstance(param, bool)
3818 return [command_option + separator + (true_value if param else false_value)]
3819 return [command_option, true_value if param else false_value]
3822 def cli_valueless_option(params, command_option, param, expected_value=True):
3823 param = params.get(param)
3824 return [command_option] if param == expected_value else []
3827 def cli_configuration_args(argdict, keys, default=[], use_compat=True):
3828 if isinstance(argdict, (list, tuple)): # for backward compatibility
3835 assert isinstance(argdict, dict)
3837 assert isinstance(keys, (list, tuple))
3838 for key_list in keys:
3839 arg_list = list(filter(
3840 lambda x: x is not None,
3841 [argdict.get(key.lower()) for key in variadic(key_list)]))
3843 return [arg for args in arg_list for arg in args]
3847 def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compat=True):
3848 main_key, exe = main_key.lower(), exe.lower()
3849 root_key = exe if main_key == exe else f'{main_key}+{exe}'
3850 keys = [f'{root_key}{k}' for k in (keys or [''])]
3851 if root_key in keys:
3853 keys.append((main_key, exe))
3854 keys.append('default')
3857 return cli_configuration_args(argdict, keys, default, use_compat)
3860 class ISO639Utils(object):
3861 # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
3920 'iw': 'heb', # Replaced by he in 1989 revision
3930 'in': 'ind', # Replaced by id in 1989 revision
4045 'ji': 'yid', # Replaced by yi in 1989 revision
4053 def short2long(cls, code):
4054 """Convert language code from ISO 639-1 to ISO 639-2/T"""
4055 return cls._lang_map.get(code[:2])
4058 def long2short(cls, code):
4059 """Convert language code from ISO 639-2/T to ISO 639-1"""
4060 for short_name, long_name in cls._lang_map.items():
4061 if long_name == code:
4065 class ISO3166Utils(object):
4066 # From http://data.okfn.org/data/core/country-list
4068 'AF': 'Afghanistan',
4069 'AX': 'Åland Islands',
4072 'AS': 'American Samoa',
4077 'AG': 'Antigua and Barbuda',
4094 'BO': 'Bolivia, Plurinational State of',
4095 'BQ': 'Bonaire, Sint Eustatius and Saba',
4096 'BA': 'Bosnia and Herzegovina',
4098 'BV': 'Bouvet Island',
4100 'IO': 'British Indian Ocean Territory',
4101 'BN': 'Brunei Darussalam',
4103 'BF': 'Burkina Faso',
4109 'KY': 'Cayman Islands',
4110 'CF': 'Central African Republic',
4114 'CX': 'Christmas Island',
4115 'CC': 'Cocos (Keeling) Islands',
4119 'CD': 'Congo, the Democratic Republic of the',
4120 'CK': 'Cook Islands',
4122 'CI': 'Côte d\'Ivoire',
4127 'CZ': 'Czech Republic',
4131 'DO': 'Dominican Republic',
4134 'SV': 'El Salvador',
4135 'GQ': 'Equatorial Guinea',
4139 'FK': 'Falkland Islands (Malvinas)',
4140 'FO': 'Faroe Islands',
4144 'GF': 'French Guiana',
4145 'PF': 'French Polynesia',
4146 'TF': 'French Southern Territories',
4161 'GW': 'Guinea-Bissau',
4164 'HM': 'Heard Island and McDonald Islands',
4165 'VA': 'Holy See (Vatican City State)',
4172 'IR': 'Iran, Islamic Republic of',
4175 'IM': 'Isle of Man',
4185 'KP': 'Korea, Democratic People\'s Republic of',
4186 'KR': 'Korea, Republic of',
4189 'LA': 'Lao People\'s Democratic Republic',
4195 'LI': 'Liechtenstein',
4199 'MK': 'Macedonia, the Former Yugoslav Republic of',
4206 'MH': 'Marshall Islands',
4212 'FM': 'Micronesia, Federated States of',
4213 'MD': 'Moldova, Republic of',
4224 'NL': 'Netherlands',
4225 'NC': 'New Caledonia',
4226 'NZ': 'New Zealand',
4231 'NF': 'Norfolk Island',
4232 'MP': 'Northern Mariana Islands',
4237 'PS': 'Palestine, State of',
4239 'PG': 'Papua New Guinea',
4242 'PH': 'Philippines',
4246 'PR': 'Puerto Rico',
4250 'RU': 'Russian Federation',
4252 'BL': 'Saint Barthélemy',
4253 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
4254 'KN': 'Saint Kitts and Nevis',
4255 'LC': 'Saint Lucia',
4256 'MF': 'Saint Martin (French part)',
4257 'PM': 'Saint Pierre and Miquelon',
4258 'VC': 'Saint Vincent and the Grenadines',
4261 'ST': 'Sao Tome and Principe',
4262 'SA': 'Saudi Arabia',
4266 'SL': 'Sierra Leone',
4268 'SX': 'Sint Maarten (Dutch part)',
4271 'SB': 'Solomon Islands',
4273 'ZA': 'South Africa',
4274 'GS': 'South Georgia and the South Sandwich Islands',
4275 'SS': 'South Sudan',
4280 'SJ': 'Svalbard and Jan Mayen',
4283 'CH': 'Switzerland',
4284 'SY': 'Syrian Arab Republic',
4285 'TW': 'Taiwan, Province of China',
4287 'TZ': 'Tanzania, United Republic of',
4289 'TL': 'Timor-Leste',
4293 'TT': 'Trinidad and Tobago',
4296 'TM': 'Turkmenistan',
4297 'TC': 'Turks and Caicos Islands',
4301 'AE': 'United Arab Emirates',
4302 'GB': 'United Kingdom',
4303 'US': 'United States',
4304 'UM': 'United States Minor Outlying Islands',
4308 'VE': 'Venezuela, Bolivarian Republic of',
4310 'VG': 'Virgin Islands, British',
4311 'VI': 'Virgin Islands, U.S.',
4312 'WF': 'Wallis and Futuna',
4313 'EH': 'Western Sahara',
4320 def short2full(cls, code):
4321 """Convert an ISO 3166-2 country code to the corresponding full name"""
4322 return cls._country_map.get(code.upper())
4325 class GeoUtils(object):
4326 # Major IPv4 address blocks per country
4328 'AD': '46.172.224.0/19',
4329 'AE': '94.200.0.0/13',
4330 'AF': '149.54.0.0/17',
4331 'AG': '209.59.64.0/18',
4332 'AI': '204.14.248.0/21',
4333 'AL': '46.99.0.0/16',
4334 'AM': '46.70.0.0/15',
4335 'AO': '105.168.0.0/13',
4336 'AP': '182.50.184.0/21',
4337 'AQ': '23.154.160.0/24',
4338 'AR': '181.0.0.0/12',
4339 'AS': '202.70.112.0/20',
4340 'AT': '77.116.0.0/14',
4341 'AU': '1.128.0.0/11',
4342 'AW': '181.41.0.0/18',
4343 'AX': '185.217.4.0/22',
4344 'AZ': '5.197.0.0/16',
4345 'BA': '31.176.128.0/17',
4346 'BB': '65.48.128.0/17',
4347 'BD': '114.130.0.0/16',
4349 'BF': '102.178.0.0/15',
4350 'BG': '95.42.0.0/15',
4351 'BH': '37.131.0.0/17',
4352 'BI': '154.117.192.0/18',
4353 'BJ': '137.255.0.0/16',
4354 'BL': '185.212.72.0/23',
4355 'BM': '196.12.64.0/18',
4356 'BN': '156.31.0.0/16',
4357 'BO': '161.56.0.0/16',
4358 'BQ': '161.0.80.0/20',
4359 'BR': '191.128.0.0/12',
4360 'BS': '24.51.64.0/18',
4361 'BT': '119.2.96.0/19',
4362 'BW': '168.167.0.0/16',
4363 'BY': '178.120.0.0/13',
4364 'BZ': '179.42.192.0/18',
4365 'CA': '99.224.0.0/11',
4366 'CD': '41.243.0.0/16',
4367 'CF': '197.242.176.0/21',
4368 'CG': '160.113.0.0/16',
4369 'CH': '85.0.0.0/13',
4370 'CI': '102.136.0.0/14',
4371 'CK': '202.65.32.0/19',
4372 'CL': '152.172.0.0/14',
4373 'CM': '102.244.0.0/14',
4374 'CN': '36.128.0.0/10',
4375 'CO': '181.240.0.0/12',
4376 'CR': '201.192.0.0/12',
4377 'CU': '152.206.0.0/15',
4378 'CV': '165.90.96.0/19',
4379 'CW': '190.88.128.0/17',
4380 'CY': '31.153.0.0/16',
4381 'CZ': '88.100.0.0/14',
4383 'DJ': '197.241.0.0/17',
4384 'DK': '87.48.0.0/12',
4385 'DM': '192.243.48.0/20',
4386 'DO': '152.166.0.0/15',
4387 'DZ': '41.96.0.0/12',
4388 'EC': '186.68.0.0/15',
4389 'EE': '90.190.0.0/15',
4390 'EG': '156.160.0.0/11',
4391 'ER': '196.200.96.0/20',
4392 'ES': '88.0.0.0/11',
4393 'ET': '196.188.0.0/14',
4394 'EU': '2.16.0.0/13',
4395 'FI': '91.152.0.0/13',
4396 'FJ': '144.120.0.0/16',
4397 'FK': '80.73.208.0/21',
4398 'FM': '119.252.112.0/20',
4399 'FO': '88.85.32.0/19',
4401 'GA': '41.158.0.0/15',
4403 'GD': '74.122.88.0/21',
4404 'GE': '31.146.0.0/16',
4405 'GF': '161.22.64.0/18',
4406 'GG': '62.68.160.0/19',
4407 'GH': '154.160.0.0/12',
4408 'GI': '95.164.0.0/16',
4409 'GL': '88.83.0.0/19',
4410 'GM': '160.182.0.0/15',
4411 'GN': '197.149.192.0/18',
4412 'GP': '104.250.0.0/19',
4413 'GQ': '105.235.224.0/20',
4414 'GR': '94.64.0.0/13',
4415 'GT': '168.234.0.0/16',
4416 'GU': '168.123.0.0/16',
4417 'GW': '197.214.80.0/20',
4418 'GY': '181.41.64.0/18',
4419 'HK': '113.252.0.0/14',
4420 'HN': '181.210.0.0/16',
4421 'HR': '93.136.0.0/13',
4422 'HT': '148.102.128.0/17',
4423 'HU': '84.0.0.0/14',
4424 'ID': '39.192.0.0/10',
4425 'IE': '87.32.0.0/12',
4426 'IL': '79.176.0.0/13',
4427 'IM': '5.62.80.0/20',
4428 'IN': '117.192.0.0/10',
4429 'IO': '203.83.48.0/21',
4430 'IQ': '37.236.0.0/14',
4431 'IR': '2.176.0.0/12',
4432 'IS': '82.221.0.0/16',
4433 'IT': '79.0.0.0/10',
4434 'JE': '87.244.64.0/18',
4435 'JM': '72.27.0.0/17',
4436 'JO': '176.29.0.0/16',
4437 'JP': '133.0.0.0/8',
4438 'KE': '105.48.0.0/12',
4439 'KG': '158.181.128.0/17',
4440 'KH': '36.37.128.0/17',
4441 'KI': '103.25.140.0/22',
4442 'KM': '197.255.224.0/20',
4443 'KN': '198.167.192.0/19',
4444 'KP': '175.45.176.0/22',
4445 'KR': '175.192.0.0/10',
4446 'KW': '37.36.0.0/14',
4447 'KY': '64.96.0.0/15',
4448 'KZ': '2.72.0.0/13',
4449 'LA': '115.84.64.0/18',
4450 'LB': '178.135.0.0/16',
4451 'LC': '24.92.144.0/20',
4452 'LI': '82.117.0.0/19',
4453 'LK': '112.134.0.0/15',
4454 'LR': '102.183.0.0/16',
4455 'LS': '129.232.0.0/17',
4456 'LT': '78.56.0.0/13',
4457 'LU': '188.42.0.0/16',
4458 'LV': '46.109.0.0/16',
4459 'LY': '41.252.0.0/14',
4460 'MA': '105.128.0.0/11',
4461 'MC': '88.209.64.0/18',
4462 'MD': '37.246.0.0/16',
4463 'ME': '178.175.0.0/17',
4464 'MF': '74.112.232.0/21',
4465 'MG': '154.126.0.0/17',
4466 'MH': '117.103.88.0/21',
4467 'MK': '77.28.0.0/15',
4468 'ML': '154.118.128.0/18',
4469 'MM': '37.111.0.0/17',
4470 'MN': '49.0.128.0/17',
4471 'MO': '60.246.0.0/16',
4472 'MP': '202.88.64.0/20',
4473 'MQ': '109.203.224.0/19',
4474 'MR': '41.188.64.0/18',
4475 'MS': '208.90.112.0/22',
4476 'MT': '46.11.0.0/16',
4477 'MU': '105.16.0.0/12',
4478 'MV': '27.114.128.0/18',
4479 'MW': '102.70.0.0/15',
4480 'MX': '187.192.0.0/11',
4481 'MY': '175.136.0.0/13',
4482 'MZ': '197.218.0.0/15',
4483 'NA': '41.182.0.0/16',
4484 'NC': '101.101.0.0/18',
4485 'NE': '197.214.0.0/18',
4486 'NF': '203.17.240.0/22',
4487 'NG': '105.112.0.0/12',
4488 'NI': '186.76.0.0/15',
4489 'NL': '145.96.0.0/11',
4490 'NO': '84.208.0.0/13',
4491 'NP': '36.252.0.0/15',
4492 'NR': '203.98.224.0/19',
4493 'NU': '49.156.48.0/22',
4494 'NZ': '49.224.0.0/14',
4495 'OM': '5.36.0.0/15',
4496 'PA': '186.72.0.0/15',
4497 'PE': '186.160.0.0/14',
4498 'PF': '123.50.64.0/18',
4499 'PG': '124.240.192.0/19',
4500 'PH': '49.144.0.0/13',
4501 'PK': '39.32.0.0/11',
4502 'PL': '83.0.0.0/11',
4503 'PM': '70.36.0.0/20',
4504 'PR': '66.50.0.0/16',
4505 'PS': '188.161.0.0/16',
4506 'PT': '85.240.0.0/13',
4507 'PW': '202.124.224.0/20',
4508 'PY': '181.120.0.0/14',
4509 'QA': '37.210.0.0/15',
4510 'RE': '102.35.0.0/16',
4511 'RO': '79.112.0.0/13',
4512 'RS': '93.86.0.0/15',
4513 'RU': '5.136.0.0/13',
4514 'RW': '41.186.0.0/16',
4515 'SA': '188.48.0.0/13',
4516 'SB': '202.1.160.0/19',
4517 'SC': '154.192.0.0/11',
4518 'SD': '102.120.0.0/13',
4519 'SE': '78.64.0.0/12',
4520 'SG': '8.128.0.0/10',
4521 'SI': '188.196.0.0/14',
4522 'SK': '78.98.0.0/15',
4523 'SL': '102.143.0.0/17',
4524 'SM': '89.186.32.0/19',
4525 'SN': '41.82.0.0/15',
4526 'SO': '154.115.192.0/18',
4527 'SR': '186.179.128.0/17',
4528 'SS': '105.235.208.0/21',
4529 'ST': '197.159.160.0/19',
4530 'SV': '168.243.0.0/16',
4531 'SX': '190.102.0.0/20',
4533 'SZ': '41.84.224.0/19',
4534 'TC': '65.255.48.0/20',
4535 'TD': '154.68.128.0/19',
4536 'TG': '196.168.0.0/14',
4537 'TH': '171.96.0.0/13',
4538 'TJ': '85.9.128.0/18',
4539 'TK': '27.96.24.0/21',
4540 'TL': '180.189.160.0/20',
4541 'TM': '95.85.96.0/19',
4542 'TN': '197.0.0.0/11',
4543 'TO': '175.176.144.0/21',
4544 'TR': '78.160.0.0/11',
4545 'TT': '186.44.0.0/15',
4546 'TV': '202.2.96.0/19',
4547 'TW': '120.96.0.0/11',
4548 'TZ': '156.156.0.0/14',
4549 'UA': '37.52.0.0/14',
4550 'UG': '102.80.0.0/13',
4552 'UY': '167.56.0.0/13',
4553 'UZ': '84.54.64.0/18',
4554 'VA': '212.77.0.0/19',
4555 'VC': '207.191.240.0/21',
4556 'VE': '186.88.0.0/13',
4557 'VG': '66.81.192.0/20',
4558 'VI': '146.226.0.0/16',
4559 'VN': '14.160.0.0/11',
4560 'VU': '202.80.32.0/20',
4561 'WF': '117.20.32.0/21',
4562 'WS': '202.4.32.0/19',
4563 'YE': '134.35.0.0/16',
4564 'YT': '41.242.116.0/22',
4565 'ZA': '41.0.0.0/11',
4566 'ZM': '102.144.0.0/13',
4567 'ZW': '102.177.192.0/18',
4571 def random_ipv4(cls, code_or_block):
4572 if len(code_or_block) == 2:
4573 block = cls._country_ip_map.get(code_or_block.upper())
4577 block = code_or_block
4578 addr, preflen = block.split('/')
4579 addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
4580 addr_max = addr_min | (0xffffffff >> int(preflen))
4581 return compat_str(socket.inet_ntoa(
4582 compat_struct_pack('!L', random.randint(addr_min, addr_max))))
4585 class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
4586 def __init__(self, proxies=None):
4587 # Set default handlers
4588 for type in ('http', 'https'):
4589 setattr(self, '%s_open' % type,
4590 lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
4591 meth(r, proxy, type))
4592 compat_urllib_request.ProxyHandler.__init__(self, proxies)
4594 def proxy_open(self, req, proxy, type):
4595 req_proxy = req.headers.get('Ytdl-request-proxy')
4596 if req_proxy is not None:
4598 del req.headers['Ytdl-request-proxy']
4600 if proxy == '__noproxy__':
4601 return None # No Proxy
4602 if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
4603 req.add_header('Ytdl-socks-proxy', proxy)
4604 # yt-dlp's http/https handlers do wrapping the socket with socks
4606 return compat_urllib_request.ProxyHandler.proxy_open(
4607 self, req, proxy, type)

# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
# released into Public Domain
# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387

def long_to_bytes(n, blocksize=0):
    """long_to_bytes(n:long, blocksize:int) : string
    Convert a long integer to a byte string.

    If optional blocksize is given and greater than zero, pad the front of the
    byte string with binary zeros so that the length is a multiple of
    blocksize.
    """
    # after much testing, this algorithm was deemed to be the fastest
    s = b''
    n = int(n)
    while n > 0:
        s = compat_struct_pack('>I', n & 0xffffffff) + s
        n = n >> 32
    # strip off leading zeros
    for i in range(len(s)):
        if s[i] != b'\000'[0]:
            break
    else:
        # only happens when n == 0
        s = b'\000'
        i = 0
    s = s[i:]
    # add back some pad bytes. this could be done more efficiently w.r.t. the
    # de-padding being done above, but sigh...
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * b'\000' + s
    return s


def bytes_to_long(s):
    """bytes_to_long(string) : long
    Convert a byte string to a long integer.

    This is (essentially) the inverse of long_to_bytes().
    """
    acc = 0
    length = len(s)
    if length % 4:
        extra = (4 - length % 4)
        s = b'\000' * extra + s
        length = length + extra
    for i in range(0, length, 4):
        acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
    return acc
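
# Round-trip example: 65537 == 0x010001, so long_to_bytes(65537) -> b'\x01\x00\x01'
# and bytes_to_long(b'\x01\x00\x01') -> 65537.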

def ohdave_rsa_encrypt(data, exponent, modulus):
    '''
    Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/

    Input:
        data: data to encrypt, bytes-like object
        exponent, modulus: parameter e and N of RSA algorithm, both integer
    Output: hex string of encrypted data

    Limitation: supports one block encryption only
    '''

    payload = int(binascii.hexlify(data[::-1]), 16)
    encrypted = pow(payload, exponent, modulus)
    return '%x' % encrypted

def pkcs1pad(data, length):
    """
    Pad input data using the PKCS#1 scheme

    @param {int[]} data        input data
    @param {int} length        target length
    @returns {int[]} padded data
    """
    if len(data) > length - 11:
        raise ValueError('Input data too long for PKCS#1 padding')

    pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
    return [0, 2] + pseudo_random + [0] + data

def encode_base_n(num, n, table=None):
    FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if not table:
        table = FULL_TABLE[:n]

    if n > len(table):
        raise ValueError('base %d exceeds table length %d' % (n, len(table)))

    if num == 0:
        return table[0]

    ret = ''
    while num:
        ret = table[num % n] + ret
        num = num // n
    return ret
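
# With the default table the first 16 characters form the hex alphabet, so
# encode_base_n(255, 16) -> 'ff' and encode_base_n(255, 2) -> '11111111'.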

def decode_packed_codes(code):
    mobj = re.search(PACKED_CODES_RE, code)
    obfuscated_code, base, count, symbols = mobj.groups()
    base = int(base)
    count = int(count)
    symbols = symbols.split('|')
    symbol_table = {}

    while count:
        count -= 1
        base_n_count = encode_base_n(count, base)
        symbol_table[base_n_count] = symbols[count] or base_n_count

    return re.sub(
        r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
        obfuscated_code)

def caesar(s, alphabet, shift):
    if shift == 0:
        return s
    l = len(alphabet)
    return ''.join(
        alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
        for c in s)


def rot47(s):
    return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)

def parse_m3u8_attributes(attrib):
    info = {}
    for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
        if val.startswith('"'):
            val = val[1:-1]
        info[key] = val
    return info

def urshift(val, n):
    return val >> n if val >= 0 else (val + 0x100000000) >> n
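
# urshift emulates JavaScript's unsigned right shift (>>>) on 32-bit values,
# e.g. urshift(-1, 4) -> 268435455 (0x0fffffff) whereas -1 >> 4 stays -1.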

# Based on png2str() written by @gdkchan and improved by @yokrysty
# Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
def decode_png(png_data):
    # Reference: https://www.w3.org/TR/PNG/
    header = png_data[8:]

    if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
        raise IOError('Not a valid PNG file.')

    int_map = {1: '>B', 2: '>H', 4: '>I'}
    unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]

    chunks = []

    while header:
        length = unpack_integer(header[:4])
        header = header[4:]

        chunk_type = header[:4]
        header = header[4:]

        chunk_data = header[:length]
        header = header[length:]

        header = header[4:]  # Skip CRC

        chunks.append({
            'type': chunk_type,
            'length': length,
            'data': chunk_data
        })

    ihdr = chunks[0]['data']

    width = unpack_integer(ihdr[:4])
    height = unpack_integer(ihdr[4:8])

    idat = b''

    for chunk in chunks:
        if chunk['type'] == b'IDAT':
            idat += chunk['data']

    if not idat:
        raise IOError('Unable to read PNG data.')

    decompressed_data = bytearray(zlib.decompress(idat))

    stride = width * 3
    pixels = []

    def _get_pixel(idx):
        x = idx % stride
        y = idx // stride
        return pixels[y][x]

    for y in range(height):
        basePos = y * (1 + stride)
        filter_type = decompressed_data[basePos]

        current_row = []

        pixels.append(current_row)

        for x in range(stride):
            color = decompressed_data[1 + basePos + x]
            basex = y * stride + x
            left = 0
            up = 0

            if x > 2:
                left = _get_pixel(basex - 3)
            if y > 0:
                up = _get_pixel(basex - stride)

            if filter_type == 1:  # Sub
                color = (color + left) & 0xff
            elif filter_type == 2:  # Up
                color = (color + up) & 0xff
            elif filter_type == 3:  # Average
                color = (color + ((left + up) >> 1)) & 0xff
            elif filter_type == 4:  # Paeth
                a = left
                b = up
                c = 0

                if x > 2 and y > 0:
                    c = _get_pixel(basex - stride - 3)

                p = a + b - c

                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)

                if pa <= pb and pa <= pc:
                    color = (color + a) & 0xff
                elif pb <= pc:
                    color = (color + b) & 0xff
                else:
                    color = (color + c) & 0xff

            current_row.append(color)

    return width, height, pixels

def write_xattr(path, key, value):
    # This mess below finds the best xattr tool for the job
    try:
        # try the pyxattr module...
        import xattr

        if hasattr(xattr, 'set'):  # pyxattr
            # Unicode arguments are not supported in python-pyxattr until
            # version 0.5.0
            # See https://github.com/ytdl-org/youtube-dl/issues/5498
            pyxattr_required_version = '0.5.0'
            if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
                # TODO: fallback to CLI tools
                raise XAttrUnavailableError(
                    'python-pyxattr is detected but is too old. '
                    'yt-dlp requires %s or above while your version is %s. '
                    'Falling back to other xattr implementations' % (
                        pyxattr_required_version, xattr.__version__))

            setxattr = xattr.set
        else:  # xattr
            setxattr = xattr.setxattr

        try:
            setxattr(path, key, value)
        except EnvironmentError as e:
            raise XAttrMetadataError(e.errno, e.strerror)

    except ImportError:
        if compat_os_name == 'nt':
            # Write xattrs to NTFS Alternate Data Streams:
            # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
            assert ':' not in key
            assert os.path.exists(path)

            ads_fn = path + ':' + key
            try:
                with open(ads_fn, 'wb') as f:
                    f.write(value)
            except EnvironmentError as e:
                raise XAttrMetadataError(e.errno, e.strerror)
        else:
            user_has_setfattr = check_executable('setfattr', ['--version'])
            user_has_xattr = check_executable('xattr', ['-h'])

            if user_has_setfattr or user_has_xattr:

                value = value.decode('utf-8')
                if user_has_setfattr:
                    executable = 'setfattr'
                    opts = ['-n', key, '-v', value]
                elif user_has_xattr:
                    executable = 'xattr'
                    opts = ['-w', key, value]

                cmd = ([encodeFilename(executable, True)]
                       + [encodeArgument(o) for o in opts]
                       + [encodeFilename(path, True)])

                try:
                    p = Popen(
                        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
                except EnvironmentError as e:
                    raise XAttrMetadataError(e.errno, e.strerror)
                stdout, stderr = p.communicate_or_kill()
                stderr = stderr.decode('utf-8', 'replace')
                if p.returncode != 0:
                    raise XAttrMetadataError(p.returncode, stderr)

            else:
                # On Unix, and can't find pyxattr, setfattr, or xattr.
                if sys.platform.startswith('linux'):
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'pyxattr' or 'xattr' "
                        "modules, or the GNU 'attr' package "
                        "(which contains the 'setfattr' tool).")
                else:
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'xattr' module, "
                        "or the 'xattr' binary.")

def random_birthday(year_field, month_field, day_field):
    start_date = datetime.date(1950, 1, 1)
    end_date = datetime.date(1995, 12, 31)
    offset = random.randint(0, (end_date - start_date).days)
    random_date = start_date + datetime.timedelta(offset)
    return {
        year_field: str(random_date.year),
        month_field: str(random_date.month),
        day_field: str(random_date.day),
    }

# Templates for internet shortcut files, which are plain text files.
DOT_URL_LINK_TEMPLATE = '''
[InternetShortcut]
URL=%(url)s
'''.lstrip()

DOT_WEBLOC_LINK_TEMPLATE = '''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
\t<key>URL</key>
\t<string>%(url)s</string>
</dict>
</plist>
'''.lstrip()

DOT_DESKTOP_LINK_TEMPLATE = '''
[Desktop Entry]
Encoding=UTF-8
Name=%(filename)s
Type=Link
URL=%(url)s
Icon=text-html
'''.lstrip()

LINK_TEMPLATES = {
    'url': DOT_URL_LINK_TEMPLATE,
    'desktop': DOT_DESKTOP_LINK_TEMPLATE,
    'webloc': DOT_WEBLOC_LINK_TEMPLATE,
}

def iri_to_uri(iri):
    """
    Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).

    The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact.
    """

    iri_parts = compat_urllib_parse_urlparse(iri)

    if '[' in iri_parts.netloc:
        raise ValueError('IPv6 URIs are not, yet, supported.')
        # Querying `.netloc`, when there's only one bracket, also raises a ValueError.

    # The `safe` argument values, that the following code uses, contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.

    net_location = ''
    if iri_parts.username:
        net_location += compat_urllib_parse_quote(iri_parts.username, safe=r"!$%&'()*+,~")
        if iri_parts.password is not None:
            net_location += ':' + compat_urllib_parse_quote(iri_parts.password, safe=r"!$%&'()*+,~")
        net_location += '@'

    net_location += iri_parts.hostname.encode('idna').decode('utf-8')  # Punycode for Unicode hostnames.
    # The 'idna' encoding produces ASCII text.
    if iri_parts.port is not None and iri_parts.port != 80:
        net_location += ':' + str(iri_parts.port)

    return compat_urllib_parse_urlunparse(
        (iri_parts.scheme,
            net_location,

            compat_urllib_parse_quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),

            # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
            compat_urllib_parse_quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),

            # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
            compat_urllib_parse_quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),

            compat_urllib_parse_quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))

    # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
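
# For instance, iri_to_uri('https://example.com/föö?bar=baz') should yield
# 'https://example.com/f%C3%B6%C3%B6?bar=baz': the non-ASCII path is UTF-8
# percent-encoded while the already-ASCII query is left untouched.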

def to_high_limit_path(path):
    if sys.platform in ['win32', 'cygwin']:
        # Work around MAX_PATH limitation on Windows. The maximum allowed length
        # for the individual path segments may still be quite limited.
        return r'\\?\ '.rstrip() + os.path.abspath(path)

    return path

def format_field(obj, field=None, template='%s', ignore=(None, ''), default='', func=None):
    val = traverse_obj(obj, *variadic(field))
    if val in ignore:
        return default
    return template % (func(val) if func else val)
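
# e.g. format_field({'width': 1080}, 'width', '%dp') -> '1080p'; a missing or
# empty field falls back to `default`: format_field({}, 'width', '%dp') -> ''.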

def clean_podcast_url(url):
    return re.sub(r'''(?x)
        (?:
            (?:
                chtbl\.com/track|
                media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
                play\.podtrac\.com
            )/[^/]+|
            (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
            flex\.acast\.com|
            pd(?:
                cn\.co| # https://podcorn.com/analytics-prefix/
                st\.fm # https://podsights.com/docs/
            )/e
        )/''', '', url)

_HEX_TABLE = '0123456789abcdef'


def random_uuidv4():
    return re.sub(r'[xy]', lambda x: _HEX_TABLE[random.randint(0, 15)], 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')

def make_dir(path, to_screen=None):
    try:
        dn = os.path.dirname(path)
        if dn and not os.path.exists(dn):
            os.makedirs(dn)
        return True
    except (OSError, IOError) as err:
        if callable(to_screen):
            to_screen('unable to create directory ' + error_to_compat_str(err))
        return False

def get_executable_path():
    from zipimport import zipimporter
    if hasattr(sys, 'frozen'):  # Running from PyInstaller
        path = os.path.dirname(sys.executable)
    elif isinstance(globals().get('__loader__'), zipimporter):  # Running from ZIP
        path = os.path.join(os.path.dirname(__file__), '../..')
    else:
        path = os.path.join(os.path.dirname(__file__), '..')
    return os.path.abspath(path)

def load_plugins(name, suffix, namespace):
    classes = {}
    try:
        plugins_spec = importlib.util.spec_from_file_location(
            name, os.path.join(get_executable_path(), 'ytdlp_plugins', name, '__init__.py'))
        plugins = importlib.util.module_from_spec(plugins_spec)
        sys.modules[plugins_spec.name] = plugins
        plugins_spec.loader.exec_module(plugins)
        for name in dir(plugins):
            if name in namespace:
                continue
            if not name.endswith(suffix):
                continue
            klass = getattr(plugins, name)
            classes[name] = namespace[name] = klass
    except FileNotFoundError:
        pass
    return classes

def traverse_obj(
        obj, *path_list, default=None, expected_type=None, get_all=True,
        casesense=True, is_user_input=False, traverse_string=False):
    ''' Traverse nested list/dict/tuple
    @param path_list        A list of paths which are checked one by one.
                            Each path is a list of keys where each key is a string,
                            a function, a tuple of strings/None or "...".
                            When a function is given, it takes the key as argument and
                            returns whether the key matches or not. When a tuple is given,
                            all the keys given in the tuple are traversed, and
                            "..." traverses all the keys in the object
                            "None" returns the object without traversal
    @param default          Default value to return
    @param expected_type    Only accept final value of this type (Can also be any callable)
    @param get_all          Return all the values obtained from a path or only the first one
    @param casesense        Whether to consider dictionary keys as case sensitive
    @param is_user_input    Whether the keys are generated from user input. If True,
                            strings are converted to int/slice if necessary
    @param traverse_string  Whether to traverse inside strings. If True, any
                            non-compatible object will also be converted into a string
    '''
    if not casesense:
        _lower = lambda k: (k.lower() if isinstance(k, str) else k)
        path_list = (map(_lower, variadic(path)) for path in path_list)

    def _traverse_obj(obj, path, _current_depth=0):
        nonlocal depth
        path = tuple(variadic(path))
        for i, key in enumerate(path):
            if None in (key, obj):
                return obj
            if isinstance(key, (list, tuple)):
                obj = [_traverse_obj(obj, sub_key, _current_depth) for sub_key in key]
                key = ...
            if key is ...:
                obj = (obj.values() if isinstance(obj, dict)
                       else obj if isinstance(obj, (list, tuple, LazyList))
                       else str(obj) if traverse_string else [])
                _current_depth += 1
                depth = max(depth, _current_depth)
                return [_traverse_obj(inner_obj, path[i + 1:], _current_depth) for inner_obj in obj]
            elif callable(key):
                if isinstance(obj, (list, tuple, LazyList)):
                    obj = enumerate(obj)
                elif isinstance(obj, dict):
                    obj = obj.items()
                else:
                    if not traverse_string:
                        return None
                    obj = str(obj)
                _current_depth += 1
                depth = max(depth, _current_depth)
                return [_traverse_obj(v, path[i + 1:], _current_depth) for k, v in obj if key(k)]
            elif isinstance(obj, dict) and not (is_user_input and key == ':'):
                obj = (obj.get(key) if casesense or (key in obj)
                       else next((v for k, v in obj.items() if _lower(k) == key), None))
            else:
                if is_user_input:
                    key = (int_or_none(key) if ':' not in key
                           else slice(*map(int_or_none, key.split(':'))))
                    if key == slice(None):
                        return _traverse_obj(obj, (..., *path[i + 1:]), _current_depth)
                if not isinstance(key, (int, slice)):
                    return None
                if not isinstance(obj, (list, tuple, LazyList)):
                    if not traverse_string:
                        return None
                    obj = str(obj)
                try:
                    obj = obj[key]
                except IndexError:
                    return None
        return obj

    if isinstance(expected_type, type):
        type_test = lambda val: val if isinstance(val, expected_type) else None
    elif expected_type is not None:
        type_test = expected_type
    else:
        type_test = lambda val: val

    for path in path_list:
        depth = 0
        val = _traverse_obj(obj, path)
        if val is not None:
            if depth:
                for _ in range(depth - 1):
                    val = itertools.chain.from_iterable(v for v in val if v is not None)
                val = [v for v in map(type_test, val) if v is not None]
                if val:
                    return val if get_all else val[0]
            else:
                val = type_test(val)
                if val is not None:
                    return val
    return default
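
# A few illustrative calls:
#   traverse_obj({'a': [{'b': 1}, {'b': 2}]}, ('a', ..., 'b'))              # -> [1, 2]
#   traverse_obj({'a': {'b': '4'}}, ('a', 'b'), expected_type=int_or_none)  # -> 4
#   traverse_obj({}, ('a', 'b'), default='missing')                         # -> 'missing'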

def traverse_dict(dictn, keys, casesense=True):
    write_string('DeprecationWarning: yt_dlp.utils.traverse_dict is deprecated '
                 'and may be removed in a future version. Use yt_dlp.utils.traverse_obj instead')
    return traverse_obj(dictn, keys, casesense=casesense, is_user_input=True, traverse_string=True)

def get_first(obj, keys, **kwargs):
    return traverse_obj(obj, (..., *variadic(keys)), **kwargs, get_all=False)

def variadic(x, allowed_types=(str, bytes, dict)):
    return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,)
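
# e.g. variadic('spam') -> ('spam',) but variadic(['spam']) -> ['spam']; strings,
# bytes and dicts count as scalars here, not as iterables.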

def decode_base(value, digits):
    # This will convert given base-x string to scalar (long or int)
    table = {char: index for index, char in enumerate(digits)}
    result = 0
    base = len(digits)
    for chr in value:
        result *= base
        result += table[chr]
    return result
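
# e.g. decode_base('ff', '0123456789abcdef') -> 255, the inverse of encode_base_n
# for the same digit table.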

def time_seconds(**kwargs):
    t = datetime.datetime.now(datetime.timezone(datetime.timedelta(**kwargs)))
    return t.timestamp()

# create a JSON Web Signature (JWS) with the HS256 algorithm
# the resulting format is in JWS Compact Serialization
# implemented following JWT https://www.rfc-editor.org/rfc/rfc7519.html
# implemented following JWS https://www.rfc-editor.org/rfc/rfc7515.html
def jwt_encode_hs256(payload_data, key, headers={}):
    header_data = {
        'alg': 'HS256',
        'typ': 'JWT',
    }
    if headers:
        header_data.update(headers)
    header_b64 = base64.b64encode(json.dumps(header_data).encode('utf-8'))
    payload_b64 = base64.b64encode(json.dumps(payload_data).encode('utf-8'))
    h = hmac.new(key.encode('utf-8'), header_b64 + b'.' + payload_b64, hashlib.sha256)
    signature_b64 = base64.b64encode(h.digest())
    token = header_b64 + b'.' + payload_b64 + b'.' + signature_b64
    return token


# can be extended in future to verify the signature and parse header and return the algorithm used if it's not HS256
def jwt_decode_hs256(jwt):
    header_b64, payload_b64, signature_b64 = jwt.split('.')
    payload_data = json.loads(base64.urlsafe_b64decode(payload_b64))
    return payload_data
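
# Example: jwt_encode_hs256({'sub': 'abc'}, 'secret') returns the compact
# b'<header>.<payload>.<signature>' form; jwt_decode_hs256() on such a token
# (as str) returns the payload dict without verifying the signature.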

def supports_terminal_sequences(stream):
    if compat_os_name == 'nt':
        from .compat import WINDOWS_VT_MODE  # Must be imported locally
        if not WINDOWS_VT_MODE or get_windows_version() < (10, 0, 10586):
            return False
    elif not os.getenv('TERM'):
        return False
    try:
        return stream.isatty()
    except BaseException:
        return False


_terminal_sequences_re = re.compile('\033\\[[^m]+m')


def remove_terminal_sequences(string):
    return _terminal_sequences_re.sub('', string)

def number_of_digits(number):
    return len('%d' % number)


def join_nonempty(*values, delim='-', from_dict=None):
    if from_dict is not None:
        values = map(from_dict.get, values)
    return delim.join(map(str, filter(None, values)))
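
# e.g. join_nonempty('1080p', None, '', 'dash') -> '1080p-dash'; falsy values
# are dropped before joining with the delimiter.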

def scale_thumbnails_to_max_format_width(formats, thumbnails, url_width_re):
    """
    Find the largest format dimensions in terms of video width and, for each thumbnail:
    * Modify the URL: Match the width with the provided regex and replace with the former width
    * Update dimensions

    This function is useful with video services that scale the provided thumbnails on demand
    """
    _keys = ('width', 'height')
    max_dimensions = max(
        [tuple(format.get(k) or 0 for k in _keys) for format in formats],
        default=(0, 0))
    if not max_dimensions[0]:
        return thumbnails
    return [
        merge_dicts(
            {'url': re.sub(url_width_re, str(max_dimensions[0]), thumbnail['url'])},
            dict(zip(_keys, max_dimensions)), thumbnail)
        for thumbnail in thumbnails
    ]

def parse_http_range(range):
    """ Parse value of "Range" or "Content-Range" HTTP header into tuple. """
    if not range:
        return None, None, None
    crg = re.search(r'bytes[ =](\d+)-(\d+)?(?:/(\d+))?', range)
    if not crg:
        return None, None, None
    return int(crg.group(1)), int_or_none(crg.group(2)), int_or_none(crg.group(3))
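
# e.g. parse_http_range('bytes=0-499') -> (0, 499, None) and
# parse_http_range('bytes 0-499/1234') -> (0, 499, 1234).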

class Config:
    own_args = None
    filename = None
    __initialized = False

    def __init__(self, parser, label=None):
        self._parser, self.label = parser, label
        self._loaded_paths, self.configs = set(), []

    def init(self, args=None, filename=None):
        assert not self.__initialized
        directory = ''
        if filename:
            location = os.path.realpath(filename)
            directory = os.path.dirname(location)
            if location in self._loaded_paths:
                return False
            self._loaded_paths.add(location)

        self.__initialized = True
        self.own_args, self.filename = args, filename
        for location in self._parser.parse_args(args)[0].config_locations or []:
            location = os.path.join(directory, expand_path(location))
            if os.path.isdir(location):
                location = os.path.join(location, 'yt-dlp.conf')
            if not os.path.exists(location):
                self._parser.error(f'config location {location} does not exist')
            self.append_config(self.read_file(location), location)
        return True

    def __str__(self):
        label = join_nonempty(
            self.label, 'config', f'"{self.filename}"' if self.filename else '',
            delim=' ')
        return join_nonempty(
            self.own_args is not None and f'{label[0].upper()}{label[1:]}: {self.hide_login_info(self.own_args)}',
            *(f'\n{c}'.replace('\n', '\n| ')[1:] for c in self.configs),
            delim='\n')

    @staticmethod
    def read_file(filename, default=[]):
        try:
            optionf = open(filename)
        except IOError:
            return default  # silently skip if file is not present
        try:
            # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
            contents = optionf.read()
            if sys.version_info < (3,):
                contents = contents.decode(preferredencoding())
            res = compat_shlex_split(contents, comments=True)
        finally:
            optionf.close()
        return res

    @staticmethod
    def hide_login_info(opts):
        PRIVATE_OPTS = set(['-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'])
        eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')

        def _scrub_eq(o):
            m = eqre.match(o)
            if m:
                return m.group('key') + '=PRIVATE'
            else:
                return o

        opts = list(map(_scrub_eq, opts))
        for idx, opt in enumerate(opts):
            if opt in PRIVATE_OPTS and idx + 1 < len(opts):
                opts[idx + 1] = 'PRIVATE'
        return opts

    def append_config(self, *args, label=None):
        config = type(self)(self._parser, label)
        config._loaded_paths = self._loaded_paths
        if config.init(*args):
            self.configs.append(config)

    @property
    def all_args(self):
        for config in reversed(self.configs):
            yield from config.all_args
        yield from self.own_args or []

    def parse_args(self):
        return self._parser.parse_args(list(self.all_args))

class WebSocketsWrapper():
    """Wraps websockets module to use in non-async scopes"""

    def __init__(self, url, headers=None):
        self.loop = asyncio.events.new_event_loop()
        self.conn = compat_websockets.connect(
            url, extra_headers=headers, ping_interval=None,
            close_timeout=float('inf'), loop=self.loop, ping_timeout=float('inf'))
        atexit.register(self.__exit__, None, None, None)

    def __enter__(self):
        self.pool = self.run_with_loop(self.conn.__aenter__(), self.loop)
        return self

    def send(self, *args):
        self.run_with_loop(self.pool.send(*args), self.loop)

    def recv(self, *args):
        return self.run_with_loop(self.pool.recv(*args), self.loop)

    def __exit__(self, type, value, traceback):
        try:
            return self.run_with_loop(self.conn.__aexit__(type, value, traceback), self.loop)
        finally:
            self.loop.close()
            self._cancel_all_tasks(self.loop)

    # taken from https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py with modifications
    # for contributors: if any new library that uses asyncio needs to run in non-async code,
    # move these functions out of this class
    @staticmethod
    def run_with_loop(main, loop):
        if not asyncio.coroutines.iscoroutine(main):
            raise ValueError(f'a coroutine was expected, got {main!r}')

        try:
            return loop.run_until_complete(main)
        finally:
            loop.run_until_complete(loop.shutdown_asyncgens())
            if hasattr(loop, 'shutdown_default_executor'):
                loop.run_until_complete(loop.shutdown_default_executor())

    @staticmethod
    def _cancel_all_tasks(loop):
        to_cancel = asyncio.tasks.all_tasks(loop)

        if not to_cancel:
            return

        for task in to_cancel:
            task.cancel()

        loop.run_until_complete(
            asyncio.tasks.gather(*to_cancel, loop=loop, return_exceptions=True))

        for task in to_cancel:
            if task.cancelled():
                continue
            if task.exception() is not None:
                loop.call_exception_handler({
                    'message': 'unhandled exception during asyncio.run() shutdown',
                    'exception': task.exception(),
                    'task': task,
                })


has_websockets = bool(compat_websockets)

def merge_headers(*dicts):
    """Merge dicts of http headers case insensitively, prioritizing the latter ones"""
    return {k.title(): v for k, v in itertools.chain.from_iterable(map(dict.items, dicts))}
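
# e.g. merge_headers({'user-agent': 'A', 'X-Foo': '1'}, {'User-Agent': 'B'})
# -> {'User-Agent': 'B', 'X-Foo': '1'}: keys are title-cased and later dicts win.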