from __future__ import unicode_literals

import base64
import calendar
import collections
import datetime
import email.utils
import errno
import functools
import gzip
import io
import itertools
import json
import locale
import os
import platform
import random
import re
import socket
import ssl
import subprocess
import sys
import tempfile
import time
import traceback
import xml.etree.ElementTree
import zlib

try:
    import certifi
    has_certifi = True
except ImportError:
    has_certifi = False

from .compat import (
    compat_HTMLParseError,
    compat_HTMLParser,
    compat_HTTPError,
    compat_brotli,
    compat_chr,
    compat_cookiejar,
    compat_ctypes_WINFUNCTYPE,
    compat_etree_fromstring,
    compat_expanduser,
    compat_html_entities,
    compat_html_entities_html5,
    compat_http_client,
    compat_kwargs,
    compat_os_name,
    compat_str,
    compat_struct_pack,
    compat_urllib_error,
    compat_urllib_parse_urlencode,
    compat_urllib_parse_urlparse,
    compat_urllib_parse_urlunparse,
    compat_urllib_parse_quote,
    compat_urllib_parse_quote_plus,
    compat_urllib_parse_unquote_plus,
    compat_urllib_request,
    compat_urlparse,
    compat_xpath,
)
from .socks import ProxyType, sockssocket

def register_socks_protocols():
    # "Register" SOCKS protocols
    # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
    # URLs with protocols not in urlparse.uses_netloc are not handled correctly
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in compat_urlparse.uses_netloc:
            compat_urlparse.uses_netloc.append(scheme)

# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))

def random_user_agent():
    _USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
    return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)

SUPPORTED_ENCODINGS = [
    'gzip', 'deflate'
]
if compat_brotli:
    SUPPORTED_ENCODINGS.append('br')

std_headers = {
    'User-Agent': random_user_agent(),
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-us,en;q=0.5',
    'Sec-Fetch-Mode': 'navigate',
}


USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}

NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}

KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'f4f', 'f4m', 'm3u8', 'smil')

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))

DATE_FORMATS = (
    '%Y-%m-%d %H:%M:%S.%f',
    '%Y-%m-%d %H:%M:%S:%f',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%b %d %Y at %H:%M:%S',
    '%B %d %Y at %H:%M:%S',
)

DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
])

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])

PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'

def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref

def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non ascii characters unless we
        # use a unicode object
        path_basename = lambda f: os.path.basename(fn).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    args = {
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args.update({'mode': 'w', 'encoding': 'utf-8'})

    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))

    try:
        with tf:
            json.dump(obj, tf, ensure_ascii=False)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        mask = os.umask(0)
        os.umask(mask)
        os.chmod(tf.name, 0o666 & ~mask)
        os.rename(tf.name, fn)
    except Exception:
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise

if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] """
        assert re.match(r'^[a-zA-Z_-]+$', key)
        expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
        return node.find(expr)
else:
    def find_xpath_attr(node, xpath, key, val=None):
        for f in node.findall(compat_xpath(xpath)):
            if key not in f.attrib:
                continue
            if val is None or f.attrib.get(key) == val:
                return f
        return None

# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter


def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)

def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(compat_xpath(xpath))

    if isinstance(xpath, (str, compat_str)):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n

def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text

def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = '%s[@%s]' % (xpath, key) if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]

def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html)


def get_element_html_by_id(id, html):
    """Return the html of the tag with the specified ID in the passed HTML document"""
    return get_element_html_by_attribute('id', id, html)


def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None

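
# Illustrative usage (example added for clarity): matching is delegated to
# get_elements_by_class below, which matches a whole class token, so
#   get_element_by_class('foo', '<div class="a foo b">text</div>')
# should return 'text'.
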
def get_element_html_by_class(class_name, html):
    """Return the html of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_html_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_by_attribute(attribute, value, html, escape_value=True):
    retval = get_elements_by_attribute(attribute, value, html, escape_value)
    return retval[0] if retval else None


def get_element_html_by_attribute(attribute, value, html, escape_value=True):
    retval = get_elements_html_by_attribute(attribute, value, html, escape_value)
    return retval[0] if retval else None


def get_elements_by_class(class_name, html):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_html_by_class(class_name, html):
    """Return the html of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_html_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_by_attribute(*args, **kwargs):
    """Return the content of the tag with the specified attribute in the passed HTML document"""
    return [content for content, _ in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_html_by_attribute(*args, **kwargs):
    """Return the html of the tag with the specified attribute in the passed HTML document"""
    return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]

def get_elements_text_and_html_by_attribute(attribute, value, html, escape_value=True):
    """
    Return the text (content) and the html (whole) of the tag with the specified
    attribute in the passed HTML document
    """

    value_quote_optional = '' if re.match(r'''[\s"'`=<>]''', value) else '?'

    value = re.escape(value) if escape_value else value

    partial_element_re = r'''(?x)
        <(?P<tag>[a-zA-Z0-9:._-]+)
        (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
        \s%(attribute)s\s*=\s*(?P<_q>['"]%(vqo)s)(?-x:%(value)s)(?P=_q)
        ''' % {'attribute': re.escape(attribute), 'value': value, 'vqo': value_quote_optional}

    for m in re.finditer(partial_element_re, html):
        content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():])

        yield (
            unescapeHTML(re.sub(r'^(?P<q>["\'])(?P<content>.*)(?P=q)$', r'\g<content>', content, flags=re.DOTALL)),
            whole,
        )

class HTMLBreakOnClosingTagParser(compat_HTMLParser):
    """
    HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
    closing tag for the first opening tag it has encountered, and can be used
    as a context manager
    """

    class HTMLBreakOnClosingTagException(Exception):
        pass

    def __init__(self):
        self.tagstack = collections.deque()
        compat_HTMLParser.__init__(self)

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def close(self):
        # handle_endtag does not return upon raising HTMLBreakOnClosingTagException,
        # so data remains buffered; we no longer have any interest in it, thus
        # override this method to discard it
        pass

    def handle_starttag(self, tag, _):
        self.tagstack.append(tag)

    def handle_endtag(self, tag):
        if not self.tagstack:
            raise compat_HTMLParseError('no tags in the stack')
        while self.tagstack:
            inner_tag = self.tagstack.pop()
            if inner_tag == tag:
                break
        else:
            raise compat_HTMLParseError(f'matching opening tag for closing {tag} tag not found')
        if not self.tagstack:
            raise self.HTMLBreakOnClosingTagException()

def get_element_text_and_html_by_tag(tag, html):
    """
    For the first element with the specified tag in the passed HTML document
    return its content (text) and the whole element (html)
    """

    def find_or_raise(haystack, needle, exc):
        try:
            return haystack.index(needle)
        except ValueError:
            raise exc

    closing_tag = f'</{tag}>'
    whole_start = find_or_raise(
        html, f'<{tag}', compat_HTMLParseError(f'opening {tag} tag not found'))
    content_start = find_or_raise(
        html[whole_start:], '>', compat_HTMLParseError(f'malformed opening {tag} tag'))
    content_start += whole_start + 1
    with HTMLBreakOnClosingTagParser() as parser:
        parser.feed(html[whole_start:content_start])
        if not parser.tagstack or parser.tagstack[0] != tag:
            raise compat_HTMLParseError(f'parser did not match opening {tag} tag')
        offset = content_start
        while offset < len(html):
            next_closing_tag_start = find_or_raise(
                html[offset:], closing_tag,
                compat_HTMLParseError(f'closing {tag} tag not found'))
            next_closing_tag_end = next_closing_tag_start + len(closing_tag)
            try:
                parser.feed(html[offset:offset + next_closing_tag_end])
                offset += next_closing_tag_end
            except HTMLBreakOnClosingTagParser.HTMLBreakOnClosingTagException:
                return html[content_start:offset + next_closing_tag_start], \
                    html[whole_start:offset + next_closing_tag_end]
        raise compat_HTMLParseError('unexpected end of html')

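
# Illustrative usage (example added for clarity): the parser above balances
# nested tags of the same name, and for a simple document
#   get_element_text_and_html_by_tag('b', '<p><b>bold</b> text</p>')
# should return ('bold', '<b>bold</b>').
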
class HTMLAttributeParser(compat_HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""

    def __init__(self):
        self.attrs = {}
        compat_HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)


class HTMLListAttrsParser(compat_HTMLParser):
    """HTML parser to gather the attributes for the elements of a list"""

    def __init__(self):
        compat_HTMLParser.__init__(self)
        self.items = []
        self._level = 0

    def handle_starttag(self, tag, attrs):
        if tag == 'li' and self._level == 0:
            self.items.append(dict(attrs))
        self._level += 1

    def handle_endtag(self, tag):
        self._level -= 1

def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&#98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
    but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
    """
    parser = HTMLAttributeParser()
    try:
        parser.feed(html_element)
        parser.close()
    # Older Python may throw HTMLParseError in case of malformed HTML
    except compat_HTMLParseError:
        pass
    return parser.attrs

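
# Illustrative usage (example added for clarity):
#   extract_attributes('<e x="y" empty= noval>')
# should return {'x': 'y', 'empty': '', 'noval': None}.
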
def parse_list(webpage):
    """Given a string for a series of HTML <li> elements,
    return a list of dictionaries of their attributes"""
    parser = HTMLListAttrsParser()
    parser.feed(webpage)
    parser.close()
    return parser.items

def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    html = re.sub(r'\s+', ' ', html)
    html = re.sub(r'(?u)\s?<\s?br\s?/?\s?>\s?', '\n', html)
    html = re.sub(r'(?u)<\s?/\s?p\s?>\s?<\s?p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()

def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = locked_file(filename, open_mode, block=False).open()
        return (stream, filename)
    except (IOError, OSError) as err:
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars
        alt_filename = sanitize_path(filename)
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller
            stream = locked_file(filename, open_mode, block=False).open()
            return (stream, alt_filename)

def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp

def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
    """Sanitizes a string so it could be used as part of a filename.
    @param restricted   Use a stricter subset of allowed characters
    @param is_id        Whether this is an ID that should be kept unchanged if possible.
                        If unset, yt-dlp's new sanitization rules are in effect
    """
    if s == '':
        return ''

    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        elif not restricted and char == '\n':
            return '\0 '
        elif char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '\0_\0-' if restricted else '\0 \0-'
        elif char in '\\/|*<>':
            return '\0_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace() or ord(char) > 127):
            return '\0_'
        return char

    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)  # Handle timestamps
    result = ''.join(map(replace_insane, s))
    if is_id is NO_DEFAULT:
        result = re.sub('(\0.)(?:(?=\\1)..)+', r'\1', result)  # Remove repeated substitute chars
        STRIP_RE = '(?:\0.|[ _-])*'
        result = re.sub(f'^\0.{STRIP_RE}|{STRIP_RE}\0.$', '', result)  # Remove substitute chars from start/end
    result = result.replace('\0', '') or '_'

    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result

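
# Illustrative behaviour (examples added for clarity): in restricted mode the
# substitution table above maps ':' to '_-' and spaces to '_', while timestamps
# keep their digits with ':' turned into '_':
#   sanitize_filename('foo: bar', restricted=True)  -> 'foo_-_bar'
#   sanitize_filename('12:34:56')                   -> '12_34_56'
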
def sanitize_path(s, force=False):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform == 'win32':
        force = False
        drive_or_unc, _ = os.path.splitdrive(s)
        if sys.version_info < (2, 7) and not drive_or_unc:
            drive_or_unc, _ = os.path.splitunc(s)
    elif force:
        drive_or_unc = ''
    else:
        return s

    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    elif force and s[0] == os.path.sep:
        sanitized_path.insert(0, os.path.sep)
    return os.path.join(*sanitized_path)

def sanitize_url(url):
    # Prepend protocol-less URLs with `http:` scheme in order to mitigate
    # the number of unwanted failures due to missing protocol
    if url.startswith('//'):
        return 'http:%s' % url
    # Fix some common typos seen so far
    COMMON_TYPOS = (
        # https://github.com/ytdl-org/youtube-dl/issues/15649
        (r'^httpss://', r'https://'),
        # https://bx1.be/lives/direct-tv/
        (r'^rmtp([es]?)://', r'rtmp\1://'),
    )
    for mistake, fixup in COMMON_TYPOS:
        if re.match(mistake, url):
            return re.sub(mistake, fixup, url)
    return url

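
# Illustrative behaviour (examples added for clarity):
#   sanitize_url('//example.com/x')      -> 'http://example.com/x'
#   sanitize_url('httpss://example.com') -> 'https://example.com'
#   sanitize_url('rmtp://example.com')   -> 'rtmp://example.com'
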
def extract_basic_auth(url):
    parts = compat_urlparse.urlsplit(url)
    if parts.username is None:
        return url, None
    url = compat_urlparse.urlunsplit(parts._replace(netloc=(
        parts.hostname if parts.port is None
        else '%s:%d' % (parts.hostname, parts.port))))
    auth_payload = base64.b64encode(
        ('%s:%s' % (parts.username, parts.password or '')).encode('utf-8'))
    return url, 'Basic ' + auth_payload.decode('utf-8')

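
# Illustrative behaviour (example added for clarity): the credentials are moved
# out of the URL into an Authorization header value, e.g.
#   extract_basic_auth('http://user:pass@example.com/x')
# should return ('http://example.com/x', 'Basic dXNlcjpwYXNz').
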
def sanitized_Request(url, *args, **kwargs):
    url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
    if auth_header is not None:
        headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
        headers['Authorization'] = auth_header
    return compat_urllib_request.Request(url, *args, **kwargs)


def expand_path(s):
    """Expand shell variables and ~"""
    return os.path.expandvars(compat_expanduser(s))

def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    res = []
    for el in iterable:
        if el not in res:
            res.append(el)
    return res

def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in compat_html_entities_html5:
        return compat_html_entities_html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/ytdl-org/youtube-dl/issues/7518
        try:
            return compat_chr(int(numstr, base))
        except ValueError:
            pass

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert type(s) == compat_str

    return re.sub(
        r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)

def escapeHTML(text):
    return (
        text
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;')
        .replace('"', '&quot;')
        .replace("'", '&#39;')
    )

def process_communicate_or_kill(p, *args, **kwargs):
    try:
        return p.communicate(*args, **kwargs)
    except BaseException:  # Including KeyboardInterrupt
        p.kill()
        p.wait()
        raise

class Popen(subprocess.Popen):
    if sys.platform == 'win32':
        _startupinfo = subprocess.STARTUPINFO()
        _startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    else:
        _startupinfo = None

    def __init__(self, *args, **kwargs):
        super(Popen, self).__init__(*args, **kwargs, startupinfo=self._startupinfo)

    def communicate_or_kill(self, *args, **kwargs):
        return process_communicate_or_kill(self, *args, **kwargs)

def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding

def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """

    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    # Pass '' directly to use Unicode APIs on Windows 2000 and up
    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        return s

    # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
    if sys.platform.startswith('java'):
        return s

    return s.encode(get_subprocess_encoding(), 'ignore')


def decodeFilename(b, for_subprocess=False):
    if sys.version_info >= (3, 0):
        return b

    if not isinstance(b, bytes):
        return b

    return b.decode(get_subprocess_encoding(), 'ignore')

def encodeArgument(s):
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)


def decodeArgument(b):
    return decodeFilename(b, True)


def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval

_timetuple = collections.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))


def timetuple_from_msec(msec):
    secs, msec = divmod(msec, 1000)
    mins, secs = divmod(secs, 60)
    hrs, mins = divmod(mins, 60)
    return _timetuple(hrs, mins, secs, msec)

def formatSeconds(secs, delim=':', msec=False):
    time = timetuple_from_msec(secs * 1000)
    if time.hours:
        ret = '%d%s%02d%s%02d' % (time.hours, delim, time.minutes, delim, time.seconds)
    elif time.minutes:
        ret = '%d%s%02d' % (time.minutes, delim, time.seconds)
    else:
        ret = '%d' % time.seconds
    return '%s.%03d' % (ret, time.milliseconds) if msec else ret

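
# Illustrative behaviour (examples added for clarity; results follow from the
# divmod chain in timetuple_from_msec above):
#   formatSeconds(3661)          -> '1:01:01'
#   formatSeconds(61, msec=True) -> '1:01.000'
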
def _ssl_load_windows_store_certs(ssl_context, storename):
    # Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
    try:
        certs = [cert for cert, encoding, trust in ssl.enum_certificates(storename)
                 if encoding == 'x509_asn' and (
                     trust is True or ssl.Purpose.SERVER_AUTH.oid in trust)]
    except PermissionError:
        return
    for cert in certs:
        try:
            ssl_context.load_verify_locations(cadata=cert)
        except ssl.SSLError:
            pass

def make_HTTPS_handler(params, **kwargs):
    opts_check_certificate = not params.get('nocheckcertificate')
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = opts_check_certificate
    if params.get('legacyserverconnect'):
        context.options |= 4  # SSL_OP_LEGACY_SERVER_CONNECT
    context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
    if opts_check_certificate:
        if has_certifi and 'no-certifi' not in params.get('compat_opts', []):
            context.load_verify_locations(cafile=certifi.where())
        else:
            try:
                context.load_default_certs()
                # Work around the issue in load_default_certs when there are bad certificates. See:
                # https://github.com/yt-dlp/yt-dlp/issues/1060,
                # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
            except ssl.SSLError:
                # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
                if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
                    # Create a new context to discard any certificates that were already loaded
                    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
                    context.check_hostname, context.verify_mode = True, ssl.CERT_REQUIRED
                    for storename in ('CA', 'ROOT'):
                        _ssl_load_windows_store_certs(context, storename)
                context.set_default_verify_paths()
    return YoutubeDLHTTPSHandler(params, context=context, **kwargs)

def bug_reports_message(before=';'):
    msg = ('please report this issue on  https://github.com/yt-dlp/yt-dlp , '
           'filling out the appropriate issue template. '
           'Confirm you are on the latest version using  yt-dlp -U')

    before = before.rstrip()
    if not before or before.endswith(('.', '!', '?')):
        msg = msg[0].title() + msg[1:]

    return (before + ' ' if before else '') + msg

class YoutubeDLError(Exception):
    """Base exception for YoutubeDL errors."""
    msg = None

    def __init__(self, msg=None):
        if msg is not None:
            self.msg = msg
        elif self.msg is None:
            self.msg = type(self).__name__
        super().__init__(self.msg)


network_exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
    network_exceptions.append(ssl.CertificateError)
network_exceptions = tuple(network_exceptions)

class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
        """
        if sys.exc_info()[0] in network_exceptions:
            expected = True

        self.orig_msg = str(msg)
        self.traceback = tb
        self.expected = expected
        self.cause = cause
        self.video_id = video_id
        self.ie = ie
        self.exc_info = sys.exc_info()  # preserve original exception

        super(ExtractorError, self).__init__(''.join((
            format_field(ie, template='[%s] '),
            format_field(video_id, template='%s: '),
            msg,
            format_field(cause, template=' (caused by %r)'),
            '' if expected else bug_reports_message())))

    def format_traceback(self):
        return join_nonempty(
            self.traceback and ''.join(traceback.format_tb(self.traceback)),
            self.cause and ''.join(traceback.format_exception(None, self.cause, self.cause.__traceback__)[1:]),
            delim='\n') or None

class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super(UnsupportedError, self).__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class GeoRestrictedError(ExtractorError):
    """Geographic restriction Error exception.

    This exception may be thrown when a video is not available from your
    geographic location due to geographic restrictions imposed by a website.
    """

    def __init__(self, msg, countries=None, **kwargs):
        kwargs['expected'] = True
        super(GeoRestrictedError, self).__init__(msg, **kwargs)
        self.countries = countries

class DownloadError(YoutubeDLError):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info


class EntryNotInPlaylist(YoutubeDLError):
    """Entry not in playlist exception.

    This exception will be thrown by YoutubeDL when a requested entry
    is not found in the playlist info_dict
    """
    msg = 'Entry not found in info'


class SameFileError(YoutubeDLError):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    msg = 'Fixed output name but more than one file to download'

    def __init__(self, filename=None):
        if filename is not None:
            self.msg += f': {filename}'
        super().__init__(self.msg)


class PostProcessingError(YoutubeDLError):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """

class DownloadCancelled(YoutubeDLError):
    """ Exception raised when the download queue should be interrupted """
    msg = 'The download was cancelled'


class ExistingVideoReached(DownloadCancelled):
    """ --break-on-existing triggered """
    msg = 'Encountered a video that is already in the archive, stopping due to --break-on-existing'


class RejectedVideoReached(DownloadCancelled):
    """ --break-on-reject triggered """
    msg = 'Encountered a video that did not match filter, stopping due to --break-on-reject'


class MaxDownloadsReached(DownloadCancelled):
    """ --max-downloads limit has been reached. """
    msg = 'Maximum number of downloads reached, stopping due to --max-downloads'


class ReExtractInfo(YoutubeDLError):
    """ Video info needs to be re-extracted. """

    def __init__(self, msg, expected=False):
        super().__init__(msg)
        self.expected = expected


class ThrottledDownload(ReExtractInfo):
    """ Download speed below --throttled-rate. """
    msg = 'The download speed is below throttle limit'

    def __init__(self):
        super().__init__(self.msg, expected=False)

class UnavailableVideoError(YoutubeDLError):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    msg = 'Unable to download video'

    def __init__(self, err=None):
        if err is not None:
            self.msg += f': {err}'
        super().__init__(self.msg)


class ContentTooShortError(YoutubeDLError):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        super(ContentTooShortError, self).__init__(
            'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
        )
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected

class XAttrMetadataError(YoutubeDLError):
    def __init__(self, code=None, msg='Unknown error'):
        super(XAttrMetadataError, self).__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        if (self.code in (errno.ENOSPC, errno.EDQUOT)
                or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'


class XAttrUnavailableError(YoutubeDLError):
    pass

def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
    # expected HTTP responses to meet HTTP/1.0 or later (see also
    # https://github.com/ytdl-org/youtube-dl/issues/6727)
    if sys.version_info < (3, 0):
        kwargs['strict'] = True
    hc = http_class(*args, **compat_kwargs(kwargs))
    source_address = ydl_handler._params.get('source_address')

    if source_address is not None:
        # This is to workaround _create_connection() from socket where it will try all
        # address data from getaddrinfo() including IPv6. This filters the result from
        # getaddrinfo() based on the source_address value.
        # This is based on the cpython socket.create_connection() function.
        # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
        def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
            host, port = address
            err = None
            addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
            af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
            ip_addrs = [addr for addr in addrs if addr[0] == af]
            if addrs and not ip_addrs:
                ip_version = 'v4' if af == socket.AF_INET else 'v6'
                raise socket.error(
                    "No remote IP%s addresses available for connect, can't use '%s' as source address"
                    % (ip_version, source_address[0]))
            for res in ip_addrs:
                af, socktype, proto, canonname, sa = res
                sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                        sock.settimeout(timeout)
                    sock.bind(source_address)
                    sock.connect(sa)
                    err = None  # Explicitly break reference cycle
                    return sock
                except socket.error as _:
                    err = _
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            else:
                raise socket.error('getaddrinfo returns an empty list')
        if hasattr(hc, '_create_connection'):
            hc._create_connection = _create_connection
        sa = (source_address, 0)
        if hasattr(hc, 'source_address'):  # Python 2.7+
            hc.source_address = sa
        else:  # Python 2.6
            def _hc_connect(self, *args, **kwargs):
                sock = _create_connection(
                    (self.host, self.port), self.timeout, sa)
                if is_https:
                    self.sock = ssl.wrap_socket(
                        sock, self.key_file, self.cert_file,
                        ssl_version=ssl.PROTOCOL_TLSv1)
                else:
                    self.sock = sock
            hc.connect = functools.partial(_hc_connect, hc)

    return hc

def handle_youtubedl_headers(headers):
    filtered_headers = headers

    if 'Youtubedl-no-compression' in filtered_headers:
        filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
        del filtered_headers['Youtubedl-no-compression']

    return filtered_headers

class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        conn_class = compat_http_client.HTTPConnection

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        if not data:
            return data
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def brotli(data):
        if not data:
            return data
        return compat_brotli.decompress(data)

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters; however, this is not
        # always respected by websites: some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412]).
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in self._params.get('http_headers', std_headers).items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        if 'Accept-encoding' not in req.headers:
            req.add_header('Accept-encoding', ', '.join(SUPPORTED_ENCODINGS))

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # brotli
        if resp.headers.get('Content-encoding', '') == 'br':
            resp = compat_urllib_request.addinfourl(
                io.BytesIO(self.brotli(resp.read())), old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/ytdl-org/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                if sys.version_info >= (3, 0):
                    location = location.encode('iso-8859-1').decode('utf-8')
                else:
                    location = location.decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    if sys.version_info < (3, 0):
                        location_escaped = location_escaped.encode('utf-8')
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response

def make_socks_conn_class(base_class, socks_proxy):
    assert issubclass(base_class, (
        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))

    url_components = compat_urlparse.urlparse(socks_proxy)
    if url_components.scheme.lower() == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif url_components.scheme.lower() in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif url_components.scheme.lower() == 'socks4a':
        socks_type = ProxyType.SOCKS4A

    def unquote_if_non_empty(s):
        if not s:
            return s
        return compat_urllib_parse_unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if type(self.timeout) in (int, float):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            if isinstance(self, compat_http_client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection

class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        conn_class = self._https_conn_class

        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, True),
            req, **kwargs)

class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
    """
    See [1] for cookie file format.

    1. https://curl.haxx.se/docs/http-cookies.html
    """
    _HTTPONLY_PREFIX = '#HttpOnly_'
    _ENTRY_LEN = 7
    _HEADER = '''# Netscape HTTP Cookie File
# This file is generated by yt-dlp.  Do not edit.

'''
    _CookieFileEntry = collections.namedtuple(
        'CookieFileEntry',
        ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))

    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
        """
        Save cookies to a file.

        Most of the code is taken from CPython 3.8 and slightly adapted
        to support cookie files with UTF-8 in both python 2 and 3.
        """
        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)

        # Store session cookies with `expires` set to 0 instead of an empty
        # string
        for cookie in self:
            if cookie.expires is None:
                cookie.expires = 0

        with io.open(filename, 'w', encoding='utf-8') as f:
            f.write(self._HEADER)
            now = time.time()
            for cookie in self:
                if not ignore_discard and cookie.discard:
                    continue
                if not ignore_expires and cookie.is_expired(now):
                    continue
                if cookie.secure:
                    secure = 'TRUE'
                else:
                    secure = 'FALSE'
                if cookie.domain.startswith('.'):
                    initial_dot = 'TRUE'
                else:
                    initial_dot = 'FALSE'
                if cookie.expires is not None:
                    expires = compat_str(cookie.expires)
                else:
                    expires = ''
                if cookie.value is None:
                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
                    # with no name, whereas http.cookiejar regards it as a
                    # cookie with no value.
                    name = ''
                    value = cookie.name
                else:
                    name = cookie.name
                    value = cookie.value
                f.write(
                    '\t'.join([cookie.domain, initial_dot, cookie.path,
                               secure, expires, name, value]) + '\n')

    def load(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Load cookies from a file."""
        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)

        def prepare_line(line):
            if line.startswith(self._HTTPONLY_PREFIX):
                line = line[len(self._HTTPONLY_PREFIX):]
            # comments and empty lines are fine
            if line.startswith('#') or not line.strip():
                return line
            cookie_list = line.split('\t')
            if len(cookie_list) != self._ENTRY_LEN:
                raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list))
            cookie = self._CookieFileEntry(*cookie_list)
            if cookie.expires_at and not cookie.expires_at.isdigit():
                raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
            return line

        cf = io.StringIO()
        with io.open(filename, encoding='utf-8') as f:
            for line in f:
                try:
                    cf.write(prepare_line(line))
                except compat_cookiejar.LoadError as e:
                    write_string(
                        'WARNING: skipping cookie file entry due to %s: %r\n'
                        % (e, line), sys.stderr)
                    continue
        cf.seek(0)
        self._really_load(cf, filename, ignore_discard, ignore_expires)
        # Session cookies are denoted by either `expires` field set to
        # an empty string or 0. MozillaCookieJar only recognizes the former
        # (see [1]). So we need to force the latter to be recognized as session
        # cookies on our own.
        # Session cookies may be important for cookies-based authentication,
        # e.g. usually, when user does not check 'Remember me' check box while
        # logging in on a site, some important cookies are stored as session
        # cookies so that not recognizing them will result in failed login.
        # 1. https://bugs.python.org/issue17164
        for cookie in self:
            # Treat `expires=0` cookies as session cookies
            if cookie.expires == 0:
                cookie.expires = None
                cookie.discard = True

class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        # Python 2 will choke on next HTTP request in row if there are non-ASCII
        # characters in Set-Cookie HTTP header of last response (see
        # https://github.com/ytdl-org/youtube-dl/issues/6769).
        # In order to at least prevent crashing we will percent encode Set-Cookie
        # header before HTTPCookieProcessor starts processing it.
        # if sys.version_info < (3, 0) and response.headers:
        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
        #         set_cookie = response.headers.get(set_cookie_header)
        #         if set_cookie:
        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
        #             if set_cookie != set_cookie_escaped:
        #                 del response.headers[set_cookie_header]
        #                 response.headers[set_cookie_header] = set_cookie_escaped
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response

class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
    """YoutubeDL redirect handler

    The code is based on HTTPRedirectHandler implementation from CPython [1].

    This redirect handler solves two issues:
     - ensures redirect URL is always unicode under python 2
     - introduces support for experimental HTTP response status code
       308 Permanent Redirect [2] used by some sites [3]

    1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
    2. https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308
    3. https://github.com/ytdl-org/youtube-dl/issues/28768
    """

    http_error_301 = http_error_303 = http_error_307 = http_error_308 = compat_urllib_request.HTTPRedirectHandler.http_error_302

    def redirect_request(self, req, fp, code, msg, headers, newurl):
        """Return a Request or None in response to a redirect.

        This is called by the http_error_30x methods when a
        redirection response is received.  If a redirection should
        take place, return a new Request to allow http_error_30x to
        perform the redirect.  Otherwise, raise HTTPError if no-one
        else should try to handle this url.  Return None if you can't
        but another Handler might.
        """
        m = req.get_method()
        if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
                 or code in (301, 302, 303) and m == "POST")):
            raise compat_HTTPError(req.full_url, code, msg, headers, fp)
        # Strictly (according to RFC 2616), 301 or 302 in response to
        # a POST MUST NOT cause a redirection without confirmation
        # from the user (of urllib.request, in this case).  In practice,
        # essentially all clients do redirect in this case, so we do
        # the same.

        # On python 2 urlh.geturl() may sometimes return redirect URL
        # as byte string instead of unicode. This workaround allows
        # to force it always return unicode.
        if sys.version_info[0] < 3:
            newurl = compat_str(newurl)

        # Be conciliant with URIs containing a space.  This is mainly
        # redundant with the more complete encoding done in http_error_302(),
        # but it is kept for compatibility with other callers.
        newurl = newurl.replace(' ', '%20')

        CONTENT_HEADERS = ("content-length", "content-type")
        # NB: don't use dict comprehension for python 2.6 compatibility
        newheaders = dict((k, v) for k, v in req.headers.items()
                          if k.lower() not in CONTENT_HEADERS)
        return compat_urllib_request.Request(
            newurl, headers=newheaders, origin_req_host=req.origin_req_host,
            unverifiable=True)

def extract_timezone(date_str):
    m = re.search(
        r'''(?x)
            ^.{8,}?                                              # >=8 char non-TZ prefix, if present
            (?P<tz>Z|                                            # just the UTC Z, or
                (?:(?<=.\b\d{4}|\b\d{2}:\d\d)|                   # preceded by 4 digits or hh:mm or
                   (?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d))     # not preceded by 3 alpha word or >= 4 alpha or 2 digits
                [ ]?                                             # optional space
                (?P<sign>\+|-)                                   # +/-
                (?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})       # hh[:]mm
            $)
        ''', date_str)
    if not m:
        timezone = datetime.timedelta()
    else:
        date_str = date_str[:-len(m.group('tz'))]
        if not m.group('sign'):
            timezone = datetime.timedelta()
        else:
            sign = 1 if m.group('sign') == '+' else -1
            timezone = datetime.timedelta(
                hours=sign * int(m.group('hours')),
                minutes=sign * int(m.group('minutes')))
    return timezone, date_str

def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        timezone, date_str = extract_timezone(date_str)

    try:
        date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
        dt = datetime.datetime.strptime(date_str, date_format) - timezone
        return calendar.timegm(dt.timetuple())
    except ValueError:
        pass

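
# Illustrative behaviour (examples added for clarity):
#   parse_iso8601('2023-01-02T03:04:05Z')      -> 1672628645
#   parse_iso8601('2023-01-02T03:04:05+01:00') -> 1672625045
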
def date_formats(day_first=True):
    return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST

def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None
    upload_date = None
    # Replace commas
    date_str = date_str.replace(',', ' ')
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
    _, date_str = extract_timezone(date_str)

    for expression in date_formats(day_first):
        try:
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
        except ValueError:
            pass
    if upload_date is None:
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            try:
                upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
            except ValueError:
                pass
    if upload_date is not None:
        return compat_str(upload_date)

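
# Illustrative behaviour (examples added for clarity, given the
# DATE_FORMATS_DAY_FIRST/MONTH_FIRST lists above):
#   unified_strdate('02/01/2023')                  -> '20230102' (day first)
#   unified_strdate('02/01/2023', day_first=False) -> '20230201'
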
def unified_timestamp(date_str, day_first=True):
    if date_str is None:
        return None

    date_str = re.sub(r'[,|]', '', date_str)

    pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
    timezone, date_str = extract_timezone(date_str)

    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    # Remove unrecognized timezones from ISO 8601 alike timestamps
    m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
    if m:
        date_str = date_str[:-len(m.group('tz'))]

    # Python only supports microseconds, so remove nanoseconds
    m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
    if m:
        date_str = m.group(1)

    for expression in date_formats(day_first):
        try:
            dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
            return calendar.timegm(dt.timetuple())
        except ValueError:
            pass

    timetuple = email.utils.parsedate_tz(date_str)
    if timetuple:
        return calendar.timegm(timetuple) + pm_delta * 3600

def determine_ext(url, default_ext='unknown_video'):
    if url is None or '.' not in url:
        return default_ext
    guess = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', guess):
        return guess
    # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
    elif guess.rstrip('/') in KNOWN_EXTENSIONS:
        return guess.rstrip('/')
    else:
        return default_ext

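
# Illustrative behaviour (examples added for clarity):
#   determine_ext('http://example.com/video.mp4?download=1')  -> 'mp4'
#   determine_ext('http://example.com/foo/bar.mp4/?download') -> 'mp4'
#   determine_ext('http://example.com/page')                  -> 'unknown_video'
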
def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
    return replace_extension(filename, sub_lang + '.' + sub_format, expected_real_ext)

def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today|yesterday|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?

    format: string date format used to return datetime object from
    precision: round the time portion of a datetime object.
                auto|microsecond|second|minute|hour|day.
                auto: round to the unit provided in date_str (if applicable).
    """
    auto_precision = False
    if precision == 'auto':
        auto_precision = True
        precision = 'microsecond'
    today = datetime_round(datetime.datetime.utcnow(), precision)
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match(
        r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)(s)?',
        date_str)
    if match is not None:
        start_time = datetime_from_str(match.group('start'), precision, format)
        time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1)
        unit = match.group('unit')
        if unit == 'month' or unit == 'year':
            new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time)
            unit = 'day'
        else:
            if unit == 'week':
                unit = 'day'
                time *= 7
            delta = datetime.timedelta(**{unit + 's': time})
            new_date = start_time + delta
        if auto_precision:
            return datetime_round(new_date, unit)
        return new_date

    return datetime_round(datetime.datetime.strptime(date_str, format), precision)

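
# Illustrative behaviour (examples added for clarity):
#   datetime_from_str('now')      -> the current UTC time
#   datetime_from_str('now-1day') -> one day ago, rounded to the nearest day
#                                    (auto precision rounds to the given unit)
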
def date_from_str(date_str, format='%Y%m%d', strict=False):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today|yesterday|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?

    If "strict", only (now|today)[+-][0-9](day|week|month|year)(s)? is allowed

    format: string date format used to return datetime object from
    """
    if strict and not re.fullmatch(r'\d{8}|(now|today)[+-]\d+(day|week|month|year)(s)?', date_str):
        raise ValueError(f'Invalid date format {date_str}')
    return datetime_from_str(date_str, precision='microsecond', format=format).date()

def datetime_add_months(dt, months):
    """Increment/Decrement a datetime object by months."""
    month = dt.month + months - 1
    year = dt.year + month // 12
    month = month % 12 + 1
    day = min(dt.day, calendar.monthrange(year, month)[1])
    return dt.replace(year, month, day)

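
# Illustrative behaviour (example added for clarity): the day is clamped to the
# length of the target month, so
#   datetime_add_months(datetime.date(2020, 1, 31), 1) -> datetime.date(2020, 2, 29)
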
def datetime_round(dt, precision='day'):
    """
    Round a datetime object's time to a specific precision
    """
    if precision == 'microsecond':
        return dt

    unit_seconds = {
        'day': 86400,
        'hour': 3600,
        'minute': 60,
        'second': 1,
    }
    roundto = lambda x, n: ((x + n / 2) // n) * n
    timestamp = calendar.timegm(dt.timetuple())
    return datetime.datetime.utcfromtimestamp(roundto(timestamp, unit_seconds[precision]))

def hyphenate_date(date_str):
    """
    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is not None:
        return '-'.join(match.groups())
    else:
        return date_str

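
# Illustrative behaviour (examples added for clarity):
#   hyphenate_date('20231105')   -> '2023-11-05'
#   hyphenate_date('2023-11-05') -> '2023-11-05' (no 8-digit match, unchanged)
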
class DateRange(object):
    """Represents a time interval between two dates"""

    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        if start is not None:
            self.start = date_from_str(start, strict=True)
        else:
            self.start = datetime.datetime.min.date()
        if end is not None:
            self.end = date_from_str(end, strict=True)
        else:
            self.end = datetime.datetime.max.date()
        if self.start > self.end:
            raise ValueError('Date range: "%s" , the start date must be before the end date' % self)

    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)

    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end

    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())

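
# Illustrative usage (examples added for clarity):
#   '20230115' in DateRange('20230101', '20230131')  -> True
#   DateRange.day('20230101') spans exactly that one day
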
def platform_name():
    """ Returns the platform name as a compat_str """
    res = platform.platform()
    if isinstance(res, bytes):
        res = res.decode(preferredencoding())

    assert isinstance(res, compat_str)
    return res

def get_windows_version():
    ''' Get Windows version. None if it's not running on Windows '''
    if compat_os_name == 'nt':
        return version_tuple(platform.win32_ver()[1])
    else:
        return None

def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes.wintypes

    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False

    GetStdHandle = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        ('GetStdHandle', ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    WriteConsoleW = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        ('GetConsoleMode', ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
                or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):
        return False

    def next_nonbmp_pos(s):
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)

    while s:
        count = min(next_nonbmp_pos(s), 1024)
        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True



def write_string(s, out=None, encoding=None):
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        if _windows_write_string(s, out):
            return

    if ('b' in getattr(out, 'mode', '')
            or sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        byt = s.encode(encoding or preferredencoding(), 'ignore')
        out.write(byt)
    elif hasattr(out, 'buffer'):
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
        byt = s.encode(enc, 'ignore')
        out.buffer.write(byt)
    else:
        out.write(s)
    out.flush()


def bytes_to_intlist(bs):
    if not bs:
        return []
    if isinstance(bs[0], int):  # Python 3
        return list(bs)
    else:
        return [ord(c) for c in bs]


def intlist_to_bytes(xs):
    if not xs:
        return b''
    return compat_struct_pack('%dB' % len(xs), *xs)


# Cross-platform file locking
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive, block):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)

        if not LockFileEx(msvcrt.get_osfhandle(f.fileno()),
                          (0x2 if exclusive else 0x0) | (0x0 if block else 0x1),
                          0, whole_low, whole_high, f._lock_file_overlapped_p):
            raise BlockingIOError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())

else:
    try:
        import fcntl

        def _lock_file(f, exclusive, block):
            try:
                fcntl.flock(f,
                            fcntl.LOCK_SH if not exclusive
                            else fcntl.LOCK_EX if block
                            else fcntl.LOCK_EX | fcntl.LOCK_NB)
            except BlockingIOError:
                raise
            except OSError:  # AOSP does not have flock()
                fcntl.lockf(f,
                            fcntl.LOCK_SH if not exclusive
                            else fcntl.LOCK_EX if block
                            else fcntl.LOCK_EX | fcntl.LOCK_NB)

        def _unlock_file(f):
            try:
                fcntl.flock(f, fcntl.LOCK_UN)
            except OSError:
                fcntl.lockf(f, fcntl.LOCK_UN)

    except ImportError:
        UNSUPPORTED_MSG = 'file locking is not supported on this platform'

        def _lock_file(f, exclusive, block):
            raise IOError(UNSUPPORTED_MSG)

        def _unlock_file(f):
            raise IOError(UNSUPPORTED_MSG)


class locked_file(object):
    _closed = False

    def __init__(self, filename, mode, block=True, encoding=None):
        assert mode in ['r', 'rb', 'a', 'ab', 'w', 'wb']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode
        self.block = block

    def __enter__(self):
        exclusive = 'r' not in self.mode
        try:
            _lock_file(self.f, exclusive, self.block)
        except IOError:
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            if not self._closed:
                _unlock_file(self.f)
        finally:
            self.f.close()
            self._closed = True

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)

    def open(self):
        return self.__enter__()

    def close(self, *args):
        self.__exit__(self, *args, value=False, traceback=False)


def get_filesystem_encoding():
    encoding = sys.getfilesystemencoding()
    return encoding if encoding is not None else 'utf-8'


def shell_quote(args):
    quoted_args = []
    encoding = get_filesystem_encoding()
    for a in args:
        if isinstance(a, bytes):
            # We may get a filename encoded with 'encodeFilename'
            a = a.decode(encoding)
        quoted_args.append(compat_shlex_quote(a))
    return ' '.join(quoted_args)


def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """

    url, idata = unsmuggle_url(url, {})
    data.update(idata)
    sdata = compat_urllib_parse_urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return url + '#' + sdata


def unsmuggle_url(smug_url, default=None):
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, sdata = smug_url.rpartition('#')
    jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
    data = json.loads(jsond)
    return url, data


def format_decimal_suffix(num, fmt='%d%s', *, factor=1000):
    """ Formats numbers with decimal suffixes like K, M, etc """
    num, factor = float_or_none(num), float(factor)
    if num is None or num < 0:
        return None
    POSSIBLE_SUFFIXES = 'kMGTPEZY'
    exponent = 0 if num == 0 else min(int(math.log(num, factor)), len(POSSIBLE_SUFFIXES))
    suffix = ['', *POSSIBLE_SUFFIXES][exponent]
    if factor == 1024:
        suffix = {'k': 'Ki', '': ''}.get(suffix, f'{suffix}i')
    converted = num / (factor ** exponent)
    return fmt % (converted, suffix)


def format_bytes(bytes):
    return format_decimal_suffix(bytes, '%.2f%sB', factor=1024) or 'N/A'
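
# Examples: decimal suffixes use factor 1000, binary ones factor 1024:
#
#   >>> format_decimal_suffix(2500, '%.1f%s')
#   '2.5k'
#   >>> format_bytes(1048576)
#   '1.00MiB'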


def lookup_unit_table(unit_table, s):
    units_re = '|'.join(re.escape(u) for u in unit_table)
    m = re.match(
        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
    if not m:
        return None
    num_str = m.group('num').replace(',', '.')
    mult = unit_table[m.group('unit')]
    return int(float(num_str) * mult)


def parse_filesize(s):
    if s is None:
        return None

    # The lower-case forms are of course incorrect and unofficial,
    # but we support those too
    _UNIT_TABLE = {
        'bytes': 1,
        'kilobytes': 1000,
        'kibibytes': 1024,
        'megabytes': 1000 ** 2,
        'mebibytes': 1024 ** 2,
        'gigabytes': 1000 ** 3,
        'gibibytes': 1024 ** 3,
        'terabytes': 1000 ** 4,
        'tebibytes': 1024 ** 4,
        'petabytes': 1000 ** 5,
        'pebibytes': 1024 ** 5,
        'exabytes': 1000 ** 6,
        'exbibytes': 1024 ** 6,
        'zettabytes': 1000 ** 7,
        'zebibytes': 1024 ** 7,
        'yottabytes': 1000 ** 8,
        'yobibytes': 1024 ** 8,
    }

    return lookup_unit_table(_UNIT_TABLE, s)


def parse_count(s):
    if s is None:
        return None

    s = re.sub(r'^[^\d]+\s', '', s).strip()

    if re.match(r'^[\d,.]+$', s):
        return str_to_int(s)

    _UNIT_TABLE = {
        'k': 1000,
        'K': 1000,
        'm': 1000 ** 2,
        'M': 1000 ** 2,
        'b': 1000 ** 3,
        'B': 1000 ** 3,
    }

    ret = lookup_unit_table(_UNIT_TABLE, s)
    if ret is not None:
        return ret

    mobj = re.match(r'([\d,.]+)(?:$|\s)', s)
    if mobj:
        return str_to_int(mobj.group(1))


def parse_resolution(s, *, lenient=False):
    if s is None:
        return {}

    if lenient:
        mobj = re.search(r'(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)', s)
    else:
        mobj = re.search(r'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s)
    if mobj:
        return {
            'width': int(mobj.group('w')),
            'height': int(mobj.group('h')),
        }

    mobj = re.search(r'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s)
    if mobj:
        return {'height': int(mobj.group(1))}

    mobj = re.search(r'\b([48])[kK]\b', s)
    if mobj:
        return {'height': int(mobj.group(1)) * 540}

    return {}
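
# Examples:
#
#   >>> parse_resolution('1920x1080')
#   {'width': 1920, 'height': 1080}
#   >>> parse_resolution('720p')
#   {'height': 720}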


def parse_bitrate(s):
    if not isinstance(s, compat_str):
        return
    mobj = re.search(r'\b(\d+)\s*kbps', s)
    if mobj:
        return int(mobj.group(1))


def month_by_name(name, lang='en'):
    """ Return the number of a month by (locale-independently) English name """

    month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])

    try:
        return month_names.index(name) + 1
    except ValueError:
        return None


def month_by_abbreviation(abbrev):
    """ Return the number of a month by (locale-independently) English
        abbreviations """

    try:
        return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
    except ValueError:
        return None


def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML"""
    return re.sub(
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;',
        xml_str)


def setproctitle(title):
    assert isinstance(title, compat_str)

    # ctypes in Jython is not complete
    # http://bugs.jython.org/issue2148
    if sys.platform.startswith('java'):
        return

    try:
        libc = ctypes.cdll.LoadLibrary('libc.so.6')
    except OSError:
        return
    except TypeError:
        # LoadLibrary in Windows Python 2.7.13 only expects
        # a bytestring, but since unicode_literals turns
        # every string into a unicode string, it fails.
        return
    title_bytes = title.encode('utf-8')
    buf = ctypes.create_string_buffer(len(title_bytes))
    buf.value = title_bytes
    try:
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this


def remove_start(s, start):
    return s[len(start):] if s is not None and s.startswith(start) else s


def remove_end(s, end):
    return s[:-len(end)] if s is not None and s.endswith(end) else s


def remove_quotes(s):
    if s is None or len(s) < 2:
        return s
    for quote in ('"', "'", ):
        if s[0] == quote and s[-1] == quote:
            return s[1:-1]
    return s


def get_domain(url):
    domain = re.match(r'(?:https?:\/\/)?(?:www\.)?(?P<domain>[^\n\/]+\.[^\n\/]+)(?:\/(.*))?', url)
    return domain.group('domain') if domain else None


def url_basename(url):
    path = compat_urlparse.urlparse(url).path
    return path.strip('/').split('/')[-1]


def base_url(url):
    return re.match(r'https?://[^?#&]+/', url).group()


def urljoin(base, path):
    if isinstance(path, bytes):
        path = path.decode('utf-8')
    if not isinstance(path, compat_str) or not path:
        return None
    if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
        return path
    if isinstance(base, bytes):
        base = base.decode('utf-8')
    if not isinstance(base, compat_str) or not re.match(
            r'^(?:https?:)?//', base):
        return None
    return compat_urlparse.urljoin(base, path)


class HEADRequest(compat_urllib_request.Request):
    def get_method(self):
        return 'HEAD'


class PUTRequest(compat_urllib_request.Request):
    def get_method(self):
        return 'PUT'


def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    if get_attr and v is not None:
        v = getattr(v, get_attr, None)
    try:
        return int(v) * invscale // scale
    except (ValueError, TypeError, OverflowError):
        return default


def str_or_none(v, default=None):
    return default if v is None else compat_str(v)


def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    if isinstance(int_str, compat_integer_types):
        return int_str
    elif isinstance(int_str, compat_str):
        int_str = re.sub(r'[,\.\+]', '', int_str)
    return int_or_none(int_str)


def float_or_none(v, scale=1, invscale=1, default=None):
    if v is None:
        return default
    try:
        return float(v) * invscale / scale
    except (ValueError, TypeError):
        return default


def bool_or_none(v, default=None):
    return v if isinstance(v, bool) else default


def strip_or_none(v, default=None):
    return v.strip() if isinstance(v, compat_str) else default


def url_or_none(url):
    if not url or not isinstance(url, compat_str):
        return None
    url = url.strip()
    return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None


def request_to_url(req):
    if isinstance(req, compat_urllib_request.Request):
        return req.get_full_url()
    else:
        return req


def strftime_or_none(timestamp, date_format, default=None):
    datetime_object = None
    try:
        if isinstance(timestamp, compat_numeric_types):  # unix timestamp
            datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
        elif isinstance(timestamp, compat_str):  # assume YYYYMMDD
            datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
        return datetime_object.strftime(date_format)
    except (ValueError, TypeError, AttributeError):
        return default


def parse_duration(s):
    if not isinstance(s, compat_basestring):
        return None
    s = s.strip()
    if not s:
        return None

    days, hours, mins, secs, ms = [None] * 5
    m = re.match(r'''(?x)
            (?P<before_secs>
                (?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?
            (?P<secs>(?(before_secs)[0-9]{1,2}|[0-9]+))
            (?P<ms>[.:][0-9]+)?Z?$
        ''', s)
    if m:
        days, hours, mins, secs, ms = m.group('days', 'hours', 'mins', 'secs', 'ms')
    else:
        m = re.match(
            r'''(?ix)(?:P?
                (?:
                    [0-9]+\s*y(?:ears?)?,?\s*
                )?
                (?:
                    [0-9]+\s*m(?:onths?)?,?\s*
                )?
                (?:
                    [0-9]+\s*w(?:eeks?)?,?\s*
                )?
                (?:
                    (?P<days>[0-9]+)\s*d(?:ays?)?,?\s*
                )?
                T)?
                (?:
                    (?P<hours>[0-9]+)\s*h(?:ours?)?,?\s*
                )?
                (?:
                    (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?,?\s*
                )?
                (?:
                    (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
                )?Z?$''', s)
        if m:
            days, hours, mins, secs, ms = m.groups()
        else:
            m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
            if m:
                hours, mins = m.groups()
            else:
                return None

    duration = 0
    if secs:
        duration += float(secs)
    if mins:
        duration += float(mins) * 60
    if hours:
        duration += float(hours) * 60 * 60
    if days:
        duration += float(days) * 24 * 60 * 60
    if ms:
        duration += float(ms.replace(':', '.'))
    return duration
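
# Examples:
#
#   >>> parse_duration('1:23:45')
#   5025.0
#   >>> parse_duration('01:30')
#   90.0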


def prepend_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return (
        '{0}.{1}{2}'.format(name, ext, real_ext)
        if not expected_real_ext or real_ext[1:] == expected_real_ext
        else '{0}.{1}'.format(filename, ext))


def replace_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return '{0}.{1}'.format(
        name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
        ext)


def check_executable(exe, args=[]):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
    try:
        Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate_or_kill()
    except OSError:
        return False
    return exe


def _get_exe_version_output(exe, args, *, to_screen=None):
    if to_screen:
        to_screen(f'Checking exe version: {shell_quote([exe] + args)}')
    try:
        # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
        # SIGTTOU if yt-dlp is run in the background.
        # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
        out, _ = Popen(
            [encodeArgument(exe)] + args, stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate_or_kill()
    except OSError:
        return False
    if isinstance(out, bytes):  # Python 2.x
        out = out.decode('ascii', 'ignore')
    return out


def detect_exe_version(output, version_re=None, unrecognized='present'):
    assert isinstance(output, compat_str)
    if version_re is None:
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
    if m:
        return m.group(1)
    else:
        return unrecognized


def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized='present'):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    out = _get_exe_version_output(exe, args)
    return detect_exe_version(out, version_re, unrecognized) if out else False


class LazyList(collections.abc.Sequence):
    ''' Lazy immutable list from an iterable
    Note that slices of a LazyList are lists and not LazyList'''

    class IndexError(IndexError):
        pass

    def __init__(self, iterable, *, reverse=False, _cache=None):
        self.__iterable = iter(iterable)
        self.__cache = [] if _cache is None else _cache
        self.__reversed = reverse

    def __iter__(self):
        if self.__reversed:
            # We need to consume the entire iterable to iterate in reverse
            yield from self.exhaust()
            return
        yield from self.__cache
        for item in self.__iterable:
            self.__cache.append(item)
            yield item

    def __exhaust(self):
        self.__cache.extend(self.__iterable)
        # Discard the emptied iterable to make it pickle-able
        self.__iterable = []
        return self.__cache

    def exhaust(self):
        ''' Evaluate the entire iterable '''
        return self.__exhaust()[::-1 if self.__reversed else 1]

    @staticmethod
    def __reverse_index(x):
        return None if x is None else -(x + 1)

    def __getitem__(self, idx):
        if isinstance(idx, slice):
            if self.__reversed:
                idx = slice(self.__reverse_index(idx.start), self.__reverse_index(idx.stop), -(idx.step or 1))
            start, stop, step = idx.start, idx.stop, idx.step or 1
        elif isinstance(idx, int):
            if self.__reversed:
                idx = self.__reverse_index(idx)
            start, stop, step = idx, idx, 0
        else:
            raise TypeError('indices must be integers or slices')
        if ((start or 0) < 0 or (stop or 0) < 0
                or (start is None and step < 0)
                or (stop is None and step > 0)):
            # We need to consume the entire iterable to be able to slice from the end
            # Obviously, never use this with infinite iterables
            self.__exhaust()
            try:
                return self.__cache[idx]
            except IndexError as e:
                raise self.IndexError(e) from e
        n = max(start or 0, stop or 0) - len(self.__cache) + 1
        if n > 0:
            self.__cache.extend(itertools.islice(self.__iterable, n))
        try:
            return self.__cache[idx]
        except IndexError as e:
            raise self.IndexError(e) from e

    def __bool__(self):
        try:
            self[-1] if self.__reversed else self[0]
        except self.IndexError:
            return False
        return True

    def __len__(self):
        self.__exhaust()
        return len(self.__cache)

    def __reversed__(self):
        return type(self)(self.__iterable, reverse=not self.__reversed, _cache=self.__cache)

    def __copy__(self):
        return type(self)(self.__iterable, reverse=self.__reversed, _cache=self.__cache)

    def __repr__(self):
        # repr and str should mimic a list. So we exhaust the iterable
        return repr(self.exhaust())

    def __str__(self):
        return repr(self.exhaust())


class PagedList:

    class IndexError(IndexError):
        pass

    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())

    def __init__(self, pagefunc, pagesize, use_cache=True):
        self._pagefunc = pagefunc
        self._pagesize = pagesize
        self._pagecount = float('inf')
        self._use_cache = use_cache
        self._cache = {}

    def getpage(self, pagenum):
        page_results = self._cache.get(pagenum)
        if page_results is None:
            page_results = [] if pagenum > self._pagecount else list(self._pagefunc(pagenum))
        if self._use_cache:
            self._cache[pagenum] = page_results
        return page_results

    def getslice(self, start=0, end=None):
        return list(self._getslice(start, end))

    def _getslice(self, start, end):
        raise NotImplementedError('This method must be implemented by subclasses')

    def __getitem__(self, idx):
        assert self._use_cache, 'Indexing PagedList requires cache'
        if not isinstance(idx, int) or idx < 0:
            raise TypeError('indices must be non-negative integers')
        entries = self.getslice(idx, idx + 1)
        if not entries:
            raise self.IndexError()
        return entries[0]


class OnDemandPagedList(PagedList):
    def _getslice(self, start, end):
        for pagenum in itertools.count(start // self._pagesize):
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:
                continue

            startv = (
                start % self._pagesize
                if firstid <= start < nextfirstid
                else 0)
            endv = (
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
                else None)

            try:
                page_results = self.getpage(pagenum)
            except Exception:
                self._pagecount = pagenum - 1
                raise
            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            yield from page_results

            # A little optimization - if current page is not "full", ie. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page_results) + startv < self._pagesize:
                break

            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
                break


class InAdvancePagedList(PagedList):
    def __init__(self, pagefunc, pagecount, pagesize):
        PagedList.__init__(self, pagefunc, pagesize, True)
        self._pagecount = pagecount

    def _getslice(self, start, end):
        start_page = start // self._pagesize
        end_page = self._pagecount if end is None else min(self._pagecount, end // self._pagesize + 1)
        skip_elems = start - start_page * self._pagesize
        only_more = None if end is None else end - start
        for pagenum in range(start_page, end_page):
            page_results = self.getpage(pagenum)
            if skip_elems:
                page_results = page_results[skip_elems:]
                skip_elems = None
            if only_more is not None:
                if len(page_results) < only_more:
                    only_more -= len(page_results)
                else:
                    yield from page_results[:only_more]
                    break
            yield from page_results


def uppercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def lowercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    if sys.version_info < (3, 0) and isinstance(s, compat_str):
        s = s.encode('utf-8')
    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")


def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    url_parsed = compat_urllib_parse_urlparse(url)
    return url_parsed._replace(
        netloc=url_parsed.netloc.encode('idna').decode('ascii'),
        path=escape_rfc3986(url_parsed.path),
        params=escape_rfc3986(url_parsed.params),
        query=escape_rfc3986(url_parsed.query),
        fragment=escape_rfc3986(url_parsed.fragment)
    ).geturl()


def parse_qs(url):
    return compat_parse_qs(compat_urllib_parse_urlparse(url).query)


def read_batch_urls(batch_fd):
    def fixup(url):
        if not isinstance(url, compat_str):
            url = url.decode('utf-8', 'replace')
        BOM_UTF8 = ('\xef\xbb\xbf', '\ufeff')
        for bom in BOM_UTF8:
            if url.startswith(bom):
                url = url[len(bom):]
        url = url.lstrip()
        if not url or url.startswith(('#', ';', ']')):
            return False
        # "#" cannot be stripped out since it is part of the URI
        # However, it can be safely stripped out if following a whitespace
        return re.split(r'\s#', url, 1)[0].rstrip()

    with contextlib.closing(batch_fd) as fd:
        return [url for url in map(fixup, fd) if url]


def urlencode_postdata(*args, **kargs):
    return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')


def update_url_query(url, query):
    if not query:
        return url
    parsed_url = compat_urlparse.urlparse(url)
    qs = compat_parse_qs(parsed_url.query)
    qs.update(query)
    return compat_urlparse.urlunparse(parsed_url._replace(
        query=compat_urllib_parse_urlencode(qs, True)))


def update_Request(req, url=None, data=None, headers={}, query={}):
    req_headers = req.headers.copy()
    req_headers.update(headers)
    req_data = data or req.data
    req_url = update_url_query(url or req.get_full_url(), query)
    req_get_method = req.get_method()
    if req_get_method == 'HEAD':
        req_type = HEADRequest
    elif req_get_method == 'PUT':
        req_type = PUTRequest
    else:
        req_type = compat_urllib_request.Request
    new_req = req_type(
        req_url, data=req_data, headers=req_headers,
        origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
    if hasattr(req, 'timeout'):
        new_req.timeout = req.timeout
    return new_req


def _multipart_encode_impl(data, boundary):
    content_type = 'multipart/form-data; boundary=%s' % boundary

    out = b''
    for k, v in data.items():
        out += b'--' + boundary.encode('ascii') + b'\r\n'
        if isinstance(k, compat_str):
            k = k.encode('utf-8')
        if isinstance(v, compat_str):
            v = v.encode('utf-8')
        # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
        # suggests sending UTF-8 directly. Firefox sends UTF-8, too
        content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
        if boundary.encode('ascii') in content:
            raise ValueError('Boundary overlaps with data')
        out += content

    out += b'--' + boundary.encode('ascii') + b'--\r\n'

    return out, content_type


def multipart_encode(data, boundary=None):
    '''
    Encode a dict to RFC 7578-compliant form-data

    data:
        A dict where keys and values can be either Unicode or bytes-like
        objects.
    boundary:
        If specified a Unicode object, it's used as the boundary. Otherwise
        a random boundary is generated.

    Reference: https://tools.ietf.org/html/rfc7578
    '''
    has_specified_boundary = boundary is not None

    while True:
        if boundary is None:
            boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))

        try:
            out, content_type = _multipart_encode_impl(data, boundary)
            break
        except ValueError:
            if has_specified_boundary:
                raise
            boundary = None

    return out, content_type


def dict_get(d, key_or_keys, default=None, skip_false_values=True):
    if isinstance(key_or_keys, (list, tuple)):
        for key in key_or_keys:
            if key not in d or d[key] is None or skip_false_values and not d[key]:
                continue
            return d[key]
        return default
    return d.get(key_or_keys, default)


def try_call(*funcs, expected_type=None, args=[], kwargs={}):
    for f in funcs:
        try:
            val = f(*args, **kwargs)
        except (AttributeError, KeyError, TypeError, IndexError, ZeroDivisionError):
            pass
        else:
            if expected_type is None or isinstance(val, expected_type):
                return val


def try_get(src, getter, expected_type=None):
    return try_call(*variadic(getter), args=(src,), expected_type=expected_type)


def filter_dict(dct, cndn=lambda _, v: v is not None):
    return {k: v for k, v in dct.items() if cndn(k, v)}


def merge_dicts(*dicts):
    merged = {}
    for a_dict in dicts:
        for k, v in a_dict.items():
            if (v is not None and k not in merged
                    or isinstance(v, str) and merged[k] == ''):
                merged[k] = v
    return merged


def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
    return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)


TV_PARENTAL_GUIDELINES = {
    'TV-Y': 0,
    'TV-Y7': 7,
    'TV-G': 0,
    'TV-PG': 0,
    'TV-14': 14,
    'TV-MA': 17,
}


def parse_age_limit(s):
    if type(s) == int:
        return s if 0 <= s <= 21 else None
    if not isinstance(s, compat_basestring):
        return None
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    if m:
        return int(m.group('age'))
    s = s.upper()
    if s in US_RATINGS:
        return US_RATINGS[s]
    m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
    if m:
        return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
    return None


def strip_jsonp(code):
    return re.sub(
        r'''(?sx)^
            (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
            (?:\s*&&\s*(?P=func_name))?
            \s*\(\s*(?P<callback_data>.*)\);?
            \s*?(?://[^\n]*)*$''',
        r'\g<callback_data>', code)


def js_to_json(code, vars={}):
    # vars is a dict of var, val pairs to substitute
    COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
    SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
    INTEGER_TABLE = (
        (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
        (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
    )

    def fix_kv(m):
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        elif v in ('undefined', 'void 0'):
            return 'null'
        elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
            return ''

        if v[0] in ("'", '"'):
            v = re.sub(r'(?s)\\.|"', lambda m: {
                '"': '\\"',
                "\\'": "'",
                '\\\n': '',
                '\\x': '\\u00',
            }.get(m.group(0), m.group(0)), v[1:-1])
        else:
            for regex, base in INTEGER_TABLE:
                im = re.match(regex, v)
                if im:
                    i = int(im.group(1), base)
                    return '"%d":' % i if v.endswith(':') else '%d' % i

            if v in vars:
                return vars[v]

        return '"%s"' % v

    code = re.sub(r'new Date\((".+")\)', r'\g<1>', code)

    return re.sub(r'''(?sx)
        "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
        {comment}|,(?={skip}[\]}}])|
        void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
        \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
        [0-9]+(?={skip}:)|
        !+
        '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)


def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q


POSTPROCESS_WHEN = {'pre_process', 'after_filter', 'before_dl', 'after_move', 'post_process', 'after_video', 'playlist'}


DEFAULT_OUTTMPL = {
    'default': '%(title)s [%(id)s].%(ext)s',
    'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
}
OUTTMPL_TYPES = {
    'chapter': None,
    'subtitle': None,
    'thumbnail': None,
    'description': 'description',
    'annotation': 'annotations.xml',
    'infojson': 'info.json',
    'link': None,
    'pl_video': None,
    'pl_thumbnail': None,
    'pl_description': 'description',
    'pl_infojson': 'info.json',
}

# As of [1] format syntax is:
#  %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
# 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
STR_FORMAT_RE_TMPL = r'''(?x)
    (?<!%)(?P<prefix>(?:%%)*)
    %
    (?P<has_key>\((?P<key>{0})\))?
    (?P<format>
        (?P<conversion>[#0\-+ ]+)?
        (?P<min_width>\d+)?
        (?P<precision>\.\d+)?
        (?P<len_mod>[hlL])?  # unused in python
        {1}  # conversion type
    )
'''


STR_FORMAT_TYPES = 'diouxXeEfFgGcrs'


def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) > length:
        return s[:length - len(ELLIPSES)] + ELLIPSES
    return s


def version_tuple(v):
    return tuple(int(e) for e in re.split(r'[-.]', v))


def is_outdated_version(version, limit, assume_new=True):
    if not version:
        return not assume_new
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return not assume_new


def ytdl_is_updateable():
    """ Returns if yt-dlp can be updated with -U """

    from .update import is_non_updateable

    return not is_non_updateable()


def args_to_str(args):
    # Get a short string representation for a subprocess command
    return ' '.join(compat_shlex_quote(a) for a in args)


def error_to_compat_str(err):
    err_str = str(err)
    # On python 2 error byte string must be decoded with proper
    # encoding rather than ascii
    if sys.version_info[0] < 3:
        err_str = err_str.decode(preferredencoding())
    return err_str


def mimetype2ext(mt):
    if mt is None:
        return None

    mt, _, params = mt.partition(';')
    mt = mt.strip()

    FULL_MAP = {
        # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
        # it's the most popular one
        'audio/mpeg': 'mp3',
        'audio/x-wav': 'wav',
        'audio/wave': 'wav',
    }

    ext = FULL_MAP.get(mt)
    if ext is not None:
        return ext

    SUBTYPE_MAP = {
        'smptett+xml': 'tt',
        'x-mp4-fragmented': 'mp4',
        'x-ms-sami': 'sami',
        'x-mpegurl': 'm3u8',
        'vnd.apple.mpegurl': 'm3u8',
        'vnd.ms-sstr+xml': 'ism',
        'filmstrip+json': 'fs',
    }

    _, _, subtype = mt.rpartition('/')
    ext = SUBTYPE_MAP.get(subtype.lower())
    if ext is not None:
        return ext

    SUFFIX_MAP = {
        'json': 'json',
        'xml': 'xml',
        'zip': 'zip',
        'gzip': 'gz',
    }

    _, _, suffix = subtype.partition('+')
    ext = SUFFIX_MAP.get(suffix)
    if ext is not None:
        return ext

    return subtype.replace('+', '.')


def ext2mimetype(ext_or_url):
    if not ext_or_url:
        return None
    if '.' not in ext_or_url:
        ext_or_url = f'file.{ext_or_url}'
    return mimetypes.guess_type(ext_or_url)[0]


def parse_codecs(codecs_str):
    # http://tools.ietf.org/html/rfc6381
    if not codecs_str:
        return {}
    split_codecs = list(filter(None, map(
        str.strip, codecs_str.strip().strip(',').split(','))))
    vcodec, acodec, tcodec, hdr = None, None, None, None
    for full_codec in split_codecs:
        parts = full_codec.split('.')
        codec = parts[0].replace('0', '')
        if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
                     'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
            if not vcodec:
                vcodec = '.'.join(parts[:4]) if codec in ('vp9', 'av1', 'hvc1') else full_codec
                if codec in ('dvh1', 'dvhe'):
                    hdr = 'DV'
                elif codec == 'av1' and len(parts) > 3 and parts[3] == '10':
                    hdr = 'HDR10'
                elif full_codec.replace('0', '').startswith('vp9.2'):
                    hdr = 'HDR10'
        elif codec in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
            if not acodec:
                acodec = full_codec
        elif codec in ('stpp', 'wvtt',):
            if not tcodec:
                tcodec = full_codec
        else:
            write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
    if vcodec or acodec or tcodec:
        return {
            'vcodec': vcodec or 'none',
            'acodec': acodec or 'none',
            'dynamic_range': hdr,
            **({'tcodec': tcodec} if tcodec is not None else {}),
        }
    elif len(split_codecs) == 2:
        return {
            'vcodec': split_codecs[0],
            'acodec': split_codecs[1],
        }
    return {}


def urlhandle_detect_ext(url_handle):
    getheader = url_handle.headers.get

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(getheader('Content-Type'))


def encode_data_uri(data, mime_type):
    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))


def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """

    if age_limit is None:  # No limit set
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit


def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes. """

    BOMS = [
        (b'\xef\xbb\xbf', 'utf-8'),
        (b'\x00\x00\xfe\xff', 'utf-32-be'),
        (b'\xff\xfe\x00\x00', 'utf-32-le'),
        (b'\xff\xfe', 'utf-16-le'),
        (b'\xfe\xff', 'utf-16-be'),
    ]
    for bom, enc in BOMS:
        if first_bytes.startswith(bom):
            s = first_bytes[len(bom):].decode(enc, 'replace')
            break
    else:
        s = first_bytes.decode('utf-8', 'replace')

    return re.match(r'^\s*<', s)


def determine_protocol(info_dict):
    protocol = info_dict.get('protocol')
    if protocol is not None:
        return protocol

    url = sanitize_url(info_dict['url'])
    if url.startswith('rtmp'):
        return 'rtmp'
    elif url.startswith('mms'):
        return 'mms'
    elif url.startswith('rtsp'):
        return 'rtsp'

    ext = determine_ext(url)
    if ext == 'm3u8':
        return 'm3u8'
    elif ext == 'f4m':
        return 'f4m'

    return compat_urllib_parse_urlparse(url).scheme


def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False):
    """ Render a list of rows, each as a list of values.
    Text after a \t will be right aligned """
    def width(string):
        return len(remove_terminal_sequences(string).replace('\t', ''))

    def get_max_lens(table):
        return [max(width(str(v)) for v in col) for col in zip(*table)]

    def filter_using_list(row, filterArray):
        return [col for take, col in itertools.zip_longest(filterArray, row, fillvalue=True) if take]

    max_lens = get_max_lens(data) if hide_empty else []
    header_row = filter_using_list(header_row, max_lens)
    data = [filter_using_list(row, max_lens) for row in data]

    table = [header_row] + data
    max_lens = get_max_lens(table)
    extra_gap += 1
    if delim:
        table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data
        table[1][-1] = table[1][-1][:-extra_gap * len(delim)]  # Remove extra_gap from end of delimiter
    for row in table:
        for pos, text in enumerate(map(str, row)):
            if '\t' in text:
                row[pos] = text.replace('\t', ' ' * (max_lens[pos] - width(text))) + ' ' * extra_gap
            else:
                row[pos] = text + ' ' * (max_lens[pos] - width(text) + extra_gap)
    ret = '\n'.join(''.join(row).rstrip() for row in table)
    return ret


def _match_one(filter_part, dct, incomplete):
    # TODO: Generalize code with YoutubeDL._build_format_filter
    STRING_OPERATORS = {
        '*=': operator.contains,
        '^=': lambda attr, value: attr.startswith(value),
        '$=': lambda attr, value: attr.endswith(value),
        '~=': lambda attr, value: re.search(value, attr),
    }
    COMPARISON_OPERATORS = {
        **STRING_OPERATORS,
        '<=': operator.le,  # "<=" must be defined above "<"
        '<': operator.lt,
        '>=': operator.ge,
        '>': operator.gt,
        '=': operator.eq,
    }

    if isinstance(incomplete, bool):
        is_incomplete = lambda _: incomplete
    else:
        is_incomplete = lambda k: k in incomplete

    operator_rex = re.compile(r'''(?x)\s*
        (?P<key>[a-z_]+)
        \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
        (?:
            (?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
            (?P<strval>.+?)
        )
        \s*$
        ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        m = m.groupdict()
        unnegated_op = COMPARISON_OPERATORS[m['op']]
        if m['negation']:
            op = lambda attr, value: not unnegated_op(attr, value)
        else:
            op = unnegated_op
        comparison_value = m['quotedstrval'] or m['strval'] or m['intval']
        if m['quote']:
            comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote'])
        actual_value = dct.get(m['key'])
        numeric_comparison = None
        if isinstance(actual_value, compat_numeric_types):
            # If the original field is a string and matching comparisonvalue is
            # a number we should respect the origin of the original field
            # and process comparison value as a string (see
            # https://github.com/ytdl-org/youtube-dl/issues/11082)
            try:
                numeric_comparison = int(comparison_value)
            except ValueError:
                numeric_comparison = parse_filesize(comparison_value)
                if numeric_comparison is None:
                    numeric_comparison = parse_filesize(f'{comparison_value}B')
                if numeric_comparison is None:
                    numeric_comparison = parse_duration(comparison_value)
        if numeric_comparison is not None and m['op'] in STRING_OPERATORS:
            raise ValueError('Operator %s only supports string values!' % m['op'])
        if actual_value is None:
            return is_incomplete(m['key']) or m['none_inclusive']
        return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)

    UNARY_OPERATORS = {
        '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
        '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
    }
    operator_rex = re.compile(r'''(?x)\s*
        (?P<op>%s)\s*(?P<key>[a-z_]+)
        \s*$
        ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = UNARY_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        if is_incomplete(m.group('key')) and actual_value is None:
            return True
        return op(actual_value)

    raise ValueError('Invalid filter part %r' % filter_part)


def match_str(filter_str, dct, incomplete=False):
    """ Filter a dictionary with a simple string syntax.
    @returns           Whether the filter passes
    @param incomplete  Set of keys that is expected to be missing from dct.
                       Can be True/False to indicate all/none of the keys may be missing.
                       All conditions on incomplete keys pass if the key is missing
    """
    return all(
        _match_one(filter_part.replace(r'\&', '&'), dct, incomplete)
        for filter_part in re.split(r'(?<!\\)&', filter_str))
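
# Example: '&'-separated conditions must all hold:
#
#   >>> match_str('like_count > 100 & title ~= (?i)vevo',
#   ...           {'like_count': 190, 'title': 'VEVO clip'})
#   True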


def match_filter_func(filters):
    if not filters:
        return None
    filters = variadic(filters)

    def _match_func(info_dict, *args, **kwargs):
        if any(match_str(f, info_dict, *args, **kwargs) for f in filters):
            return None
        else:
            video_title = info_dict.get('title') or info_dict.get('id') or 'video'
            filter_str = ') | ('.join(map(str.strip, filters))
            return f'{video_title} does not pass filter ({filter_str}), skipping ..'
    return _match_func


def parse_dfxp_time_expr(time_expr):
    if not time_expr:
        return

    mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
    if mobj:
        return float(mobj.group('time_offset'))

    mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
    if mobj:
        return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))


def srt_subtitles_timecode(seconds):
    return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)


def ass_subtitles_timecode(seconds):
    time = timetuple_from_msec(seconds * 1000)
    return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)


def dfxp2srt(dfxp_data):
    '''
    @param dfxp_data A bytes-like object containing DFXP data
    @returns A unicode object containing converted SRT data
    '''
    LEGACY_NAMESPACES = (
        (b'http://www.w3.org/ns/ttml', [
            b'http://www.w3.org/2004/11/ttaf1',
            b'http://www.w3.org/2006/04/ttaf1',
            b'http://www.w3.org/2006/10/ttaf1',
        ]),
        (b'http://www.w3.org/ns/ttml#styling', [
            b'http://www.w3.org/ns/ttml#style',
        ]),
    )

    SUPPORTED_STYLING = [
        'color',
        'fontFamily',
        'fontSize',
        'fontStyle',
        'fontWeight',
        'textDecoration',
    ]

    _x = functools.partial(xpath_with_ns, ns_map={
        'xml': 'http://www.w3.org/XML/1998/namespace',
        'ttml': 'http://www.w3.org/ns/ttml',
        'tts': 'http://www.w3.org/ns/ttml#styling',
    })

    styles = {}
    default_style = {}

    class TTMLPElementParser(object):
        _out = ''
        _unclosed_elements = []
        _applied_styles = []

        def start(self, tag, attrib):
            if tag in (_x('ttml:br'), 'br'):
                self._out += '\n'
            else:
                unclosed_elements = []
                style = {}
                element_style_id = attrib.get('style')
                if default_style:
                    style.update(default_style)
                if element_style_id:
                    style.update(styles.get(element_style_id, {}))
                for prop in SUPPORTED_STYLING:
                    prop_val = attrib.get(_x('tts:' + prop))
                    if prop_val:
                        style[prop] = prop_val
                if style:
                    font = ''
                    for k, v in sorted(style.items()):
                        if self._applied_styles and self._applied_styles[-1].get(k) == v:
                            continue
                        if k == 'color':
                            font += ' color="%s"' % v
                        elif k == 'fontSize':
                            font += ' size="%s"' % v
                        elif k == 'fontFamily':
                            font += ' face="%s"' % v
                        elif k == 'fontWeight' and v == 'bold':
                            self._out += '<b>'
                            unclosed_elements.append('b')
                        elif k == 'fontStyle' and v == 'italic':
                            self._out += '<i>'
                            unclosed_elements.append('i')
                        elif k == 'textDecoration' and v == 'underline':
                            self._out += '<u>'
                            unclosed_elements.append('u')
                    if font:
                        self._out += '<font' + font + '>'
                        unclosed_elements.append('font')
                    applied_style = {}
                    if self._applied_styles:
                        applied_style.update(self._applied_styles[-1])
                    applied_style.update(style)
                    self._applied_styles.append(applied_style)
                self._unclosed_elements.append(unclosed_elements)

        def end(self, tag):
            if tag not in (_x('ttml:br'), 'br'):
                unclosed_elements = self._unclosed_elements.pop()
                for element in reversed(unclosed_elements):
                    self._out += '</%s>' % element
                if unclosed_elements and self._applied_styles:
                    self._applied_styles.pop()

        def data(self, data):
            self._out += data

        def close(self):
            return self._out.strip()

    def parse_node(node):
        target = TTMLPElementParser()
        parser = xml.etree.ElementTree.XMLParser(target=target)
        parser.feed(xml.etree.ElementTree.tostring(node))
        return parser.close()

    for k, v in LEGACY_NAMESPACES:
        for ns in v:
            dfxp_data = dfxp_data.replace(ns, k)

    dfxp = compat_etree_fromstring(dfxp_data)
    out = []
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')

    if not paras:
        raise ValueError('Invalid dfxp/TTML subtitle')

    repeat = False
    while True:
        for style in dfxp.findall(_x('.//ttml:style')):
            style_id = style.get('id') or style.get(_x('xml:id'))
            if not style_id:
                continue
            parent_style_id = style.get('style')
            if parent_style_id:
                if parent_style_id not in styles:
                    repeat = True
                    continue
                styles[style_id] = styles[parent_style_id].copy()
            for prop in SUPPORTED_STYLING:
                prop_val = style.get(_x('tts:' + prop))
                if prop_val:
                    styles.setdefault(style_id, {})[prop] = prop_val
        if repeat:
            repeat = False
        else:
            break

    for p in ('body', 'div'):
        ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
        if ele is None:
            continue
        style = styles.get(ele.get('style'))
        if not style:
            continue
        default_style.update(style)

    for para, index in zip(paras, itertools.count(1)):
        begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
        dur = parse_dfxp_time_expr(para.attrib.get('dur'))
        if begin_time is None:
            continue
        if not end_time:
            if not dur:
                continue
            end_time = begin_time + dur
        out.append('%d\n%s --> %s\n%s\n\n' % (
            index,
            srt_subtitles_timecode(begin_time),
            srt_subtitles_timecode(end_time),
            parse_node(para)))

    return ''.join(out)


def cli_option(params, command_option, param):
    param = params.get(param)
    if param:
        param = compat_str(param)
    return [command_option, param] if param is not None else []


def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
    param = params.get(param)
    if param is None:
        return []
    assert isinstance(param, bool)
    if separator:
        return [command_option + separator + (true_value if param else false_value)]
    return [command_option, true_value if param else false_value]


def cli_valueless_option(params, command_option, param, expected_value=True):
    param = params.get(param)
    return [command_option] if param == expected_value else []


def cli_configuration_args(argdict, keys, default=[], use_compat=True):
    if isinstance(argdict, (list, tuple)):  # for backward compatibility
        if use_compat:
            return argdict
        else:
            argdict = None
    if argdict is None:
        return default
    assert isinstance(argdict, dict)

    assert isinstance(keys, (list, tuple))
    for key_list in keys:
        arg_list = list(filter(
            lambda x: x is not None,
            [argdict.get(key.lower()) for key in variadic(key_list)]))
        if arg_list:
            return [arg for args in arg_list for arg in args]
    return default


def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compat=True):
    main_key, exe = main_key.lower(), exe.lower()
    root_key = exe if main_key == exe else f'{main_key}+{exe}'
    keys = [f'{root_key}{k}' for k in (keys or [''])]
    if root_key in keys:
        if main_key != exe:
            keys.append((main_key, exe))
        keys.append('default')
    else:
        use_compat = False
    return cli_configuration_args(argdict, keys, default, use_compat)


class ISO639Utils(object):
    # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
    _lang_map = {
        'iw': 'heb',  # Replaced by he in 1989 revision
        'in': 'ind',  # Replaced by id in 1989 revision
        'ji': 'yid',  # Replaced by yi in 1989 revision
    }

    @classmethod
    def short2long(cls, code):
        """Convert language code from ISO 639-1 to ISO 639-2/T"""
        return cls._lang_map.get(code[:2])

    @classmethod
    def long2short(cls, code):
        """Convert language code from ISO 639-2/T to ISO 639-1"""
        for short_name, long_name in cls._lang_map.items():
            if long_name == code:
                return short_name


class ISO3166Utils(object):
    # From http://data.okfn.org/data/core/country-list
    _country_map = {
        'AF': 'Afghanistan',
        'AX': 'Åland Islands',
        'AS': 'American Samoa',
        'AG': 'Antigua and Barbuda',
        'BO': 'Bolivia, Plurinational State of',
        'BQ': 'Bonaire, Sint Eustatius and Saba',
        'BA': 'Bosnia and Herzegovina',
        'BV': 'Bouvet Island',
        'IO': 'British Indian Ocean Territory',
        'BN': 'Brunei Darussalam',
        'BF': 'Burkina Faso',
        'KY': 'Cayman Islands',
        'CF': 'Central African Republic',
        'CX': 'Christmas Island',
        'CC': 'Cocos (Keeling) Islands',
        'CD': 'Congo, the Democratic Republic of the',
        'CK': 'Cook Islands',
        'CI': 'Côte d\'Ivoire',
        'CZ': 'Czech Republic',
        'DO': 'Dominican Republic',
        'SV': 'El Salvador',
        'GQ': 'Equatorial Guinea',
        'FK': 'Falkland Islands (Malvinas)',
        'FO': 'Faroe Islands',
        'GF': 'French Guiana',
        'PF': 'French Polynesia',
        'TF': 'French Southern Territories',
        'GW': 'Guinea-Bissau',
        'HM': 'Heard Island and McDonald Islands',
        'VA': 'Holy See (Vatican City State)',
        'IR': 'Iran, Islamic Republic of',
        'IM': 'Isle of Man',
        'KP': 'Korea, Democratic People\'s Republic of',
        'KR': 'Korea, Republic of',
        'LA': 'Lao People\'s Democratic Republic',
        'LI': 'Liechtenstein',
        'MK': 'Macedonia, the Former Yugoslav Republic of',
        'MH': 'Marshall Islands',
        'FM': 'Micronesia, Federated States of',
        'MD': 'Moldova, Republic of',
        'NL': 'Netherlands',
        'NC': 'New Caledonia',
        'NZ': 'New Zealand',
        'NF': 'Norfolk Island',
        'MP': 'Northern Mariana Islands',
        'PS': 'Palestine, State of',
        'PG': 'Papua New Guinea',
        'PH': 'Philippines',
        'PR': 'Puerto Rico',
        'RU': 'Russian Federation',
        'BL': 'Saint Barthélemy',
        'SH': 'Saint Helena, Ascension and Tristan da Cunha',
        'KN': 'Saint Kitts and Nevis',
        'LC': 'Saint Lucia',
        'MF': 'Saint Martin (French part)',
        'PM': 'Saint Pierre and Miquelon',
        'VC': 'Saint Vincent and the Grenadines',
        'ST': 'Sao Tome and Principe',
        'SA': 'Saudi Arabia',
        'SL': 'Sierra Leone',
        'SX': 'Sint Maarten (Dutch part)',
        'SB': 'Solomon Islands',
        'ZA': 'South Africa',
        'GS': 'South Georgia and the South Sandwich Islands',
        'SS': 'South Sudan',
        'SJ': 'Svalbard and Jan Mayen',
        'CH': 'Switzerland',
        'SY': 'Syrian Arab Republic',
        'TW': 'Taiwan, Province of China',
        'TZ': 'Tanzania, United Republic of',
        'TL': 'Timor-Leste',
        'TT': 'Trinidad and Tobago',
        'TM': 'Turkmenistan',
        'TC': 'Turks and Caicos Islands',
        'AE': 'United Arab Emirates',
        'GB': 'United Kingdom',
        'US': 'United States',
        'UM': 'United States Minor Outlying Islands',
        'VE': 'Venezuela, Bolivarian Republic of',
        'VG': 'Virgin Islands, British',
        'VI': 'Virgin Islands, U.S.',
        'WF': 'Wallis and Futuna',
        'EH': 'Western Sahara',
    }

    @classmethod
    def short2full(cls, code):
        """Convert an ISO 3166-2 country code to the corresponding full name"""
        return cls._country_map.get(code.upper())


class GeoUtils(object):
    # Major IPv4 address blocks per country
    _country_ip_map = {
        'AD': '46.172.224.0/19',
        'AE': '94.200.0.0/13',
        'AF': '149.54.0.0/17',
        'AG': '209.59.64.0/18',
        'AI': '204.14.248.0/21',
        'AL': '46.99.0.0/16',
        'AM': '46.70.0.0/15',
        'AO': '105.168.0.0/13',
        'AP': '182.50.184.0/21',
        'AQ': '23.154.160.0/24',
        'AR': '181.0.0.0/12',
        'AS': '202.70.112.0/20',
        'AT': '77.116.0.0/14',
        'AU': '1.128.0.0/11',
        'AW': '181.41.0.0/18',
        'AX': '185.217.4.0/22',
        'AZ': '5.197.0.0/16',
        'BA': '31.176.128.0/17',
        'BB': '65.48.128.0/17',
        'BD': '114.130.0.0/16',
        'BF': '102.178.0.0/15',
        'BG': '95.42.0.0/15',
        'BH': '37.131.0.0/17',
        'BI': '154.117.192.0/18',
        'BJ': '137.255.0.0/16',
        'BL': '185.212.72.0/23',
        'BM': '196.12.64.0/18',
        'BN': '156.31.0.0/16',
        'BO': '161.56.0.0/16',
        'BQ': '161.0.80.0/20',
        'BR': '191.128.0.0/12',
        'BS': '24.51.64.0/18',
        'BT': '119.2.96.0/19',
        'BW': '168.167.0.0/16',
        'BY': '178.120.0.0/13',
        'BZ': '179.42.192.0/18',
        'CA': '99.224.0.0/11',
        'CD': '41.243.0.0/16',
        'CF': '197.242.176.0/21',
        'CG': '160.113.0.0/16',
        'CH': '85.0.0.0/13',
        'CI': '102.136.0.0/14',
        'CK': '202.65.32.0/19',
        'CL': '152.172.0.0/14',
        'CM': '102.244.0.0/14',
        'CN': '36.128.0.0/10',
        'CO': '181.240.0.0/12',
        'CR': '201.192.0.0/12',
        'CU': '152.206.0.0/15',
        'CV': '165.90.96.0/19',
        'CW': '190.88.128.0/17',
        'CY': '31.153.0.0/16',
        'CZ': '88.100.0.0/14',
        'DJ': '197.241.0.0/17',
        'DK': '87.48.0.0/12',
        'DM': '192.243.48.0/20',
        'DO': '152.166.0.0/15',
        'DZ': '41.96.0.0/12',
        'EC': '186.68.0.0/15',
        'EE': '90.190.0.0/15',
        'EG': '156.160.0.0/11',
        'ER': '196.200.96.0/20',
        'ES': '88.0.0.0/11',
        'ET': '196.188.0.0/14',
        'EU': '2.16.0.0/13',
        'FI': '91.152.0.0/13',
        'FJ': '144.120.0.0/16',
        'FK': '80.73.208.0/21',
        'FM': '119.252.112.0/20',
        'FO': '88.85.32.0/19',
        'GA': '41.158.0.0/15',
        'GD': '74.122.88.0/21',
        'GE': '31.146.0.0/16',
        'GF': '161.22.64.0/18',
        'GG': '62.68.160.0/19',
        'GH': '154.160.0.0/12',
        'GI': '95.164.0.0/16',
        'GL': '88.83.0.0/19',
        'GM': '160.182.0.0/15',
        'GN': '197.149.192.0/18',
        'GP': '104.250.0.0/19',
        'GQ': '105.235.224.0/20',
        'GR': '94.64.0.0/13',
        'GT': '168.234.0.0/16',
        'GU': '168.123.0.0/16',
        'GW': '197.214.80.0/20',
        'GY': '181.41.64.0/18',
        'HK': '113.252.0.0/14',
        'HN': '181.210.0.0/16',
        'HR': '93.136.0.0/13',
        'HT': '148.102.128.0/17',
        'HU': '84.0.0.0/14',
        'ID': '39.192.0.0/10',
        'IE': '87.32.0.0/12',
        'IL': '79.176.0.0/13',
        'IM': '5.62.80.0/20',
        'IN': '117.192.0.0/10',
        'IO': '203.83.48.0/21',
        'IQ': '37.236.0.0/14',
        'IR': '2.176.0.0/12',
        'IS': '82.221.0.0/16',
        'IT': '79.0.0.0/10',
        'JE': '87.244.64.0/18',
        'JM': '72.27.0.0/17',
        'JO': '176.29.0.0/16',
        'JP': '133.0.0.0/8',
        'KE': '105.48.0.0/12',
        'KG': '158.181.128.0/17',
        'KH': '36.37.128.0/17',
        'KI': '103.25.140.0/22',
        'KM': '197.255.224.0/20',
        'KN': '198.167.192.0/19',
        'KP': '175.45.176.0/22',
        'KR': '175.192.0.0/10',
        'KW': '37.36.0.0/14',
        'KY': '64.96.0.0/15',
        'KZ': '2.72.0.0/13',
        'LA': '115.84.64.0/18',
        'LB': '178.135.0.0/16',
        'LC': '24.92.144.0/20',
        'LI': '82.117.0.0/19',
        'LK': '112.134.0.0/15',
        'LR': '102.183.0.0/16',
        'LS': '129.232.0.0/17',
        'LT': '78.56.0.0/13',
        'LU': '188.42.0.0/16',
        'LV': '46.109.0.0/16',
        'LY': '41.252.0.0/14',
        'MA': '105.128.0.0/11',
        'MC': '88.209.64.0/18',
        'MD': '37.246.0.0/16',
        'ME': '178.175.0.0/17',
        'MF': '74.112.232.0/21',
        'MG': '154.126.0.0/17',
        'MH': '117.103.88.0/21',
        'MK': '77.28.0.0/15',
        'ML': '154.118.128.0/18',
        'MM': '37.111.0.0/17',
        'MN': '49.0.128.0/17',
        'MO': '60.246.0.0/16',
        'MP': '202.88.64.0/20',
        'MQ': '109.203.224.0/19',
        'MR': '41.188.64.0/18',
        'MS': '208.90.112.0/22',
        'MT': '46.11.0.0/16',
        'MU': '105.16.0.0/12',
        'MV': '27.114.128.0/18',
        'MW': '102.70.0.0/15',
        'MX': '187.192.0.0/11',
        'MY': '175.136.0.0/13',
        'MZ': '197.218.0.0/15',
        'NA': '41.182.0.0/16',
        'NC': '101.101.0.0/18',
        'NE': '197.214.0.0/18',
        'NF': '203.17.240.0/22',
        'NG': '105.112.0.0/12',
        'NI': '186.76.0.0/15',
        'NL': '145.96.0.0/11',
        'NO': '84.208.0.0/13',
        'NP': '36.252.0.0/15',
        'NR': '203.98.224.0/19',
        'NU': '49.156.48.0/22',
        'NZ': '49.224.0.0/14',
        'OM': '5.36.0.0/15',
        'PA': '186.72.0.0/15',
        'PE': '186.160.0.0/14',
        'PF': '123.50.64.0/18',
        'PG': '124.240.192.0/19',
        'PH': '49.144.0.0/13',
        'PK': '39.32.0.0/11',
        'PL': '83.0.0.0/11',
        'PM': '70.36.0.0/20',
        'PR': '66.50.0.0/16',
        'PS': '188.161.0.0/16',
        'PT': '85.240.0.0/13',
        'PW': '202.124.224.0/20',
        'PY': '181.120.0.0/14',
        'QA': '37.210.0.0/15',
        'RE': '102.35.0.0/16',
        'RO': '79.112.0.0/13',
        'RS': '93.86.0.0/15',
        'RU': '5.136.0.0/13',
        'RW': '41.186.0.0/16',
        'SA': '188.48.0.0/13',
        'SB': '202.1.160.0/19',
        'SC': '154.192.0.0/11',
        'SD': '102.120.0.0/13',
        'SE': '78.64.0.0/12',
        'SG': '8.128.0.0/10',
        'SI': '188.196.0.0/14',
        'SK': '78.98.0.0/15',
        'SL': '102.143.0.0/17',
        'SM': '89.186.32.0/19',
        'SN': '41.82.0.0/15',
        'SO': '154.115.192.0/18',
        'SR': '186.179.128.0/17',
        'SS': '105.235.208.0/21',
        'ST': '197.159.160.0/19',
        'SV': '168.243.0.0/16',
        'SX': '190.102.0.0/20',
        'SZ': '41.84.224.0/19',
        'TC': '65.255.48.0/20',
        'TD': '154.68.128.0/19',
        'TG': '196.168.0.0/14',
        'TH': '171.96.0.0/13',
        'TJ': '85.9.128.0/18',
        'TK': '27.96.24.0/21',
        'TL': '180.189.160.0/20',
        'TM': '95.85.96.0/19',
        'TN': '197.0.0.0/11',
        'TO': '175.176.144.0/21',
        'TR': '78.160.0.0/11',
        'TT': '186.44.0.0/15',
        'TV': '202.2.96.0/19',
        'TW': '120.96.0.0/11',
        'TZ': '156.156.0.0/14',
        'UA': '37.52.0.0/14',
        'UG': '102.80.0.0/13',
        'UY': '167.56.0.0/13',
        'UZ': '84.54.64.0/18',
        'VA': '212.77.0.0/19',
        'VC': '207.191.240.0/21',
        'VE': '186.88.0.0/13',
        'VG': '66.81.192.0/20',
        'VI': '146.226.0.0/16',
        'VN': '14.160.0.0/11',
        'VU': '202.80.32.0/20',
        'WF': '117.20.32.0/21',
        'WS': '202.4.32.0/19',
        'YE': '134.35.0.0/16',
        'YT': '41.242.116.0/22',
        'ZA': '41.0.0.0/11',
        'ZM': '102.144.0.0/13',
        'ZW': '102.177.192.0/18',
    }

    @classmethod
    def random_ipv4(cls, code_or_block):
        if len(code_or_block) == 2:
            block = cls._country_ip_map.get(code_or_block.upper())
            if not block:
                return None
        else:
            block = code_or_block
        addr, preflen = block.split('/')
        addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
        addr_max = addr_min | (0xffffffff >> int(preflen))
        return compat_str(socket.inet_ntoa(
            compat_struct_pack('!L', random.randint(addr_min, addr_max))))
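
# Example (a usage sketch): pick a plausible client address for geo-bypass,
# either by country code or from an explicit CIDR block:
#
#   GeoUtils.random_ipv4('CH')               # random address in 85.0.0.0/13
#   GeoUtils.random_ipv4('203.0.113.0/24')   # or any explicit block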


class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
    def __init__(self, proxies=None):
        # Set default handlers
        for type in ('http', 'https'):
            setattr(self, '%s_open' % type,
                    lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
                        meth(r, proxy, type))
        compat_urllib_request.ProxyHandler.__init__(self, proxies)

    def proxy_open(self, req, proxy, type):
        req_proxy = req.headers.get('Ytdl-request-proxy')
        if req_proxy is not None:
            proxy = req_proxy
            del req.headers['Ytdl-request-proxy']

        if proxy == '__noproxy__':
            return None  # No Proxy
        if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
            req.add_header('Ytdl-socks-proxy', proxy)
            # yt-dlp's http/https handlers do the wrapping of the socket with SOCKS
            return None
        return compat_urllib_request.ProxyHandler.proxy_open(
            self, req, proxy, type)


# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
# released into Public Domain
# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387


def long_to_bytes(n, blocksize=0):
    """long_to_bytes(n:long, blocksize:int) : string
    Convert a long integer to a byte string.

    If optional blocksize is given and greater than zero, pad the front of the
    byte string with binary zeros so that the length is a multiple of
    blocksize.
    """
    # after much testing, this algorithm was deemed to be the fastest
    s = b''
    n = int(n)
    while n > 0:
        s = compat_struct_pack('>I', n & 0xffffffff) + s
        n = n >> 32
    # strip off leading zeros
    for i in range(len(s)):
        if s[i] != b'\000'[0]:
            break
    else:
        # only happens when n == 0
        s = b'\000'
        i = 0
    s = s[i:]
    # add back some pad bytes. this could be done more efficiently w.r.t. the
    # de-padding being done above, but sigh...
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * b'\000' + s
    return s


def bytes_to_long(s):
    """bytes_to_long(string) : long
    Convert a byte string to a long integer.

    This is (essentially) the inverse of long_to_bytes().
    """
    acc = 0
    length = len(s)
    if length % 4:
        extra = (4 - length % 4)
        s = b'\000' * extra + s
        length = length + extra
    for i in range(0, length, 4):
        acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
    return acc
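
# For illustration, the two functions round-trip:
#   long_to_bytes(65537) == b'\x01\x00\x01'
#   long_to_bytes(65537, blocksize=8) == b'\x00\x00\x00\x00\x00\x01\x00\x01'
#   bytes_to_long(b'\x01\x00\x01') == 65537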


def ohdave_rsa_encrypt(data, exponent, modulus):
    '''
    Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/

    Input:
        data: data to encrypt, bytes-like object
        exponent, modulus: parameter e and N of RSA algorithm, both integer
    Output: hex string of encrypted data

    Limitation: supports one block encryption only
    '''

    payload = int(binascii.hexlify(data[::-1]), 16)
    encrypted = pow(payload, exponent, modulus)
    return '%x' % encrypted


def pkcs1pad(data, length):
    """
    Pad input data with the PKCS#1 scheme

    @param {int[]} data        input data
    @param {int} length        target length
    @returns {int[]}           padded data
    """
    if len(data) > length - 11:
        raise ValueError('Input data too long for PKCS#1 padding')

    pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
    return [0, 2] + pseudo_random + [0] + data
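
# For illustration: pkcs1pad([1, 2, 3], 16) yields a 16-element list of the
# form [0, 2, r, r, r, r, r, r, r, r, r, r, 0, 1, 2, 3], where each r is a
# random integer in [0, 254]; a target length below len(data) + 11 raises
# ValueError.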


def encode_base_n(num, n, table=None):
    FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if not table:
        table = FULL_TABLE[:n]

    if n > len(table):
        raise ValueError('base %d exceeds table length %d' % (n, len(table)))

    if num == 0:
        return table[0]

    ret = ''
    while num:
        ret = table[num % n] + ret
        num = num // n
    return ret
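
# For illustration, with the default table this behaves like ordinary base
# conversion:
#   encode_base_n(1000, 16) == '3e8'
#   encode_base_n(0, 2) == '0'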


def decode_packed_codes(code):
    mobj = re.search(PACKED_CODES_RE, code)
    obfuscated_code, base, count, symbols = mobj.groups()
    base = int(base)
    count = int(count)
    symbols = symbols.split('|')
    symbol_table = {}

    while count:
        count -= 1
        base_n_count = encode_base_n(count, base)
        symbol_table[base_n_count] = symbols[count] or base_n_count

    return re.sub(
        r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
        obfuscated_code)


def caesar(s, alphabet, shift):
    if shift == 0:
        return s
    l = len(alphabet)
    return ''.join(
        alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
        for c in s)


def rot47(s):
    return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)


def parse_m3u8_attributes(attrib):
    info = {}
    for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
        if val.startswith('"'):
            val = val[1:-1]
        info[key] = val
    return info
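
# For illustration, quoted values lose their quotes while commas inside them
# are preserved:
#   parse_m3u8_attributes('BANDWIDTH=1280000,CODECS="mp4a.40.2,avc1.4d401f"')
#   == {'BANDWIDTH': '1280000', 'CODECS': 'mp4a.40.2,avc1.4d401f'}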


def urshift(val, n):
    return val >> n if val >= 0 else (val + 0x100000000) >> n
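
# For illustration, urshift emulates JavaScript's unsigned 32-bit right shift:
#   urshift(-8, 2) == 1073741822   # whereas -8 >> 2 == -2 in Python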


# Based on png2str() written by @gdkchan and improved by @yokrysty
# Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
def decode_png(png_data):
    # Reference: https://www.w3.org/TR/PNG/
    header = png_data[8:]

    if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
        raise IOError('Not a valid PNG file.')

    int_map = {1: '>B', 2: '>H', 4: '>I'}
    unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]

    chunks = []

    while header:
        length = unpack_integer(header[:4])
        header = header[4:]

        chunk_type = header[:4]
        header = header[4:]

        chunk_data = header[:length]
        header = header[length:]

        header = header[4:]  # Skip CRC

        chunks.append({
            'type': chunk_type,
            'length': length,
            'data': chunk_data
        })

    ihdr = chunks[0]['data']

    width = unpack_integer(ihdr[:4])
    height = unpack_integer(ihdr[4:8])

    idat = b''

    for chunk in chunks:
        if chunk['type'] == b'IDAT':
            idat += chunk['data']

    if not idat:
        raise IOError('Unable to read PNG data.')

    decompressed_data = bytearray(zlib.decompress(idat))

    stride = width * 3
    pixels = []

    def _get_pixel(idx):
        x = idx % stride
        y = idx // stride
        return pixels[y][x]

    for y in range(height):
        basePos = y * (1 + stride)
        filter_type = decompressed_data[basePos]

        current_row = []

        pixels.append(current_row)

        for x in range(stride):
            color = decompressed_data[1 + basePos + x]
            basex = y * stride + x
            left = 0
            up = 0

            if x > 2:
                left = _get_pixel(basex - 3)
            if y > 0:
                up = _get_pixel(basex - stride)

            if filter_type == 1:  # Sub
                color = (color + left) & 0xff
            elif filter_type == 2:  # Up
                color = (color + up) & 0xff
            elif filter_type == 3:  # Average
                color = (color + ((left + up) >> 1)) & 0xff
            elif filter_type == 4:  # Paeth
                a = left
                b = up
                c = 0

                if x > 2 and y > 0:
                    c = _get_pixel(basex - stride - 3)

                p = a + b - c

                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)

                if pa <= pb and pa <= pc:
                    color = (color + a) & 0xff
                elif pb <= pc:
                    color = (color + b) & 0xff
                else:
                    color = (color + c) & 0xff

            current_row.append(color)

    return width, height, pixels


def write_xattr(path, key, value):
    # This mess below finds the best xattr tool for the job
    try:
        # try the pyxattr module...
        import xattr

        if hasattr(xattr, 'set'):  # pyxattr
            # Unicode arguments are not supported in python-pyxattr until
            # version 0.5.0
            # See https://github.com/ytdl-org/youtube-dl/issues/5498
            pyxattr_required_version = '0.5.0'
            if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
                # TODO: fallback to CLI tools
                raise XAttrUnavailableError(
                    'python-pyxattr is detected but is too old. '
                    'yt-dlp requires %s or above while your version is %s. '
                    'Falling back to other xattr implementations' % (
                        pyxattr_required_version, xattr.__version__))

            setxattr = xattr.set
        else:  # xattr
            setxattr = xattr.setxattr

        try:
            setxattr(path, key, value)
        except EnvironmentError as e:
            raise XAttrMetadataError(e.errno, e.strerror)

    except ImportError:
        if compat_os_name == 'nt':
            # Write xattrs to NTFS Alternate Data Streams:
            # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
            assert ':' not in key
            assert os.path.exists(path)

            ads_fn = path + ':' + key
            try:
                with open(ads_fn, 'wb') as f:
                    f.write(value)
            except EnvironmentError as e:
                raise XAttrMetadataError(e.errno, e.strerror)
        else:
            user_has_setfattr = check_executable('setfattr', ['--version'])
            user_has_xattr = check_executable('xattr', ['-h'])

            if user_has_setfattr or user_has_xattr:
                value = value.decode('utf-8')
                if user_has_setfattr:
                    executable = 'setfattr'
                    opts = ['-n', key, '-v', value]
                elif user_has_xattr:
                    executable = 'xattr'
                    opts = ['-w', key, value]

                cmd = ([encodeFilename(executable, True)]
                       + [encodeArgument(o) for o in opts]
                       + [encodeFilename(path, True)])

                try:
                    p = subprocess.Popen(
                        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
                except EnvironmentError as e:
                    raise XAttrMetadataError(e.errno, e.strerror)
                stdout, stderr = p.communicate_or_kill()
                stderr = stderr.decode('utf-8', 'replace')
                if p.returncode != 0:
                    raise XAttrMetadataError(p.returncode, stderr)

            else:
                # On Unix, and can't find pyxattr, setfattr, or xattr.
                if sys.platform.startswith('linux'):
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'pyxattr' or 'xattr' "
                        "modules, or the GNU 'attr' package "
                        "(which contains the 'setfattr' tool).")
                else:
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'xattr' module, "
                        "or the 'xattr' binary.")


def random_birthday(year_field, month_field, day_field):
    start_date = datetime.date(1950, 1, 1)
    end_date = datetime.date(1995, 12, 31)
    offset = random.randint(0, (end_date - start_date).days)
    random_date = start_date + datetime.timedelta(offset)
    return {
        year_field: str(random_date.year),
        month_field: str(random_date.month),
        day_field: str(random_date.day),
    }


# Templates for internet shortcut files, which are plain text files.
DOT_URL_LINK_TEMPLATE = '''
[InternetShortcut]
URL=%(url)s
'''.lstrip()

DOT_WEBLOC_LINK_TEMPLATE = '''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
\t<key>URL</key>
\t<string>%(url)s</string>
</dict>
</plist>
'''.lstrip()

DOT_DESKTOP_LINK_TEMPLATE = '''
[Desktop Entry]
Encoding=UTF-8
Name=%(filename)s
Type=Link
URL=%(url)s
Icon=text-html
'''.lstrip()

LINK_TEMPLATES = {
    'url': DOT_URL_LINK_TEMPLATE,
    'desktop': DOT_DESKTOP_LINK_TEMPLATE,
    'webloc': DOT_WEBLOC_LINK_TEMPLATE,
}


def iri_to_uri(iri):
    """
    Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).

    The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact.
    """

    iri_parts = compat_urllib_parse_urlparse(iri)

    if '[' in iri_parts.netloc:
        raise ValueError('IPv6 URIs are not, yet, supported.')
        # Querying `.netloc`, when there's only one bracket, also raises a ValueError.

    # The `safe` argument values, that the following code uses, contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.

    net_location = ''
    if iri_parts.username:
        net_location += compat_urllib_parse_quote(iri_parts.username, safe=r"!$%&'()*+,~")
        if iri_parts.password is not None:
            net_location += ':' + compat_urllib_parse_quote(iri_parts.password, safe=r"!$%&'()*+,~")
        net_location += '@'

    net_location += iri_parts.hostname.encode('idna').decode('utf-8')  # Punycode for Unicode hostnames.
    # The 'idna' encoding produces ASCII text.
    if iri_parts.port is not None and iri_parts.port != 80:
        net_location += ':' + str(iri_parts.port)

    return compat_urllib_parse_urlunparse(
        (iri_parts.scheme,
            net_location,

            compat_urllib_parse_quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),

            # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
            compat_urllib_parse_quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),

            # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
            compat_urllib_parse_quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),

            compat_urllib_parse_quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))

    # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
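
# For illustration, non-ASCII path characters are percent-encoded as UTF-8
# while already-encoded parts are left intact:
#   iri_to_uri('http://example.com/déjà%20vu')
#   == 'http://example.com/d%C3%A9j%C3%A0%20vu'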


def to_high_limit_path(path):
    if sys.platform in ['win32', 'cygwin']:
        # Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited.
        return r'\\?\ '.rstrip() + os.path.abspath(path)

    return path


def format_field(obj, field=None, template='%s', ignore=(None, ''), default='', func=None):
    val = traverse_obj(obj, *variadic(field))
    if val in ignore:
        return default
    return template % (func(val) if func else val)
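
# For illustration:
#   format_field({'height': 1080}, 'height', '%dp') == '1080p'
#   format_field({'height': None}, 'height', '%dp', default='unknown') == 'unknown'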


def clean_podcast_url(url):
    return re.sub(r'''(?x)
        (?:
            (?:
                chtbl\.com/track|
                media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
                play\.podtrac\.com
            )/[^/]+|
            (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
            flex\.acast\.com|
            pd(?:
                cn\.co| # https://podcorn.com/analytics-prefix/
                st\.fm # https://podsights.com/docs/
            )/e
        )/''', '', url)


_HEX_TABLE = '0123456789abcdef'


def random_uuidv4():
    return re.sub(r'[xy]', lambda x: _HEX_TABLE[random.randint(0, 15)], 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')


def make_dir(path, to_screen=None):
    try:
        dn = os.path.dirname(path)
        if dn and not os.path.exists(dn):
            os.makedirs(dn)
        return True
    except (OSError, IOError) as err:
        if callable(to_screen):
            to_screen('unable to create directory ' + error_to_compat_str(err))
        return False


def get_executable_path():
    from zipimport import zipimporter
    if hasattr(sys, 'frozen'):  # Running from PyInstaller
        path = os.path.dirname(sys.executable)
    elif isinstance(globals().get('__loader__'), zipimporter):  # Running from ZIP
        path = os.path.join(os.path.dirname(__file__), '../..')
    else:
        path = os.path.join(os.path.dirname(__file__), '..')
    return os.path.abspath(path)


def load_plugins(name, suffix, namespace):
    classes = {}
    try:
        plugins_spec = importlib.util.spec_from_file_location(
            name, os.path.join(get_executable_path(), 'ytdlp_plugins', name, '__init__.py'))
        plugins = importlib.util.module_from_spec(plugins_spec)
        sys.modules[plugins_spec.name] = plugins
        plugins_spec.loader.exec_module(plugins)
        for name in dir(plugins):
            if name in namespace:
                continue
            if not name.endswith(suffix):
                continue
            klass = getattr(plugins, name)
            classes[name] = namespace[name] = klass
    except FileNotFoundError:
        pass
    return classes


def traverse_obj(
        obj, *path_list, default=None, expected_type=None, get_all=True,
        casesense=True, is_user_input=False, traverse_string=False):
    ''' Traverse nested list/dict/tuple
    @param path_list        A list of paths which are checked one by one.
                            Each path is a list of keys where each key is a string,
                            a function, a tuple of strings/None or "...".
                            When a function is given, it takes the key and value as arguments
                            and returns whether the key matches or not. When a tuple is given,
                            all the keys given in the tuple are traversed, and
                            "..." traverses all the keys in the object
                            "None" returns the object without traversal
    @param default          Default value to return
    @param expected_type    Only accept final value of this type (Can also be any callable)
    @param get_all          Return all the values obtained from a path or only the first one
    @param casesense        Whether to consider dictionary keys as case sensitive
    @param is_user_input    Whether the keys are generated from user input. If True,
                            strings are converted to int/slice if necessary
    @param traverse_string  Whether to traverse inside strings. If True, any
                            non-compatible object will also be converted into a string
    '''
    if not casesense:
        _lower = lambda k: (k.lower() if isinstance(k, str) else k)
        path_list = (map(_lower, variadic(path)) for path in path_list)

    def _traverse_obj(obj, path, _current_depth=0):
        nonlocal depth
        path = tuple(variadic(path))
        for i, key in enumerate(path):
            if None in (key, obj):
                return obj
            if isinstance(key, (list, tuple)):
                obj = [_traverse_obj(obj, sub_key, _current_depth) for sub_key in key]
                key = ...
            if key is ...:
                obj = (obj.values() if isinstance(obj, dict)
                       else obj if isinstance(obj, (list, tuple, LazyList))
                       else str(obj) if traverse_string else [])
                _current_depth += 1
                depth = max(depth, _current_depth)
                return [_traverse_obj(inner_obj, path[i + 1:], _current_depth) for inner_obj in obj]
            elif callable(key):
                if isinstance(obj, (list, tuple, LazyList)):
                    obj = enumerate(obj)
                elif isinstance(obj, dict):
                    obj = obj.items()
                else:
                    if not traverse_string:
                        return None
                    obj = str(obj)
                _current_depth += 1
                depth = max(depth, _current_depth)
                return [_traverse_obj(v, path[i + 1:], _current_depth)
                        for k, v in obj if try_call(key, args=(k, v))]
            elif isinstance(obj, dict) and not (is_user_input and key == ':'):
                obj = (obj.get(key) if casesense or (key in obj)
                       else next((v for k, v in obj.items() if _lower(k) == key), None))
            else:
                if is_user_input:
                    key = (int_or_none(key) if ':' not in key
                           else slice(*map(int_or_none, key.split(':'))))
                    if key == slice(None):
                        return _traverse_obj(obj, (..., *path[i + 1:]), _current_depth)
                if not isinstance(key, (int, slice)):
                    return None
                if not isinstance(obj, (list, tuple, LazyList)):
                    if not traverse_string:
                        return None
                    obj = str(obj)
                try:
                    obj = obj[key]
                except IndexError:
                    return None
        return obj

    if isinstance(expected_type, type):
        type_test = lambda val: val if isinstance(val, expected_type) else None
    elif expected_type is not None:
        type_test = expected_type
    else:
        type_test = lambda val: val

    for path in path_list:
        depth = 0
        val = _traverse_obj(obj, path)
        if val is not None:
            if depth:
                for _ in range(depth - 1):
                    val = itertools.chain.from_iterable(v for v in val if v is not None)
                val = [v for v in map(type_test, val) if v is not None]
                if val:
                    return val if get_all else val[0]
            else:
                val = type_test(val)
                if val is not None:
                    return val
    return default


def traverse_dict(dictn, keys, casesense=True):
    write_string('DeprecationWarning: yt_dlp.utils.traverse_dict is deprecated '
                 'and may be removed in a future version. Use yt_dlp.utils.traverse_obj instead')
    return traverse_obj(dictn, keys, casesense=casesense, is_user_input=True, traverse_string=True)


def get_first(obj, keys, **kwargs):
    return traverse_obj(obj, (..., *variadic(keys)), **kwargs, get_all=False)


def variadic(x, allowed_types=(str, bytes, dict)):
    return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,)
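
# For illustration, variadic leaves iterables alone but wraps "atomic" values,
# including str/bytes/dict by default:
#   variadic([1, 2]) == [1, 2]
#   variadic('spam') == ('spam',)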


def decode_base(value, digits):
    # This will convert given base-x string to scalar (long or int)
    table = {char: index for index, char in enumerate(digits)}
    result = 0
    base = len(digits)
    for chr in value:
        result *= base
        result += table[chr]
    return result
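
# For illustration, decode_base inverts encode_base_n for a matching digit
# table:
#   decode_base('3e8', '0123456789abcdef') == 1000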


def time_seconds(**kwargs):
    t = datetime.datetime.now(datetime.timezone(datetime.timedelta(**kwargs)))
    return t.timestamp()


# create a JSON Web Signature (jws) with HS256 algorithm
# the resulting format is in JWS Compact Serialization
# implemented following JWT https://www.rfc-editor.org/rfc/rfc7519.html
# implemented following JWS https://www.rfc-editor.org/rfc/rfc7515.html
def jwt_encode_hs256(payload_data, key, headers={}):
    header_data = {
        'alg': 'HS256',
        'typ': 'JWT',
    }
    if headers:
        header_data.update(headers)
    header_b64 = base64.b64encode(json.dumps(header_data).encode('utf-8'))
    payload_b64 = base64.b64encode(json.dumps(payload_data).encode('utf-8'))
    h = hmac.new(key.encode('utf-8'), header_b64 + b'.' + payload_b64, hashlib.sha256)
    signature_b64 = base64.b64encode(h.digest())
    token = header_b64 + b'.' + payload_b64 + b'.' + signature_b64
    return token


# can be extended in future to verify the signature and parse header and return the algorithm used if it's not HS256
def jwt_decode_hs256(jwt):
    header_b64, payload_b64, signature_b64 = jwt.split('.')
    payload_data = json.loads(base64.urlsafe_b64decode(payload_b64))
    return payload_data
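
# For illustration (with an assumed secret key 'secret'; the encoder returns
# bytes, so decode before splitting on '.'):
#   token = jwt_encode_hs256({'sub': 'yt-dlp'}, 'secret')
#   jwt_decode_hs256(token.decode()) == {'sub': 'yt-dlp'}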


def supports_terminal_sequences(stream):
    if compat_os_name == 'nt':
        from .compat import WINDOWS_VT_MODE  # Must be imported locally
        if not WINDOWS_VT_MODE or get_windows_version() < (10, 0, 10586):
            return False
    elif not os.getenv('TERM'):
        return False
    try:
        return stream.isatty()
    except BaseException:
        return False


_terminal_sequences_re = re.compile('\033\\[[^m]+m')


def remove_terminal_sequences(string):
    return _terminal_sequences_re.sub('', string)


def number_of_digits(number):
    return len('%d' % number)


def join_nonempty(*values, delim='-', from_dict=None):
    if from_dict is not None:
        values = map(from_dict.get, values)
    return delim.join(map(str, filter(None, values)))
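
# For illustration, false-y values are dropped and the rest stringified:
#   join_nonempty('mp4', None, '', 1080, delim='-') == 'mp4-1080'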


def scale_thumbnails_to_max_format_width(formats, thumbnails, url_width_re):
    """
    Find the largest format dimensions in terms of video width and, for each thumbnail:
    * Modify the URL: Match the width with the provided regex and replace with the former width
    * Update dimensions

    This function is useful with video services that scale the provided thumbnails on demand
    """
    _keys = ('width', 'height')
    max_dimensions = max(
        [tuple(format.get(k) or 0 for k in _keys) for format in formats],
        default=(0, 0))
    if not max_dimensions[0]:
        return thumbnails
    return [
        merge_dicts(
            {'url': re.sub(url_width_re, str(max_dimensions[0]), thumbnail['url'])},
            dict(zip(_keys, max_dimensions)), thumbnail)
        for thumbnail in thumbnails
    ]


def parse_http_range(range):
    """ Parse value of "Range" or "Content-Range" HTTP header into tuple. """
    if not range:
        return None, None, None
    crg = re.search(r'bytes[ =](\d+)-(\d+)?(?:/(\d+))?', range)
    if not crg:
        return None, None, None
    return int(crg.group(1)), int_or_none(crg.group(2)), int_or_none(crg.group(3))
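
# For illustration:
#   parse_http_range('bytes=1000-') == (1000, None, None)
#   parse_http_range('bytes 1000-1999/5000') == (1000, 1999, 5000)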


class Config:
    own_args = None
    filename = None
    __initialized = False

    def __init__(self, parser, label=None):
        self._parser, self.label = parser, label
        self._loaded_paths, self.configs = set(), []

    def init(self, args=None, filename=None):
        assert not self.__initialized
        directory = ''
        if filename:
            location = os.path.realpath(filename)
            directory = os.path.dirname(location)
            if location in self._loaded_paths:
                return False
            self._loaded_paths.add(location)

        self.__initialized = True
        self.own_args, self.filename = args, filename
        for location in self._parser.parse_args(args)[0].config_locations or []:
            location = os.path.join(directory, expand_path(location))
            if os.path.isdir(location):
                location = os.path.join(location, 'yt-dlp.conf')
            if not os.path.exists(location):
                self._parser.error(f'config location {location} does not exist')
            self.append_config(self.read_file(location), location)
        return True

    def __repr__(self):
        label = join_nonempty(
            self.label, 'config', f'"{self.filename}"' if self.filename else '',
            delim=' ')
        return join_nonempty(
            self.own_args is not None and f'{label[0].upper()}{label[1:]}: {self.hide_login_info(self.own_args)}',
            *(f'\n{c}'.replace('\n', '\n| ')[1:] for c in self.configs),
            delim='\n')

    @staticmethod
    def read_file(filename, default=[]):
        try:
            optionf = open(filename)
        except IOError:
            return default  # silently skip if file is not present
        try:
            # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
            contents = optionf.read()
            if sys.version_info < (3,):
                contents = contents.decode(preferredencoding())
            res = compat_shlex_split(contents, comments=True)
        finally:
            optionf.close()
        return res

    @staticmethod
    def hide_login_info(opts):
        PRIVATE_OPTS = set(['-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'])
        eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')

        def _scrub_eq(o):
            m = eqre.match(o)
            if m:
                return m.group('key') + '=PRIVATE'
            else:
                return o

        opts = list(map(_scrub_eq, opts))
        for idx, opt in enumerate(opts):
            if opt in PRIVATE_OPTS and idx + 1 < len(opts):
                opts[idx + 1] = 'PRIVATE'
        return opts

    def append_config(self, *args, label=None):
        config = type(self)(self._parser, label)
        config._loaded_paths = self._loaded_paths
        if config.init(*args):
            self.configs.append(config)

    @property
    def all_args(self):
        for config in reversed(self.configs):
            yield from config.all_args
        yield from self.own_args or []

    def parse_args(self):
        return self._parser.parse_args(list(self.all_args))


class WebSocketsWrapper():
    """Wraps websockets module to use in non-async scopes"""
    pool = None

    def __init__(self, url, headers=None, connect=True):
        self.loop = asyncio.events.new_event_loop()
        self.conn = compat_websockets.connect(
            url, extra_headers=headers, ping_interval=None,
            close_timeout=float('inf'), loop=self.loop, ping_timeout=float('inf'))
        if connect:
            self.__enter__()
        atexit.register(self.__exit__, None, None, None)

    def __enter__(self):
        if not self.pool:
            self.pool = self.run_with_loop(self.conn.__aenter__(), self.loop)
        return self

    def send(self, *args):
        self.run_with_loop(self.pool.send(*args), self.loop)

    def recv(self, *args):
        return self.run_with_loop(self.pool.recv(*args), self.loop)

    def __exit__(self, type, value, traceback):
        try:
            return self.run_with_loop(self.conn.__aexit__(type, value, traceback), self.loop)
        finally:
            self.loop.close()
            self._cancel_all_tasks(self.loop)

    # taken from https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py with modifications
    # for contributors: if any new library that uses asyncio needs to run in non-async scopes, move these functions out of this class
    @staticmethod
    def run_with_loop(main, loop):
        if not asyncio.coroutines.iscoroutine(main):
            raise ValueError(f'a coroutine was expected, got {main!r}')

        try:
            return loop.run_until_complete(main)
        finally:
            loop.run_until_complete(loop.shutdown_asyncgens())
            if hasattr(loop, 'shutdown_default_executor'):
                loop.run_until_complete(loop.shutdown_default_executor())

    @staticmethod
    def _cancel_all_tasks(loop):
        to_cancel = asyncio.tasks.all_tasks(loop)

        if not to_cancel:
            return

        for task in to_cancel:
            task.cancel()

        loop.run_until_complete(
            asyncio.tasks.gather(*to_cancel, loop=loop, return_exceptions=True))

        for task in to_cancel:
            if task.cancelled():
                continue
            if task.exception() is not None:
                loop.call_exception_handler({
                    'message': 'unhandled exception during asyncio.run() shutdown',
                    'exception': task.exception(),
                    'task': task,
                })


has_websockets = bool(compat_websockets)


def merge_headers(*dicts):
    """Merge dicts of http headers case insensitively, prioritizing the latter ones"""
    return {k.title(): v for k, v in itertools.chain.from_iterable(map(dict.items, dicts))}
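
# For illustration, later dicts win and keys are normalized via str.title():
#   merge_headers({'user-agent': 'A', 'X-Foo': '1'}, {'User-Agent': 'B'})
#   == {'User-Agent': 'B', 'X-Foo': '1'}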


class classproperty:
    def __init__(self, f):
        self.f = f

    def __get__(self, _, cls):