from __future__ import unicode_literals

import xml.etree.ElementTree

from .compat import (
    compat_HTMLParseError,
    compat_ctypes_WINFUNCTYPE,
    compat_etree_fromstring,
    compat_html_entities_html5,
    compat_urllib_parse_urlencode,
    compat_urllib_parse_urlparse,
    compat_urllib_parse_urlunparse,
    compat_urllib_parse_quote,
    compat_urllib_parse_quote_plus,
    compat_urllib_parse_unquote_plus,
    compat_urllib_request,
)

def register_socks_protocols():
    # "Register" SOCKS protocols
    # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
    # URLs with protocols not in urlparse.uses_netloc are not handled correctly
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in compat_urlparse.uses_netloc:
            compat_urlparse.uses_netloc.append(scheme)

# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))

def random_user_agent():
    _USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
    return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)

SUPPORTED_ENCODINGS = [
    'gzip', 'deflate'
]
if compat_brotli:
    SUPPORTED_ENCODINGS.append('br')

std_headers = {
    'User-Agent': random_user_agent(),
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-us,en;q=0.5',
    'Sec-Fetch-Mode': 'navigate',
}

USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}


NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}

KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'avi', 'divx',
    'mov',
    'asf', 'wmv', 'wma',
    '3gp', '3g2',
    'mp3',
    'flac',
    'ape',
    'wav',
    'f4f', 'f4m', 'm3u8', 'smil')

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))

DATE_FORMATS = (
    '%Y-%m-%d %H:%M:%S.%f',
    '%Y-%m-%d %H:%M:%S:%f',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%b %d %Y at %H:%M:%S',
    '%B %d %Y at %H:%M:%S',
)

DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d %B %Y',
    '%d %b %Y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
])

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])

PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'

def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref

def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non ascii characters unless we
        # use a unicode object
        path_basename = lambda f: os.path.basename(fn).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    args = {
        'suffix': '.tmp',
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args['mode'] = 'w'
        args['encoding'] = 'utf-8'

    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))

    try:
        with tf:
            json.dump(obj, tf, ensure_ascii=False)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        try:
            mask = os.umask(0)
            os.umask(mask)
            os.chmod(tf.name, 0o666 & ~mask)
        except OSError:
            pass
        os.rename(tf.name, fn)
    except Exception:
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise

if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] """
        assert re.match(r'^[a-zA-Z_-]+$', key)
        expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
        return node.find(expr)
else:
    def find_xpath_attr(node, xpath, key, val=None):
        for f in node.findall(compat_xpath(xpath)):
            if key not in f.attrib:
                continue
            if val is None or f.attrib.get(key) == val:
                return f
        return None

# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter


def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)

def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(compat_xpath(xpath))

    if isinstance(xpath, (str, compat_str)):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n

def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text

def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = '%s[@%s]' % (xpath, key) if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]

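# Illustrative use of the xpath helpers above (a minimal sketch; the sample
# XML document is made up for demonstration):
#
#   doc = compat_etree_fromstring('<root><a id="x">hello</a></root>')
#   xpath_text(doc, './a')        # -> 'hello'
#   xpath_attr(doc, './a', 'id')  # -> 'x'
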
def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html)


def get_element_html_by_id(id, html):
    """Return the html of the tag with the specified ID in the passed HTML document"""
    return get_element_html_by_attribute('id', id, html)

def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_html_by_class(class_name, html):
    """Return the html of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_html_by_class(class_name, html)
    return retval[0] if retval else None

def get_element_by_attribute(attribute, value, html, escape_value=True):
    retval = get_elements_by_attribute(attribute, value, html, escape_value)
    return retval[0] if retval else None


def get_element_html_by_attribute(attribute, value, html, escape_value=True):
    retval = get_elements_html_by_attribute(attribute, value, html, escape_value)
    return retval[0] if retval else None

def get_elements_by_class(class_name, html):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)

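# Illustrative use of the class-based helpers above (a sketch; the HTML
# snippet is made up):
#
#   html = '<span class="foo">bar</span>'
#   get_element_by_class('foo', html)       # -> 'bar'
#   get_element_html_by_class('foo', html)  # -> '<span class="foo">bar</span>'
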
def get_elements_html_by_class(class_name, html):
    """Return the html of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_html_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_by_attribute(*args, **kwargs):
    """Return the content of the tag with the specified attribute in the passed HTML document"""
    return [content for content, _ in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_html_by_attribute(*args, **kwargs):
    """Return the html of the tag with the specified attribute in the passed HTML document"""
    return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]

def get_elements_text_and_html_by_attribute(attribute, value, html, escape_value=True):
    """
    Return the text (content) and the html (whole) of the tag with the specified
    attribute in the passed HTML document
    """

    value_quote_optional = '' if re.match(r'''[\s"'`=<>]''', value) else '?'

    value = re.escape(value) if escape_value else value

    partial_element_re = r'''(?x)
        <(?P<tag>[a-zA-Z0-9:._-]+)
         (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
         \s%(attribute)s\s*=\s*(?P<_q>['"]%(vqo)s)(?-x:%(value)s)(?P=_q)
        ''' % {'attribute': re.escape(attribute), 'value': value, 'vqo': value_quote_optional}

    for m in re.finditer(partial_element_re, html):
        content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():])

        yield (
            unescapeHTML(re.sub(r'^(?P<q>["\'])(?P<content>.*)(?P=q)$', r'\g<content>', content, flags=re.DOTALL)),
            whole,
        )

class HTMLBreakOnClosingTagParser(compat_HTMLParser):
    """
    HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
    closing tag for the first opening tag it has encountered, and can be used
    as a context manager
    """

    class HTMLBreakOnClosingTagException(Exception):
        pass

    def __init__(self):
        self.tagstack = collections.deque()
        compat_HTMLParser.__init__(self)

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def close(self):
        # handle_endtag does not return upon raising HTMLBreakOnClosingTagException,
        # so data remains buffered; we no longer have any interest in it, thus
        # override this method to discard it
        pass

    def handle_starttag(self, tag, _):
        self.tagstack.append(tag)

    def handle_endtag(self, tag):
        if not self.tagstack:
            raise compat_HTMLParseError('no tags in the stack')
        while self.tagstack:
            inner_tag = self.tagstack.pop()
            if inner_tag == tag:
                break
        else:
            raise compat_HTMLParseError(f'matching opening tag for closing {tag} tag not found')
        if not self.tagstack:
            raise self.HTMLBreakOnClosingTagException()

def get_element_text_and_html_by_tag(tag, html):
    """
    For the first element with the specified tag in the passed HTML document
    return its content (text) and the whole element (html)
    """
    def find_or_raise(haystack, needle, exc):
        try:
            return haystack.index(needle)
        except ValueError:
            raise exc

    closing_tag = f'</{tag}>'
    whole_start = find_or_raise(
        html, f'<{tag}', compat_HTMLParseError(f'opening {tag} tag not found'))
    content_start = find_or_raise(
        html[whole_start:], '>', compat_HTMLParseError(f'malformed opening {tag} tag'))
    content_start += whole_start + 1
    with HTMLBreakOnClosingTagParser() as parser:
        parser.feed(html[whole_start:content_start])
        if not parser.tagstack or parser.tagstack[0] != tag:
            raise compat_HTMLParseError(f'parser did not match opening {tag} tag')
        offset = content_start
        while offset < len(html):
            next_closing_tag_start = find_or_raise(
                html[offset:], closing_tag,
                compat_HTMLParseError(f'closing {tag} tag not found'))
            next_closing_tag_end = next_closing_tag_start + len(closing_tag)
            try:
                parser.feed(html[offset:offset + next_closing_tag_end])
                offset += next_closing_tag_end
            except HTMLBreakOnClosingTagParser.HTMLBreakOnClosingTagException:
                return html[content_start:offset + next_closing_tag_start], \
                    html[whole_start:offset + next_closing_tag_end]
        raise compat_HTMLParseError('unexpected end of html')

class HTMLAttributeParser(compat_HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""

    def __init__(self):
        self.attrs = {}
        compat_HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)

class HTMLListAttrsParser(compat_HTMLParser):
    """HTML parser to gather the attributes for the elements of a list"""

    def __init__(self):
        compat_HTMLParser.__init__(self)
        self.items = []
        self._level = 0

    def handle_starttag(self, tag, attrs):
        if tag == 'li' and self._level == 0:
            self.items.append(dict(attrs))
        self._level += 1

    def handle_endtag(self, tag):
        self._level -= 1

def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
    but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
    """
    parser = HTMLAttributeParser()
    try:
        parser.feed(html_element)
        parser.close()
    # Older Python may throw HTMLParseError in case of malformed HTML
    except compat_HTMLParseError:
        pass
    return parser.attrs

def parse_list(webpage):
    """Given a string for a series of HTML <li> elements,
    return a list of dictionaries of their attributes"""
    parser = HTMLListAttrsParser()
    parser.feed(webpage)
    parser.close()
    return parser.items

def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    html = re.sub(r'\s+', ' ', html)
    html = re.sub(r'(?u)\s?<\s?br\s?/?\s?>\s?', '\n', html)
    html = re.sub(r'(?u)<\s?/\s?p\s?>\s?<\s?p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()

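# Illustrative behaviour (a sketch): paragraph and <br> markup collapses to
# newlines, remaining tags are stripped, e.g.
#   clean_html('<p>foo</p><p>bar</p>')  # -> 'foo\nbar'
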
def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = locked_file(filename, open_mode, block=False).open()
        return (stream, filename)
    except (IOError, OSError) as err:
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars
        alt_filename = sanitize_path(filename)
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller
            stream = locked_file(alt_filename, open_mode, block=False).open()
            return (stream, alt_filename)

def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp

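# Illustrative behaviour (a sketch; the timezone offset is applied):
#   timeconvert('Wed, 02 Oct 2002 13:00:00 GMT')  # -> 1033563600
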
def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
    """Sanitizes a string so it could be used as part of a filename.
    @param restricted   Use a stricter subset of allowed characters
    @param is_id        Whether this is an ID that should be kept unchanged if possible.
                        If unset, yt-dlp's new sanitization rules are in effect
    """
    if s == '':
        return ''

    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        elif not restricted and char == '\n':
            return '\0 '
        elif char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '\0_\0-' if restricted else '\0 \0-'
        elif char in '\\/|*<>':
            return '\0_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace() or ord(char) > 127):
            return '\0_'
        return char

    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)  # Handle timestamps
    result = ''.join(map(replace_insane, s))
    if is_id is NO_DEFAULT:
        result = re.sub('(\0.)(?:(?=\\1)..)+', r'\1', result)  # Remove repeated substitute chars
        STRIP_RE = '(?:\0.|[ _-])*'
        result = re.sub(f'^\0.{STRIP_RE}|{STRIP_RE}\0.$', '', result)  # Remove substitute chars from start/end
    result = result.replace('\0', '') or '_'

    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result

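# Illustrative behaviour (a sketch): under the default rules '?' is dropped
# and ':' becomes ' -', e.g.
#   sanitize_filename('Movie: Part 1?')  # -> 'Movie - Part 1'
# With restricted=True, '_' replaces spaces and ACCENT_CHARS strips accents.
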
def sanitize_path(s, force=False):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform == 'win32':
        force = False
        drive_or_unc, _ = os.path.splitdrive(s)
        if sys.version_info < (2, 7) and not drive_or_unc:
            drive_or_unc, _ = os.path.splitunc(s)
    elif force:
        drive_or_unc = ''
    else:
        return s

    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    elif force and s[0] == os.path.sep:
        sanitized_path.insert(0, os.path.sep)
    return os.path.join(*sanitized_path)

def sanitize_url(url):
    # Prepend protocol-less URLs with `http:` scheme in order to mitigate
    # the number of unwanted failures due to missing protocol
    if url.startswith('//'):
        return 'http:%s' % url
    # Fix some common typos seen so far
    COMMON_TYPOS = (
        # https://github.com/ytdl-org/youtube-dl/issues/15649
        (r'^httpss://', r'https://'),
        # https://bx1.be/lives/direct-tv/
        (r'^rmtp([es]?)://', r'rtmp\1://'),
    )
    for mistake, fixup in COMMON_TYPOS:
        if re.match(mistake, url):
            return re.sub(mistake, fixup, url)
    return url

def extract_basic_auth(url):
    parts = compat_urlparse.urlsplit(url)
    if parts.username is None:
        return url, None
    url = compat_urlparse.urlunsplit(parts._replace(netloc=(
        parts.hostname if parts.port is None
        else '%s:%d' % (parts.hostname, parts.port))))
    auth_payload = base64.b64encode(
        ('%s:%s' % (parts.username, parts.password or '')).encode('utf-8'))
    return url, 'Basic ' + auth_payload.decode('utf-8')

def sanitized_Request(url, *args, **kwargs):
    url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
    if auth_header is not None:
        headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
        headers['Authorization'] = auth_header
    return compat_urllib_request.Request(url, *args, **kwargs)

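# Illustrative behaviour (a sketch; the credentials are made up):
#   extract_basic_auth('https://user:pass@example.com/x')
#   # -> ('https://example.com/x', 'Basic dXNlcjpwYXNz')
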
821 """Expand shell variables
and ~
"""
822 return os.path.expandvars(compat_expanduser(s))
825 def orderedSet(iterable):
826 """ Remove all duplicates
from the
input iterable
"""
def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in compat_html_entities_html5:
        return compat_html_entities_html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/ytdl-org/youtube-dl/issues/7518
        try:
            return compat_chr(int(numstr, base))
        except ValueError:
            pass

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert type(s) == compat_str

    return re.sub(
        r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)

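# Illustrative behaviour (a sketch):
#   unescapeHTML('Caf&eacute;')  # -> 'Café'
#   unescapeHTML('&#38;')        # -> '&'
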
def escapeHTML(text):
    return (
        text
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;')
        .replace('"', '&quot;')
        .replace("'", '&#39;')
    )

def process_communicate_or_kill(p, *args, **kwargs):
    try:
        return p.communicate(*args, **kwargs)
    except BaseException:  # Including KeyboardInterrupt
        p.kill()
        p.wait()
        raise

class Popen(subprocess.Popen):
    if sys.platform == 'win32':
        _startupinfo = subprocess.STARTUPINFO()
        _startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    else:
        _startupinfo = None

    def __init__(self, *args, **kwargs):
        super(Popen, self).__init__(*args, **kwargs, startupinfo=self._startupinfo)

    def communicate_or_kill(self, *args, **kwargs):
        return process_communicate_or_kill(self, *args, **kwargs)

def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding

def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """

    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    # Pass '' directly to use Unicode APIs on Windows 2000 and up
    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        return s

    # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
    if sys.platform.startswith('java'):
        return s

    return s.encode(get_subprocess_encoding(), 'ignore')

def decodeFilename(b, for_subprocess=False):
    if sys.version_info >= (3, 0):
        return b

    if not isinstance(b, bytes):
        return b

    return b.decode(get_subprocess_encoding(), 'ignore')

def encodeArgument(s):
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)


def decodeArgument(b):
    return decodeFilename(b, True)

def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval

_timetuple = collections.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))


def timetuple_from_msec(msec):
    secs, msec = divmod(msec, 1000)
    mins, secs = divmod(secs, 60)
    hrs, mins = divmod(mins, 60)
    return _timetuple(hrs, mins, secs, msec)


def formatSeconds(secs, delim=':', msec=False):
    time = timetuple_from_msec(secs * 1000)
    if time.hours:
        ret = '%d%s%02d%s%02d' % (time.hours, delim, time.minutes, delim, time.seconds)
    elif time.minutes:
        ret = '%d%s%02d' % (time.minutes, delim, time.seconds)
    else:
        ret = '%d' % time.seconds
    return '%s.%03d' % (ret, time.milliseconds) if msec else ret

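# Illustrative behaviour (a sketch):
#   formatSeconds(3661)           # -> '1:01:01'
#   formatSeconds(75, msec=True)  # -> '1:15.000'
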
def _ssl_load_windows_store_certs(ssl_context, storename):
    # Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
    try:
        certs = [cert for cert, encoding, trust in ssl.enum_certificates(storename)
                 if encoding == 'x509_asn' and (
                     trust is True or ssl.Purpose.SERVER_AUTH.oid in trust)]
    except PermissionError:
        return
    for cert in certs:
        try:
            ssl_context.load_verify_locations(cadata=cert)
        except ssl.SSLError:
            pass

def make_HTTPS_handler(params, **kwargs):
    opts_check_certificate = not params.get('nocheckcertificate')
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = opts_check_certificate
    if params.get('legacyserverconnect'):
        context.options |= 4  # SSL_OP_LEGACY_SERVER_CONNECT
    context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
    if opts_check_certificate:
        if has_certifi and 'no-certifi' not in params.get('compat_opts', []):
            context.load_verify_locations(cafile=certifi.where())
        else:
            try:
                context.load_default_certs()
                # Work around the issue in load_default_certs when there are bad certificates. See:
                # https://github.com/yt-dlp/yt-dlp/issues/1060,
                # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
            except ssl.SSLError:
                # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
                if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
                    # Create a new context to discard any certificates that were already loaded
                    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
                    context.check_hostname, context.verify_mode = True, ssl.CERT_REQUIRED
                    for storename in ('CA', 'ROOT'):
                        _ssl_load_windows_store_certs(context, storename)
                context.set_default_verify_paths()
    return YoutubeDLHTTPSHandler(params, context=context, **kwargs)

def bug_reports_message(before=';'):
    msg = ('please report this issue on https://github.com/yt-dlp/yt-dlp , '
           'filling out the appropriate issue template. '
           'Confirm you are on the latest version using yt-dlp -U')

    before = before.rstrip()
    if not before or before.endswith(('.', '!', '?')):
        msg = msg[0].title() + msg[1:]

    return (before + ' ' if before else '') + msg

class YoutubeDLError(Exception):
    """Base exception for YoutubeDL errors."""

    msg = None

    def __init__(self, msg=None):
        if msg is not None:
            self.msg = msg
        elif self.msg is None:
            self.msg = type(self).__name__
        super().__init__(self.msg)

network_exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
    network_exceptions.append(ssl.CertificateError)
network_exceptions = tuple(network_exceptions)

class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
        """
        if sys.exc_info()[0] in network_exceptions:
            expected = True

        self.orig_msg = str(msg)
        self.traceback = tb
        self.expected = expected
        self.cause = cause
        self.video_id = video_id
        self.ie = ie
        self.exc_info = sys.exc_info()  # preserve original exception

        super(ExtractorError, self).__init__(''.join((
            format_field(ie, template='[%s] '),
            format_field(video_id, template='%s: '),
            msg,
            format_field(cause, template=' (caused by %r)'),
            '' if expected else bug_reports_message())))

    def format_traceback(self):
        return join_nonempty(
            self.traceback and ''.join(traceback.format_tb(self.traceback)),
            self.cause and ''.join(traceback.format_exception(None, self.cause, self.cause.__traceback__)[1:]),
            delim='\n') or None

class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super(UnsupportedError, self).__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass

class GeoRestrictedError(ExtractorError):
    """Geographic restriction Error exception.

    This exception may be thrown when a video is not available from your
    geographic location due to geographic restrictions imposed by a website.
    """

    def __init__(self, msg, countries=None, **kwargs):
        kwargs['expected'] = True
        super(GeoRestrictedError, self).__init__(msg, **kwargs)
        self.countries = countries

class DownloadError(YoutubeDLError):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info

class EntryNotInPlaylist(YoutubeDLError):
    """Entry not in playlist exception.

    This exception will be thrown by YoutubeDL when a requested entry
    is not found in the playlist info_dict
    """
    msg = 'Entry not found in info'

class SameFileError(YoutubeDLError):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    msg = 'Fixed output name but more than one file to download'

    def __init__(self, filename=None):
        if filename is not None:
            self.msg += f': {filename}'
        super().__init__(self.msg)

class PostProcessingError(YoutubeDLError):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """

class DownloadCancelled(YoutubeDLError):
    """ Exception raised when the download queue should be interrupted """
    msg = 'The download was cancelled'


class ExistingVideoReached(DownloadCancelled):
    """ --break-on-existing triggered """
    msg = 'Encountered a video that is already in the archive, stopping due to --break-on-existing'


class RejectedVideoReached(DownloadCancelled):
    """ --break-on-reject triggered """
    msg = 'Encountered a video that did not match filter, stopping due to --break-on-reject'


class MaxDownloadsReached(DownloadCancelled):
    """ --max-downloads limit has been reached. """
    msg = 'Maximum number of downloads reached, stopping due to --max-downloads'

class ReExtractInfo(YoutubeDLError):
    """ Video info needs to be re-extracted. """

    def __init__(self, msg, expected=False):
        super().__init__(msg)
        self.expected = expected


class ThrottledDownload(ReExtractInfo):
    """ Download speed below --throttled-rate. """
    msg = 'The download speed is below throttle limit'

    def __init__(self):
        super().__init__(self.msg, expected=False)

class UnavailableVideoError(YoutubeDLError):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    msg = 'Unable to download video'

    def __init__(self, err=None):
        if err is not None:
            self.msg += f': {err}'
        super().__init__(self.msg)

class ContentTooShortError(YoutubeDLError):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        super(ContentTooShortError, self).__init__(
            'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
        )
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected

class XAttrMetadataError(YoutubeDLError):
    def __init__(self, code=None, msg='Unknown error'):
        super(XAttrMetadataError, self).__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        if (self.code in (errno.ENOSPC, errno.EDQUOT)
                or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'


class XAttrUnavailableError(YoutubeDLError):
    pass

def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
    # expected HTTP responses to meet HTTP/1.0 or later (see also
    # https://github.com/ytdl-org/youtube-dl/issues/6727)
    if sys.version_info < (3, 0):
        kwargs['strict'] = True
    hc = http_class(*args, **compat_kwargs(kwargs))
    source_address = ydl_handler._params.get('source_address')

    if source_address is not None:
        # This is to workaround _create_connection() from socket where it will try all
        # address data from getaddrinfo() including IPv6. This filters the result from
        # getaddrinfo() based on the source_address value.
        # This is based on the cpython socket.create_connection() function.
        # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
        def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
            host, port = address
            err = None
            addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
            af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
            ip_addrs = [addr for addr in addrs if addr[0] == af]
            if addrs and not ip_addrs:
                ip_version = 'v4' if af == socket.AF_INET else 'v6'
                raise socket.error(
                    "No remote IP%s addresses available for connect, can't use '%s' as source address"
                    % (ip_version, source_address[0]))
            for res in ip_addrs:
                af, socktype, proto, canonname, sa = res
                sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                        sock.settimeout(timeout)
                    sock.bind(source_address)
                    sock.connect(sa)
                    err = None  # Explicitly break reference cycle
                    return sock
                except socket.error as _:
                    err = _
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            else:
                raise socket.error('getaddrinfo returns an empty list')
        if hasattr(hc, '_create_connection'):
            hc._create_connection = _create_connection
        sa = (source_address, 0)
        if hasattr(hc, 'source_address'):  # Python 2.7+
            hc.source_address = sa
        else:  # Python 2.6
            def _hc_connect(self, *args, **kwargs):
                sock = _create_connection(
                    (self.host, self.port), self.timeout, sa)
                if is_https:
                    self.sock = ssl.wrap_socket(
                        sock, self.key_file, self.cert_file,
                        ssl_version=ssl.PROTOCOL_TLSv1)
                else:
                    self.sock = sock
            hc.connect = functools.partial(_hc_connect, hc)

    return hc

def handle_youtubedl_headers(headers):
    filtered_headers = headers

    if 'Youtubedl-no-compression' in filtered_headers:
        filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
        del filtered_headers['Youtubedl-no-compression']

    return filtered_headers

class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        conn_class = compat_http_client.HTTPConnection

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        if not data:
            return data
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def brotli(data):
        if not data:
            return data
        return compat_brotli.decompress(data)

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in self._params.get('http_headers', std_headers).items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        if 'Accept-encoding' not in req.headers:
            req.add_header('Accept-encoding', ', '.join(SUPPORTED_ENCODINGS))

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # brotli
        if resp.headers.get('Content-encoding', '') == 'br':
            resp = compat_urllib_request.addinfourl(
                io.BytesIO(self.brotli(resp.read())), old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/ytdl-org/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                if sys.version_info >= (3, 0):
                    location = location.encode('iso-8859-1').decode('utf-8')
                else:
                    location = location.decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    if sys.version_info < (3, 0):
                        location_escaped = location_escaped.encode('utf-8')
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response

def make_socks_conn_class(base_class, socks_proxy):
    assert issubclass(base_class, (
        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))

    url_components = compat_urlparse.urlparse(socks_proxy)
    if url_components.scheme.lower() == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif url_components.scheme.lower() in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif url_components.scheme.lower() == 'socks4a':
        socks_type = ProxyType.SOCKS4A

    def unquote_if_non_empty(s):
        if not s:
            return s
        return compat_urllib_parse_unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if type(self.timeout) in (int, float):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            if isinstance(self, compat_http_client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection

class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        conn_class = self._https_conn_class

        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, True),
            req, **kwargs)

class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
    """
    See [1] for cookie file format.

    1. https://curl.haxx.se/docs/http-cookies.html
    """
    _HTTPONLY_PREFIX = '#HttpOnly_'
    _ENTRY_LEN = 7
    _HEADER = '''# Netscape HTTP Cookie File
# This file is generated by yt-dlp.  Do not edit.

'''
    _CookieFileEntry = collections.namedtuple(
        'CookieFileEntry',
        ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))

    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
        """
        Save cookies to a file.

        Most of the code is taken from CPython 3.8 and slightly adapted
        to support cookie files with UTF-8 in both python 2 and 3.
        """
        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)

        # Store session cookies with `expires` set to 0 instead of an empty
        # string
        for cookie in self:
            if cookie.expires is None:
                cookie.expires = 0

        with io.open(filename, 'w', encoding='utf-8') as f:
            f.write(self._HEADER)
            now = time.time()
            for cookie in self:
                if not ignore_discard and cookie.discard:
                    continue
                if not ignore_expires and cookie.is_expired(now):
                    continue
                if cookie.secure:
                    secure = 'TRUE'
                else:
                    secure = 'FALSE'
                if cookie.domain.startswith('.'):
                    initial_dot = 'TRUE'
                else:
                    initial_dot = 'FALSE'
                if cookie.expires is not None:
                    expires = compat_str(cookie.expires)
                else:
                    expires = ''
                if cookie.value is None:
                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
                    # with no name, whereas http.cookiejar regards it as a
                    # cookie with no value.
                    name = ''
                    value = cookie.name
                else:
                    name = cookie.name
                    value = cookie.value
                f.write(
                    '\t'.join([cookie.domain, initial_dot, cookie.path,
                               secure, expires, name, value]) + '\n')

    def load(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Load cookies from a file."""
        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)

        def prepare_line(line):
            if line.startswith(self._HTTPONLY_PREFIX):
                line = line[len(self._HTTPONLY_PREFIX):]
            # comments and empty lines are fine
            if line.startswith('#') or not line.strip():
                return line
            cookie_list = line.split('\t')
            if len(cookie_list) != self._ENTRY_LEN:
                raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list))
            cookie = self._CookieFileEntry(*cookie_list)
            if cookie.expires_at and not cookie.expires_at.isdigit():
                raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
            return line

        cf = io.StringIO()
        with io.open(filename, encoding='utf-8') as f:
            for line in f:
                try:
                    cf.write(prepare_line(line))
                except compat_cookiejar.LoadError as e:
                    write_string(
                        'WARNING: skipping cookie file entry due to %s: %r\n'
                        % (e, line), sys.stderr)
                    continue
        cf.seek(0)
        self._really_load(cf, filename, ignore_discard, ignore_expires)
        # Session cookies are denoted by either `expires` field set to
        # an empty string or 0. MozillaCookieJar only recognizes the former
        # (see [1]). So we need to force the latter to be recognized as session
        # cookies on our own.
        # Session cookies may be important for cookies-based authentication,
        # e.g. usually, when user does not check 'Remember me' check box while
        # logging in on a site, some important cookies are stored as session
        # cookies so that not recognizing them will result in failed login.
        # 1. https://bugs.python.org/issue17164
        for cookie in self:
            # Treat `expires=0` cookies as session cookies
            if cookie.expires == 0:
                cookie.expires = None
                cookie.discard = True

class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        # Python 2 will choke on next HTTP request in row if there are non-ASCII
        # characters in Set-Cookie HTTP header of last response (see
        # https://github.com/ytdl-org/youtube-dl/issues/6769).
        # In order to at least prevent crashing we will percent encode Set-Cookie
        # header before HTTPCookieProcessor starts processing it.
        # if sys.version_info < (3, 0) and response.headers:
        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
        #         set_cookie = response.headers.get(set_cookie_header)
        #         if set_cookie:
        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
        #             if set_cookie != set_cookie_escaped:
        #                 del response.headers[set_cookie_header]
        #                 response.headers[set_cookie_header] = set_cookie_escaped
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response

class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
    """YoutubeDL redirect handler

    The code is based on HTTPRedirectHandler implementation from CPython [1].

    This redirect handler solves two issues:
     - ensures redirect URL is always unicode under python 2
     - introduces support for experimental HTTP response status code
       308 Permanent Redirect [2] used by some sites [3]

    1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
    2. https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308
    3. https://github.com/ytdl-org/youtube-dl/issues/28768
    """

    http_error_301 = http_error_303 = http_error_307 = http_error_308 = compat_urllib_request.HTTPRedirectHandler.http_error_302

    def redirect_request(self, req, fp, code, msg, headers, newurl):
        """Return a Request or None in response to a redirect.

        This is called by the http_error_30x methods when a
        redirection response is received.  If a redirection should
        take place, return a new Request to allow http_error_30x to
        perform the redirect.  Otherwise, raise HTTPError if no-one
        else should try to handle this url.  Return None if you can't
        but another Handler might.
        """
        m = req.get_method()
        if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
                 or code in (301, 302, 303) and m == "POST")):
            raise compat_HTTPError(req.full_url, code, msg, headers, fp)
        # Strictly (according to RFC 2616), 301 or 302 in response to
        # a POST MUST NOT cause a redirection without confirmation
        # from the user (of urllib.request, in this case). In practice,
        # essentially all clients do redirect in this case, so we do
        # the same.

        # On python 2 urlh.geturl() may sometimes return redirect URL
        # as byte string instead of unicode. This workaround allows
        # to force it always return unicode.
        if sys.version_info[0] < 3:
            newurl = compat_str(newurl)

        # Be conciliant with URIs containing a space.  This is mainly
        # redundant with the more complete encoding done in http_error_302(),
        # but it is kept for compatibility with other callers.
        newurl = newurl.replace(' ', '%20')

        CONTENT_HEADERS = ("content-length", "content-type")
        # NB: don't use dict comprehension for python 2.6 compatibility
        newheaders = dict((k, v) for k, v in req.headers.items()
                          if k.lower() not in CONTENT_HEADERS)
        return compat_urllib_request.Request(
            newurl, headers=newheaders, origin_req_host=req.origin_req_host,
            unverifiable=True)

def extract_timezone(date_str):
    m = re.search(
        r'''(?x)
            ^.{8,}?                                              # >=8 char non-TZ prefix, if present
            (?P<tz>Z|                                            # just the UTC Z, or
               (?:(?<=.\b\d{4}|\b\d{2}:\d\d)|                    # preceded by 4 digits or hh:mm or
                  (?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d))      # not preceded by 3 alpha word or >= 4 alpha or 2 digits
               [ ]?                                              # optional space
               (?P<sign>\+|-)                                    # +/-
               (?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})        # hh[:]mm
            $)
        ''', date_str)
    if not m:
        timezone = datetime.timedelta()
    else:
        date_str = date_str[:-len(m.group('tz'))]
        if not m.group('sign'):
            timezone = datetime.timedelta()
        else:
            sign = 1 if m.group('sign') == '+' else -1
            timezone = datetime.timedelta(
                hours=sign * int(m.group('hours')),
                minutes=sign * int(m.group('minutes')))
    return timezone, date_str

def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        timezone, date_str = extract_timezone(date_str)

    try:
        date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
        dt = datetime.datetime.strptime(date_str, date_format) - timezone
        return calendar.timegm(dt.timetuple())
    except ValueError:
        pass

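# Illustrative behaviour (a sketch):
#   parse_iso8601('2014-05-10T12:00:00+02:00')  # -> 1399716000
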
def date_formats(day_first=True):
    return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST

def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None
    upload_date = None
    # Replace commas
    date_str = date_str.replace(',', ' ')
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
    _, date_str = extract_timezone(date_str)

    for expression in date_formats(day_first):
        try:
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
        except ValueError:
            pass
    if upload_date is None:
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            try:
                upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
            except ValueError:
                pass
    if upload_date is not None:
        return compat_str(upload_date)

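# Illustrative behaviour (a sketch):
#   unified_strdate('December 21, 2010')  # -> '20101221'
#   unified_strdate('8/7/2009')           # -> '20090708' (day-first by default)
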
def unified_timestamp(date_str, day_first=True):
    if date_str is None:
        return None

    date_str = re.sub(r'[,|]', '', date_str)

    pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
    timezone, date_str = extract_timezone(date_str)

    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    # Remove unrecognized timezones from ISO 8601 alike timestamps
    m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
    if m:
        date_str = date_str[:-len(m.group('tz'))]

    # Python only supports microseconds, so remove nanoseconds
    m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
    if m:
        date_str = m.group(1)

    for expression in date_formats(day_first):
        try:
            dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
            return calendar.timegm(dt.timetuple())
        except ValueError:
            pass
    timetuple = email.utils.parsedate_tz(date_str)
    if timetuple:
        return calendar.timegm(timetuple) + pm_delta * 3600

def determine_ext(url, default_ext='unknown_video'):
    if url is None or '.' not in url:
        return default_ext
    guess = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', guess):
        return guess
    # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
    elif guess.rstrip('/') in KNOWN_EXTENSIONS:
        return guess.rstrip('/')
    else:
        return default_ext

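# Illustrative behaviour (a sketch):
#   determine_ext('http://example.com/video.mp4?dl=1')         # -> 'mp4'
#   determine_ext('http://example.com/foo/bar.mp4/?download')  # -> 'mp4'
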
def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
    return replace_extension(filename, sub_lang + '.' + sub_format, expected_real_ext)

def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today|yesterday|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?

    format: string date format used to return datetime object from
    precision: round the time portion of a datetime object.
                auto|microsecond|second|minute|hour|day.
                auto: round to the unit provided in date_str (if applicable).
    """
    auto_precision = False
    if precision == 'auto':
        auto_precision = True
        precision = 'microsecond'
    today = datetime_round(datetime.datetime.utcnow(), precision)
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match(
        r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)(s)?',
        date_str)
    if match is not None:
        start_time = datetime_from_str(match.group('start'), precision, format)
        time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1)
        unit = match.group('unit')
        if unit == 'month' or unit == 'year':
            new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time)
            unit = 'day'
        else:
            if unit == 'week':
                unit = 'day'
                time *= 7
            delta = datetime.timedelta(**{unit + 's': time})
            new_date = start_time + delta
        if auto_precision:
            return datetime_round(new_date, unit)
        return new_date

    return datetime_round(datetime.datetime.strptime(date_str, format), precision)

def date_from_str(date_str, format='%Y%m%d', strict=False):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today|yesterday|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?

    If "strict", only (now|today)[+-][0-9](day|week|month|year)(s)? is allowed

    format: string date format used to return datetime object from
    """
    if strict and not re.fullmatch(r'\d{8}|(now|today)[+-]\d+(day|week|month|year)(s)?', date_str):
        raise ValueError(f'Invalid date format {date_str}')
    return datetime_from_str(date_str, precision='microsecond', format=format).date()

def datetime_add_months(dt, months):
    """Increment/Decrement a datetime object by months."""
    month = dt.month + months - 1
    year = dt.year + month // 12
    month = month % 12 + 1
    day = min(dt.day, calendar.monthrange(year, month)[1])
    return dt.replace(year, month, day)

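# Illustrative behaviour (a sketch): the day is clamped to the length of the
# target month, e.g.
#   datetime_add_months(datetime.datetime(2020, 1, 31), 1)
#   # -> datetime.datetime(2020, 2, 29, 0, 0)
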
def datetime_round(dt, precision='day'):
    """
    Round a datetime object's time to a specific precision
    """
    if precision == 'microsecond':
        return dt

    unit_seconds = {
        'day': 86400,
        'hour': 3600,
        'minute': 60,
        'second': 1,
    }
    roundto = lambda x, n: ((x + n / 2) // n) * n
    timestamp = calendar.timegm(dt.timetuple())
    return datetime.datetime.utcfromtimestamp(roundto(timestamp, unit_seconds[precision]))

def hyphenate_date(date_str):
    """
    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is not None:
        return '-'.join(match.groups())
    else:
        return date_str

class DateRange(object):
    """Represents a time interval between two dates"""

    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        if start is not None:
            self.start = date_from_str(start, strict=True)
        else:
            self.start = datetime.datetime.min.date()
        if end is not None:
            self.end = date_from_str(end, strict=True)
        else:
            self.end = datetime.datetime.max.date()
        if self.start > self.end:
            raise ValueError('Date range: "%s" , the start date must be before the end date' % self)

    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)

    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end

    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())

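# Illustrative behaviour (a sketch):
#   '20200115' in DateRange('20200101', '20200201')  # -> True
#   '20200315' in DateRange('20200101', '20200201')  # -> False
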
def platform_name():
    """ Returns the platform name as a compat_str """
    res = platform.platform()
    if isinstance(res, bytes):
        res = res.decode(preferredencoding())

    assert isinstance(res, compat_str)
    return res

def get_windows_version():
    ''' Get Windows version. None if it's not running on Windows '''
    if compat_os_name == 'nt':
        return version_tuple(platform.win32_ver()[1])
    else:
        return None

def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes
    import ctypes.wintypes

    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False

    GetStdHandle = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        ('GetStdHandle', ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    WriteConsoleW = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        ('GetConsoleMode', ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
                or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):
        return False

    def next_nonbmp_pos(s):
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)

    while s:
        count = min(next_nonbmp_pos(s), 1024)

        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True



def write_string(s, out=None, encoding=None):
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        if _windows_write_string(s, out):
            return

    if ('b' in getattr(out, 'mode', '')
            or sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        byt = s.encode(encoding or preferredencoding(), 'ignore')
        out.write(byt)
    elif hasattr(out, 'buffer'):
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
        byt = s.encode(enc, 'ignore')
        out.buffer.write(byt)
    else:
        out.write(s)
    out.flush()


def bytes_to_intlist(bs):
    if not bs:
        return []
    if isinstance(bs[0], int):  # Python 3
        return list(bs)
    else:
        return [ord(c) for c in bs]


def intlist_to_bytes(xs):
    if not xs:
        return b''
    return compat_struct_pack('%dB' % len(xs), *xs)


# Cross-platform file locking
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive, block):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)

        if not LockFileEx(msvcrt.get_osfhandle(f.fileno()),
                          (0x2 if exclusive else 0x0) | (0x0 if block else 0x1),
                          0, whole_low, whole_high, f._lock_file_overlapped_p):
            raise BlockingIOError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())

else:
    try:
        import fcntl

        def _lock_file(f, exclusive, block):
            try:
                fcntl.flock(f,
                            fcntl.LOCK_SH if not exclusive
                            else fcntl.LOCK_EX if block
                            else fcntl.LOCK_EX | fcntl.LOCK_NB)
            except BlockingIOError:
                raise
            except OSError:  # AOSP does not have flock()
                fcntl.lockf(f,
                            fcntl.LOCK_SH if not exclusive
                            else fcntl.LOCK_EX if block
                            else fcntl.LOCK_EX | fcntl.LOCK_NB)

        def _unlock_file(f):
            try:
                fcntl.flock(f, fcntl.LOCK_UN)
            except OSError:
                fcntl.lockf(f, fcntl.LOCK_UN)

    except ImportError:
        UNSUPPORTED_MSG = 'file locking is not supported on this platform'

        def _lock_file(f, exclusive, block):
            raise IOError(UNSUPPORTED_MSG)

        def _unlock_file(f):
            raise IOError(UNSUPPORTED_MSG)


class locked_file(object):
    _closed = False

    def __init__(self, filename, mode, block=True, encoding=None):
        assert mode in ['r', 'rb', 'a', 'ab', 'w', 'wb']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode
        self.block = block

    def __enter__(self):
        exclusive = 'r' not in self.mode
        try:
            _lock_file(self.f, exclusive, self.block)
        except IOError:
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            if not self._closed:
                _unlock_file(self.f)
        finally:
            self.f.close()
            self._closed = True

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)

    def flush(self):
        self.f.flush()

    def open(self):
        return self.__enter__()

    def close(self, *args):
        self.__exit__(self, *args, value=False, traceback=False)


def get_filesystem_encoding():
    encoding = sys.getfilesystemencoding()
    return encoding if encoding is not None else 'utf-8'


def shell_quote(args):
    quoted_args = []
    encoding = get_filesystem_encoding()
    for a in args:
        if isinstance(a, bytes):
            # We may get a filename encoded with 'encodeFilename'
            a = a.decode(encoding)
        quoted_args.append(compat_shlex_quote(a))
    return ' '.join(quoted_args)


def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """

    url, idata = unsmuggle_url(url, {})
    data.update(idata)
    sdata = compat_urllib_parse_urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return url + '#' + sdata


def unsmuggle_url(smug_url, default=None):
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, sdata = smug_url.rpartition('#')
    jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
    data = json.loads(jsond)
    return url, data
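

# Illustrative round trip (doctest-style sketch, not part of the original module):
#   >>> url = smuggle_url('https://example.com/video', {'referer': 'embed'})
#   >>> unsmuggle_url(url)
#   ('https://example.com/video', {'referer': 'embed'})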


def format_decimal_suffix(num, fmt='%d%s', *, factor=1000):
    """ Formats numbers with decimal suffixes like K, M, etc """
    num, factor = float_or_none(num), float(factor)
    if num is None or num < 0:
        return None
    POSSIBLE_SUFFIXES = 'kMGTPEZY'
    exponent = 0 if num == 0 else min(int(math.log(num, factor)), len(POSSIBLE_SUFFIXES))
    suffix = ['', *POSSIBLE_SUFFIXES][exponent]
    if factor == 1024:
        suffix = {'k': 'Ki', '': ''}.get(suffix, f'{suffix}i')
    converted = num / (factor ** exponent)
    return fmt % (converted, suffix)


def format_bytes(bytes):
    return format_decimal_suffix(bytes, '%.2f%sB', factor=1024) or 'N/A'
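

# Illustrative values (doctest-style sketch, not part of the original module):
# format_bytes uses factor=1024, so the suffixes are binary (KiB, MiB, ...).
#   >>> format_bytes(1536)
#   '1.50KiB'
#   >>> format_bytes(1024 ** 2)
#   '1.00MiB'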


def lookup_unit_table(unit_table, s):
    units_re = '|'.join(re.escape(u) for u in unit_table)
    m = re.match(
        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
    if not m:
        return None
    num_str = m.group('num').replace(',', '.')
    mult = unit_table[m.group('unit')]
    return int(float(num_str) * mult)


def parse_filesize(s):
    if s is None:
        return None

    # The lower-case forms are of course incorrect and unofficial,
    # but we support those too
    _UNIT_TABLE = {
        # (abbreviated unit spellings elided)
        'megabytes': 1000 ** 2,
        'mebibytes': 1024 ** 2,
        'gigabytes': 1000 ** 3,
        'gibibytes': 1024 ** 3,
        'terabytes': 1000 ** 4,
        'tebibytes': 1024 ** 4,
        'petabytes': 1000 ** 5,
        'pebibytes': 1024 ** 5,
        'exabytes': 1000 ** 6,
        'exbibytes': 1024 ** 6,
        'zettabytes': 1000 ** 7,
        'zebibytes': 1024 ** 7,
        'yottabytes': 1000 ** 8,
        'yobibytes': 1024 ** 8,
    }

    return lookup_unit_table(_UNIT_TABLE, s)


def parse_count(s):
    if s is None:
        return None

    s = re.sub(r'^[^\d]+\s', '', s).strip()

    if re.match(r'^[\d,.]+$', s):
        return str_to_int(s)

    _UNIT_TABLE = {
        'k': 1000,
        'K': 1000,
        'm': 1000 ** 2,
        'M': 1000 ** 2,
        'kk': 1000 ** 2,
        'KK': 1000 ** 2,
    }

    ret = lookup_unit_table(_UNIT_TABLE, s)
    if ret is not None:
        return ret

    mobj = re.match(r'([\d,.]+)(?:$|\s)', s)
    if mobj:
        return str_to_int(mobj.group(1))


def parse_resolution(s):
    if s is None:
        return {}

    mobj = re.search(r'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s)
    if mobj:
        return {
            'width': int(mobj.group('w')),
            'height': int(mobj.group('h')),
        }

    mobj = re.search(r'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s)
    if mobj:
        return {'height': int(mobj.group(1))}

    mobj = re.search(r'\b([48])[kK]\b', s)
    if mobj:
        return {'height': int(mobj.group(1)) * 540}

    return {}
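

# Illustrative inputs (doctest-style sketch, not part of the original module):
#   >>> parse_resolution('1920x1080')
#   {'width': 1920, 'height': 1080}
#   >>> parse_resolution('720p')
#   {'height': 720}
#   >>> parse_resolution('4k')  # 4k/8k map to heights via multiples of 540
#   {'height': 2160}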


def parse_bitrate(s):
    if not isinstance(s, compat_str):
        return None
    mobj = re.search(r'\b(\d+)\s*kbps', s)
    if mobj:
        return int(mobj.group(1))


def month_by_name(name, lang='en'):
    """ Return the number of a month by (locale-independently) English name """

    month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])

    try:
        return month_names.index(name) + 1
    except ValueError:
        return None


def month_by_abbreviation(abbrev):
    """ Return the number of a month by (locale-independently) English
        abbreviations """

    try:
        return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
    except ValueError:
        return None


def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML"""
    return re.sub(
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;',
        xml_str)


def setproctitle(title):
    assert isinstance(title, compat_str)

    # ctypes in Jython is not complete
    # http://bugs.jython.org/issue2148
    if sys.platform.startswith('java'):
        return

    try:
        libc = ctypes.cdll.LoadLibrary('libc.so.6')
    except OSError:
        return
    except TypeError:
        # LoadLibrary in Windows Python 2.7.13 only expects
        # a bytestring, but since unicode_literals turns
        # every string into a unicode string, it fails.
        return
    title_bytes = title.encode('utf-8')
    buf = ctypes.create_string_buffer(len(title_bytes))
    buf.value = title_bytes
    try:
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this


def remove_start(s, start):
    return s[len(start):] if s is not None and s.startswith(start) else s


def remove_end(s, end):
    return s[:-len(end)] if s is not None and s.endswith(end) else s


def remove_quotes(s):
    if s is None or len(s) < 2:
        return s
    for quote in ('"', "'", ):
        if s[0] == quote and s[-1] == quote:
            return s[1:-1]
    return s


def get_domain(url):
    domain = re.match(r'(?:https?:\/\/)?(?:www\.)?(?P<domain>[^\n\/]+\.[^\n\/]+)(?:\/(.*))?', url)
    return domain.group('domain') if domain else None


def url_basename(url):
    path = compat_urlparse.urlparse(url).path
    return path.strip('/').split('/')[-1]


def base_url(url):
    return re.match(r'https?://[^?#&]+/', url).group()


def urljoin(base, path):
    if isinstance(path, bytes):
        path = path.decode('utf-8')
    if not isinstance(path, compat_str) or not path:
        return None
    if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
        return path
    if isinstance(base, bytes):
        base = base.decode('utf-8')
    if not isinstance(base, compat_str) or not re.match(
            r'^(?:https?:)?//', base):
        return None
    return compat_urlparse.urljoin(base, path)


class HEADRequest(compat_urllib_request.Request):
    def get_method(self):
        return 'HEAD'


class PUTRequest(compat_urllib_request.Request):
    def get_method(self):
        return 'PUT'


def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    if get_attr and v is not None:
        v = getattr(v, get_attr, None)
    try:
        return int(v) * invscale // scale
    except (ValueError, TypeError, OverflowError):
        return default


def str_or_none(v, default=None):
    return default if v is None else compat_str(v)


def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    if isinstance(int_str, compat_integer_types):
        return int_str
    elif isinstance(int_str, compat_str):
        int_str = re.sub(r'[,\.\+]', '', int_str)
        return int_or_none(int_str)


def float_or_none(v, scale=1, invscale=1, default=None):
    if v is None:
        return default
    try:
        return float(v) * invscale / scale
    except (ValueError, TypeError):
        return default


def bool_or_none(v, default=None):
    return v if isinstance(v, bool) else default


def strip_or_none(v, default=None):
    return v.strip() if isinstance(v, compat_str) else default


def url_or_none(url):
    if not url or not isinstance(url, compat_str):
        return None
    url = url.strip()
    return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None


def request_to_url(req):
    if isinstance(req, compat_urllib_request.Request):
        return req.get_full_url()
    else:
        return req


def strftime_or_none(timestamp, date_format, default=None):
    datetime_object = None
    try:
        if isinstance(timestamp, compat_numeric_types):  # unix timestamp
            datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
        elif isinstance(timestamp, compat_str):  # assume YYYYMMDD
            datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
        return datetime_object.strftime(date_format)
    except (ValueError, TypeError, AttributeError):
        return default


def parse_duration(s):
    if not isinstance(s, compat_basestring):
        return None
    s = s.strip()
    if not s:
        return None

    days, hours, mins, secs, ms = [None] * 5
    m = re.match(r'''(?x)
            (?P<before_secs>
                (?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?
            (?P<secs>(?(before_secs)[0-9]{1,2}|[0-9]+))
            (?P<ms>[.:][0-9]+)?Z?$
        ''', s)
    if m:
        days, hours, mins, secs, ms = m.group('days', 'hours', 'mins', 'secs', 'ms')
    else:
        m = re.match(
            r'''(?ix)(?:P?
                (?:
                    [0-9]+\s*y(?:ears?)?,?\s*
                )?
                (?:
                    [0-9]+\s*m(?:onths?)?,?\s*
                )?
                (?:
                    [0-9]+\s*w(?:eeks?)?,?\s*
                )?
                (?:
                    (?P<days>[0-9]+)\s*d(?:ays?)?,?\s*
                )?
                T)?
                (?:
                    (?P<hours>[0-9]+)\s*h(?:ours?)?,?\s*
                )?
                (?:
                    (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?,?\s*
                )?
                (?:
                    (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
                )?Z?$''', s)
        if m:
            days, hours, mins, secs, ms = m.groups()
        else:
            m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
            if m:
                hours, mins = m.groups()
            else:
                return None

    duration = 0
    if secs:
        duration += float(secs)
    if mins:
        duration += float(mins) * 60
    if hours:
        duration += float(hours) * 60 * 60
    if days:
        duration += float(days) * 24 * 60 * 60
    if ms:
        duration += float(ms.replace(':', '.'))
    return duration
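

# Illustrative inputs (doctest-style sketch, not part of the original module):
#   >>> parse_duration('1:30')
#   90.0
#   >>> parse_duration('2h 30m')
#   9000.0
#   >>> parse_duration('PT1H30M')
#   5400.0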


def prepend_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)

    return (
        '{0}.{1}{2}'.format(name, ext, real_ext)
        if not expected_real_ext or real_ext[1:] == expected_real_ext
        else '{0}.{1}'.format(filename, ext))


def replace_extension(filename, ext, expected_real_ext=None):
    name, real_ext = os.path.splitext(filename)
    return '{0}.{1}'.format(
        name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
        ext)


def check_executable(exe, args=[]):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
    try:
        Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate_or_kill()
    except OSError:
        return False
    return exe


def _get_exe_version_output(exe, args):
    try:
        # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
        # SIGTTOU if yt-dlp is run in the background.
        # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
        out, _ = Popen(
            [encodeArgument(exe)] + args, stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate_or_kill()
    except OSError:
        return False
    if isinstance(out, bytes):  # Python 2.x
        out = out.decode('ascii', 'ignore')
    return out


def detect_exe_version(output, version_re=None, unrecognized='present'):
    assert isinstance(output, compat_str)
    if version_re is None:
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
    if m:
        return m.group(1)
    else:
        return unrecognized


def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized='present'):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    out = _get_exe_version_output(exe, args)
    return detect_exe_version(out, version_re, unrecognized) if out else False


class LazyList(collections.abc.Sequence):
    ''' Lazy immutable list from an iterable
    Note that slices of a LazyList are lists and not LazyList'''

    class IndexError(IndexError):
        pass

    def __init__(self, iterable, *, reverse=False, _cache=None):
        self.__iterable = iter(iterable)
        self.__cache = [] if _cache is None else _cache
        self.__reversed = reverse

    def __iter__(self):
        if self.__reversed:
            # We need to consume the entire iterable to iterate in reverse
            yield from self.exhaust()
            return
        yield from self.__cache
        for item in self.__iterable:
            self.__cache.append(item)
            yield item

    def __exhaust(self):
        self.__cache.extend(self.__iterable)
        # Discard the emptied iterable to make it pickle-able
        self.__iterable = []
        return self.__cache

    def exhaust(self):
        ''' Evaluate the entire iterable '''
        return self.__exhaust()[::-1 if self.__reversed else 1]

    @staticmethod
    def __reverse_index(x):
        return None if x is None else -(x + 1)

    def __getitem__(self, idx):
        if isinstance(idx, slice):
            if self.__reversed:
                idx = slice(self.__reverse_index(idx.start), self.__reverse_index(idx.stop), -(idx.step or 1))
            start, stop, step = idx.start, idx.stop, idx.step or 1
        elif isinstance(idx, int):
            if self.__reversed:
                idx = self.__reverse_index(idx)
            start, stop, step = idx, idx, 0
        else:
            raise TypeError('indices must be integers or slices')
        if ((start or 0) < 0 or (stop or 0) < 0
                or (start is None and step < 0)
                or (stop is None and step > 0)):
            # We need to consume the entire iterable to be able to slice from the end
            # Obviously, never use this with infinite iterables
            self.__exhaust()
            try:
                return self.__cache[idx]
            except IndexError as e:
                raise self.IndexError(e) from e
        n = max(start or 0, stop or 0) - len(self.__cache) + 1
        if n > 0:
            self.__cache.extend(itertools.islice(self.__iterable, n))
        try:
            return self.__cache[idx]
        except IndexError as e:
            raise self.IndexError(e) from e

    def __bool__(self):
        try:
            self[-1] if self.__reversed else self[0]
        except self.IndexError:
            return False
        return True

    def __len__(self):
        self.__exhaust()
        return len(self.__cache)

    def __reversed__(self):
        return type(self)(self.__iterable, reverse=not self.__reversed, _cache=self.__cache)

    def __copy__(self):
        return type(self)(self.__iterable, reverse=self.__reversed, _cache=self.__cache)

    def __repr__(self):
        # repr and str should mimic a list. So we exhaust the iterable
        return repr(self.exhaust())

    def __str__(self):
        return repr(self.exhaust())
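

# Illustrative usage (doctest-style sketch, not part of the original module):
# items are pulled from the underlying iterator only as far as needed.
#   >>> lst = LazyList(x * 2 for x in itertools.count())
#   >>> lst[4]    # consumes only the first five items
#   8
#   >>> lst[:3]   # slices are plain lists
#   [0, 2, 4]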


class PagedList(object):

    class IndexError(IndexError):
        pass

    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())

    def __init__(self, pagefunc, pagesize, use_cache=True):
        self._pagefunc = pagefunc
        self._pagesize = pagesize
        self._pagecount = float('inf')
        self._use_cache = use_cache
        self._cache = {}

    def getpage(self, pagenum):
        page_results = self._cache.get(pagenum)
        if page_results is None:
            page_results = [] if pagenum > self._pagecount else list(self._pagefunc(pagenum))
        if self._use_cache:
            self._cache[pagenum] = page_results
        return page_results

    def getslice(self, start=0, end=None):
        return list(self._getslice(start, end))

    def _getslice(self, start, end):
        raise NotImplementedError('This method must be implemented by subclasses')

    def __getitem__(self, idx):
        assert self._use_cache, 'Indexing PagedList requires cache'
        if not isinstance(idx, int) or idx < 0:
            raise TypeError('indices must be non-negative integers')
        entries = self.getslice(idx, idx + 1)
        if not entries:
            raise self.IndexError()
        return entries[0]


class OnDemandPagedList(PagedList):
    def _getslice(self, start, end):
        for pagenum in itertools.count(start // self._pagesize):
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:
                continue

            startv = (
                start % self._pagesize
                if firstid <= start < nextfirstid
                else 0)
            endv = (
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
                else None)

            try:
                page_results = self.getpage(pagenum)
            except Exception:
                self._pagecount = pagenum - 1
                raise
            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            yield from page_results

            # A little optimization - if current page is not "full", ie. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page_results) + startv < self._pagesize:
                break

            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
                break


class InAdvancePagedList(PagedList):
    def __init__(self, pagefunc, pagecount, pagesize):
        PagedList.__init__(self, pagefunc, pagesize, True)
        self._pagecount = pagecount

    def _getslice(self, start, end):
        start_page = start // self._pagesize
        end_page = self._pagecount if end is None else min(self._pagecount, end // self._pagesize + 1)
        skip_elems = start - start_page * self._pagesize
        only_more = None if end is None else end - start
        for pagenum in range(start_page, end_page):
            page_results = self.getpage(pagenum)
            if skip_elems:
                page_results = page_results[skip_elems:]
                skip_elems = None
            if only_more is not None:
                if len(page_results) < only_more:
                    only_more -= len(page_results)
                else:
                    yield from page_results[:only_more]
                    break
            yield from page_results
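

# Illustrative usage (doctest-style sketch, not part of the original module):
# with pagesize=3, slicing across pages only fetches the pages it needs.
#   >>> pages = InAdvancePagedList(lambda n: iter(range(n * 3, n * 3 + 3)), 4, 3)
#   >>> pages.getslice(2, 8)
#   [2, 3, 4, 5, 6, 7]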


def uppercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def lowercase_escape(s):
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4}',
        lambda m: unicode_escape(m.group(0))[0],
        s)


def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    if sys.version_info < (3, 0) and isinstance(s, compat_str):
        s = s.encode('utf-8')
    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")


def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    url_parsed = compat_urllib_parse_urlparse(url)
    return url_parsed._replace(
        netloc=url_parsed.netloc.encode('idna').decode('ascii'),
        path=escape_rfc3986(url_parsed.path),
        params=escape_rfc3986(url_parsed.params),
        query=escape_rfc3986(url_parsed.query),
        fragment=escape_rfc3986(url_parsed.fragment)
    ).geturl()


def parse_qs(url):
    return compat_parse_qs(compat_urllib_parse_urlparse(url).query)


def read_batch_urls(batch_fd):
    def fixup(url):
        if not isinstance(url, compat_str):
            url = url.decode('utf-8', 'replace')
        BOM_UTF8 = ('\xef\xbb\xbf', '\ufeff')
        for bom in BOM_UTF8:
            if url.startswith(bom):
                url = url[len(bom):]
        url = url.lstrip()
        if not url or url.startswith(('#', ';', ']')):
            return False
        # "#" cannot be stripped out since it is part of the URI
        # However, it can be safely stripped out if following a whitespace
        return re.split(r'\s#', url, 1)[0].rstrip()

    with contextlib.closing(batch_fd) as fd:
        return [url for url in map(fixup, fd) if url]


def urlencode_postdata(*args, **kargs):
    return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')


def update_url_query(url, query):
    if not query:
        return url
    parsed_url = compat_urlparse.urlparse(url)
    qs = compat_parse_qs(parsed_url.query)
    qs.update(query)
    return compat_urlparse.urlunparse(parsed_url._replace(
        query=compat_urllib_parse_urlencode(qs, True)))


def update_Request(req, url=None, data=None, headers={}, query={}):
    req_headers = req.headers.copy()
    req_headers.update(headers)
    req_data = data or req.data
    req_url = update_url_query(url or req.get_full_url(), query)
    req_get_method = req.get_method()
    if req_get_method == 'HEAD':
        req_type = HEADRequest
    elif req_get_method == 'PUT':
        req_type = PUTRequest
    else:
        req_type = compat_urllib_request.Request
    new_req = req_type(
        req_url, data=req_data, headers=req_headers,
        origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
    if hasattr(req, 'timeout'):
        new_req.timeout = req.timeout
    return new_req


def _multipart_encode_impl(data, boundary):
    content_type = 'multipart/form-data; boundary=%s' % boundary

    out = b''
    for k, v in data.items():
        out += b'--' + boundary.encode('ascii') + b'\r\n'
        if isinstance(k, compat_str):
            k = k.encode('utf-8')
        if isinstance(v, compat_str):
            v = v.encode('utf-8')
        # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
        # suggests sending UTF-8 directly. Firefox sends UTF-8, too
        content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
        if boundary.encode('ascii') in content:
            raise ValueError('Boundary overlaps with data')
        out += content

    out += b'--' + boundary.encode('ascii') + b'--\r\n'

    return out, content_type


def multipart_encode(data, boundary=None):
    '''
    Encode a dict to RFC 7578-compliant form-data

    data:
        A dict where keys and values can be either Unicode or bytes-like
        objects.
    boundary:
        If specified a Unicode object, it's used as the boundary. Otherwise
        a random boundary is generated.

    Reference: https://tools.ietf.org/html/rfc7578
    '''
    has_specified_boundary = boundary is not None

    while True:
        if boundary is None:
            boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))

        try:
            out, content_type = _multipart_encode_impl(data, boundary)
            break
        except ValueError:
            if has_specified_boundary:
                raise
            boundary = None

    return out, content_type
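

# Illustrative usage (sketch, not part of the original module; the boundary is
# fixed here only to make the output deterministic):
#   >>> out, ctype = multipart_encode({'field': 'value'}, boundary='BOUNDARY')
#   >>> ctype
#   'multipart/form-data; boundary=BOUNDARY'
#   >>> out.startswith(b'--BOUNDARY\r\n')
#   True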


def dict_get(d, key_or_keys, default=None, skip_false_values=True):
    if isinstance(key_or_keys, (list, tuple)):
        for key in key_or_keys:
            if key not in d or d[key] is None or skip_false_values and not d[key]:
                continue
            return d[key]
        return default
    return d.get(key_or_keys, default)


def try_get(src, getter, expected_type=None):
    for get in variadic(getter):
        try:
            v = get(src)
        except (AttributeError, KeyError, TypeError, IndexError):
            pass
        else:
            if expected_type is None or isinstance(v, expected_type):
                return v


def merge_dicts(*dicts):
    merged = {}
    for a_dict in dicts:
        for k, v in a_dict.items():
            if v is None:
                continue
            if (k not in merged
                    or (isinstance(v, compat_str) and v
                        and isinstance(merged[k], compat_str)
                        and not merged[k])):
                merged[k] = v
    return merged


def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
    return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)


US_RATINGS = {
    'G': 0,
    'PG': 10,
    'PG-13': 13,
    'R': 16,
    'NC': 18,
}


TV_PARENTAL_GUIDELINES = {
    'TV-Y': 0,
    'TV-Y7': 7,
    'TV-G': 0,
    'TV-PG': 0,
    'TV-14': 14,
    'TV-MA': 17,
}


def parse_age_limit(s):
    # isinstance(False, int) is True, so type() must be used instead
    if type(s) == int:
        return s if 0 <= s <= 21 else None
    if not isinstance(s, compat_basestring):
        return None
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    if m:
        return int(m.group('age'))
    s = s.upper()
    if s in US_RATINGS:
        return US_RATINGS[s]
    m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
    if m:
        return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
    return None


def strip_jsonp(code):
    return re.sub(
        r'''(?sx)^
            (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
            (?:\s*&&\s*(?P=func_name))?
            \s*\(\s*(?P<callback_data>.*)\);?
            \s*?(?://[^\n]*)*$''',
        r'\g<callback_data>', code)


def js_to_json(code, vars={}):
    # vars is a dict of var, val pairs to substitute
    COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
    SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
    INTEGER_TABLE = (
        (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
        (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
    )

    def fix_kv(m):
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        elif v in ('undefined', 'void 0'):
            return 'null'
        elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
            return ''

        if v[0] in ("'", '"'):
            v = re.sub(r'(?s)\\.|"', lambda m: {
                '"': '\\"',
                "\\'": "'",
                '\\\n': '',
                '\\x': '\\u00',
            }.get(m.group(0), m.group(0)), v[1:-1])
        else:
            for regex, base in INTEGER_TABLE:
                im = re.match(regex, v)
                if im:
                    i = int(im.group(1), base)
                    return '"%d":' % i if v.endswith(':') else '%d' % i

            if v in vars:
                return vars[v]

        return '"%s"' % v

    code = re.sub(r'new Date\((".+")\)', r'\g<1>', code)

    return re.sub(r'''(?sx)
        "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
        {comment}|,(?={skip}[\]}}])|
        void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
        \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
        [0-9]+(?={skip}:)|
        !+
        '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
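

# Illustrative conversion (doctest-style sketch, not part of the original module):
# unquoted keys, single-quoted strings and comments are rewritten to valid JSON.
#   >>> json.loads(js_to_json("{abc: 'def', /* comment */ key: 1}"))
#   {'abc': 'def', 'key': 1}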


def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q


POSTPROCESS_WHEN = {'pre_process', 'after_filter', 'before_dl', 'after_move', 'post_process', 'after_video', 'playlist'}


DEFAULT_OUTTMPL = {
    'default': '%(title)s [%(id)s].%(ext)s',
    'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
}
OUTTMPL_TYPES = {
    'description': 'description',
    'annotation': 'annotations.xml',
    'infojson': 'info.json',
    'pl_thumbnail': None,
    'pl_description': 'description',
    'pl_infojson': 'info.json',
    # (additional entries elided)
}

# As of [1] format syntax is:
#  %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
# 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
STR_FORMAT_RE_TMPL = r'''(?x)
    (?<!%)(?P<prefix>(?:%%)*)
    %
    (?P<has_key>\((?P<key>{0})\))?
    (?P<format>
        (?P<conversion>[#0\-+ ]+)?
        (?P<min_width>\d+)?
        (?P<precision>\.\d+)?
        (?P<len_mod>[hlL])?  # unused in python
        {1}  # conversion type
    )
'''


STR_FORMAT_TYPES = 'diouxXeEfFgGcrs'


def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) > length:
        return s[:length - len(ELLIPSES)] + ELLIPSES
    return s


def version_tuple(v):
    return tuple(int(e) for e in re.split(r'[-.]', v))


def is_outdated_version(version, limit, assume_new=True):
    if not version:
        return not assume_new
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return not assume_new


def ytdl_is_updateable():
    """ Returns if yt-dlp can be updated with -U """
    from .update import is_non_updateable
    return not is_non_updateable()


def args_to_str(args):
    # Get a short string representation for a subprocess command
    return ' '.join(compat_shlex_quote(a) for a in args)


def error_to_compat_str(err):
    err_str = str(err)
    # On python 2 error byte string must be decoded with proper
    # encoding rather than ascii
    if sys.version_info[0] < 3:
        err_str = err_str.decode(preferredencoding())
    return err_str


def mimetype2ext(mt):
    if mt is None:
        return None

    mt, _, params = mt.partition(';')
    mt = mt.strip()

    FULL_MAP = {
        # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
        # it's the most popular one
        'audio/mpeg': 'mp3',
        'audio/x-wav': 'wav',
        'audio/wave': 'wav',
        # (additional mappings elided)
    }

    ext = FULL_MAP.get(mt)
    if ext is not None:
        return ext

    SUBTYPE_MAP = {
        'smptett+xml': 'tt',
        'x-mp4-fragmented': 'mp4',
        'x-ms-sami': 'sami',
        'x-mpegurl': 'm3u8',
        'vnd.apple.mpegurl': 'm3u8',
        'vnd.ms-sstr+xml': 'ism',
        'filmstrip+json': 'fs',
        # (additional mappings elided)
    }

    _, _, subtype = mt.rpartition('/')
    ext = SUBTYPE_MAP.get(subtype.lower())
    if ext is not None:
        return ext

    SUFFIX_MAP = {
        'json': 'json',
        'xml': 'xml',
        'zip': 'zip',
        'gzip': 'gz',
    }

    _, _, suffix = subtype.partition('+')
    ext = SUFFIX_MAP.get(suffix)
    if ext is not None:
        return ext

    return subtype.replace('+', '.')


def ext2mimetype(ext_or_url):
    if not ext_or_url:
        return None
    if '.' not in ext_or_url:
        ext_or_url = f'file.{ext_or_url}'
    return mimetypes.guess_type(ext_or_url)[0]


def parse_codecs(codecs_str):
    # http://tools.ietf.org/html/rfc6381
    if not codecs_str:
        return {}
    split_codecs = list(filter(None, map(
        str.strip, codecs_str.strip().strip(',').split(','))))
    vcodec, acodec, tcodec, hdr = None, None, None, None
    for full_codec in split_codecs:
        parts = full_codec.split('.')
        codec = parts[0].replace('0', '')
        if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
                     'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
            if not vcodec:
                vcodec = '.'.join(parts[:4]) if codec in ('vp9', 'av1', 'hvc1') else full_codec
                if codec in ('dvh1', 'dvhe'):
                    hdr = 'DV'
                elif codec == 'av1' and len(parts) > 3 and parts[3] == '10':
                    hdr = 'HDR10'
                elif full_codec.replace('0', '').startswith('vp9.2'):
                    hdr = 'HDR10'
        elif codec in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
            if not acodec:
                acodec = full_codec
        elif codec in ('stpp', 'wvtt',):
            if not tcodec:
                tcodec = full_codec
        else:
            write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
    if vcodec or acodec or tcodec:
        return {
            'vcodec': vcodec or 'none',
            'acodec': acodec or 'none',
            'dynamic_range': hdr,
            **({'tcodec': tcodec} if tcodec is not None else {}),
        }
    elif len(split_codecs) == 2:
        return {
            'vcodec': split_codecs[0],
            'acodec': split_codecs[1],
        }
    return {}


def urlhandle_detect_ext(url_handle):
    getheader = url_handle.headers.get

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(getheader('Content-Type'))


def encode_data_uri(data, mime_type):
    return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))


def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """

    if age_limit is None:  # No limit set
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit


def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes. """

    BOMS = [
        (b'\xef\xbb\xbf', 'utf-8'),
        (b'\x00\x00\xfe\xff', 'utf-32-be'),
        (b'\xff\xfe\x00\x00', 'utf-32-le'),
        (b'\xff\xfe', 'utf-16-le'),
        (b'\xfe\xff', 'utf-16-be'),
    ]
    for bom, enc in BOMS:
        if first_bytes.startswith(bom):
            s = first_bytes[len(bom):].decode(enc, 'replace')
            break
    else:
        s = first_bytes.decode('utf-8', 'replace')

    return re.match(r'^\s*<', s)


def determine_protocol(info_dict):
    protocol = info_dict.get('protocol')
    if protocol is not None:
        return protocol

    url = sanitize_url(info_dict['url'])
    if url.startswith('rtmp'):
        return 'rtmp'
    elif url.startswith('mms'):
        return 'mms'
    elif url.startswith('rtsp'):
        return 'rtsp'

    ext = determine_ext(url)
    if ext == 'm3u8':
        return 'm3u8'
    elif ext == 'f4m':
        return 'f4m'

    return compat_urllib_parse_urlparse(url).scheme


def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False):
    """ Render a list of rows, each as a list of values.
    Text after a \t will be right aligned """
    def width(string):
        return len(remove_terminal_sequences(string).replace('\t', ''))

    def get_max_lens(table):
        return [max(width(str(v)) for v in col) for col in zip(*table)]

    def filter_using_list(row, filterArray):
        return [col for take, col in itertools.zip_longest(filterArray, row, fillvalue=True) if take]

    max_lens = get_max_lens(data) if hide_empty else []
    header_row = filter_using_list(header_row, max_lens)
    data = [filter_using_list(row, max_lens) for row in data]

    table = [header_row] + data
    max_lens = get_max_lens(table)
    extra_gap += 1
    if delim:
        table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data
        table[1][-1] = table[1][-1][:-extra_gap * len(delim)]  # Remove extra_gap from end of delimiter
    for row in table:
        for pos, text in enumerate(map(str, row)):
            if '\t' in text:
                row[pos] = text.replace('\t', ' ' * (max_lens[pos] - width(text))) + ' ' * extra_gap
            else:
                row[pos] = text + ' ' * (max_lens[pos] - width(text) + extra_gap)
    ret = '\n'.join(''.join(row).rstrip() for row in table)
    return ret


def _match_one(filter_part, dct, incomplete):
    # TODO: Generalize code with YoutubeDL._build_format_filter
    STRING_OPERATORS = {
        '*=': operator.contains,
        '^=': lambda attr, value: attr.startswith(value),
        '$=': lambda attr, value: attr.endswith(value),
        '~=': lambda attr, value: re.search(value, attr),
    }
    COMPARISON_OPERATORS = {
        **STRING_OPERATORS,
        '<=': operator.le,  # "<=" must be defined above "<"
        '<': operator.lt,
        '>=': operator.ge,
        '>': operator.gt,
        '=': operator.eq,
    }

    if isinstance(incomplete, bool):
        is_incomplete = lambda _: incomplete
    else:
        is_incomplete = lambda k: k in incomplete

    operator_rex = re.compile(r'''(?x)\s*
        (?P<key>[a-z_]+)
        \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
        (?:
            (?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
            (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
            (?P<strval>.+?)
        )
        \s*$
        ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        m = m.groupdict()
        unnegated_op = COMPARISON_OPERATORS[m['op']]
        if m['negation']:
            op = lambda attr, value: not unnegated_op(attr, value)
        else:
            op = unnegated_op
        comparison_value = m['quotedstrval'] or m['strval'] or m['intval']
        if m['quote']:
            comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote'])
        actual_value = dct.get(m['key'])
        numeric_comparison = None
        if isinstance(actual_value, compat_numeric_types):
            # If the original field is a string and matching comparisonvalue is
            # a number we should respect the origin of the original field
            # and process comparison value as a string (see
            # https://github.com/ytdl-org/youtube-dl/issues/11082)
            try:
                numeric_comparison = int(comparison_value)
            except ValueError:
                numeric_comparison = parse_filesize(comparison_value)
                if numeric_comparison is None:
                    numeric_comparison = parse_filesize(f'{comparison_value}B')
                if numeric_comparison is None:
                    numeric_comparison = parse_duration(comparison_value)
        if numeric_comparison is not None and m['op'] in STRING_OPERATORS:
            raise ValueError('Operator %s only supports string values!' % m['op'])
        if actual_value is None:
            return is_incomplete(m['key']) or m['none_inclusive']
        return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)

    UNARY_OPERATORS = {
        '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
        '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
    }
    operator_rex = re.compile(r'''(?x)\s*
        (?P<op>%s)\s*(?P<key>[a-z_]+)
        \s*$
        ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = UNARY_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        if is_incomplete(m.group('key')) and actual_value is None:
            return True
        return op(actual_value)

    raise ValueError('Invalid filter part %r' % filter_part)


def match_str(filter_str, dct, incomplete=False):
    """ Filter a dictionary with a simple string syntax.
    @returns           Whether the filter passes
    @param incomplete  Set of keys that is expected to be missing from dct.
                       Can be True/False to indicate all/none of the keys may be missing.
                       All conditions on incomplete keys pass if the key is missing
    """
    return all(
        _match_one(filter_part.replace(r'\&', '&'), dct, incomplete)
        for filter_part in re.split(r'(?<!\\)&', filter_str))


def match_filter_func(filters):
    if not filters:
        return None
    filters = variadic(filters)

    def _match_func(info_dict, *args, **kwargs):
        if any(match_str(f, info_dict, *args, **kwargs) for f in filters):
            return None
        else:
            video_title = info_dict.get('title') or info_dict.get('id') or 'video'
            filter_str = ') | ('.join(map(str.strip, filters))
            return f'{video_title} does not pass filter ({filter_str}), skipping ..'
    return _match_func
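

# Illustrative usage (doctest-style sketch, not part of the original module):
#   >>> match_str('duration > 60 & description', {'duration': 90, 'description': 'x'})
#   True
#   >>> match_str('!is_live', {'is_live': True})
#   False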


def parse_dfxp_time_expr(time_expr):
    if not time_expr:
        return

    mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
    if mobj:
        return float(mobj.group('time_offset'))

    mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
    if mobj:
        return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))


def srt_subtitles_timecode(seconds):
    return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)


def ass_subtitles_timecode(seconds):
    time = timetuple_from_msec(seconds * 1000)
    return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)


def dfxp2srt(dfxp_data):
    '''
    @param dfxp_data A bytes-like object containing DFXP data
    @returns A unicode object containing converted SRT data
    '''
    LEGACY_NAMESPACES = (
        (b'http://www.w3.org/ns/ttml', [
            b'http://www.w3.org/2004/11/ttaf1',
            b'http://www.w3.org/2006/04/ttaf1',
            b'http://www.w3.org/2006/10/ttaf1',
        ]),
        (b'http://www.w3.org/ns/ttml#styling', [
            b'http://www.w3.org/ns/ttml#style',
        ]),
    )

    SUPPORTED_STYLING = [
        'color',
        'fontFamily',
        'fontSize',
        'fontStyle',
        'fontWeight',
        'textDecoration'
    ]

    _x = functools.partial(xpath_with_ns, ns_map={
        'xml': 'http://www.w3.org/XML/1998/namespace',
        'ttml': 'http://www.w3.org/ns/ttml',
        'tts': 'http://www.w3.org/ns/ttml#styling',
    })

    styles = {}
    default_style = {}

    class TTMLPElementParser(object):
        _out = ''
        _unclosed_elements = []
        _applied_styles = []

        def start(self, tag, attrib):
            if tag in (_x('ttml:br'), 'br'):
                self._out += '\n'
            else:
                unclosed_elements = []
                style = {}
                element_style_id = attrib.get('style')
                if default_style:
                    style.update(default_style)
                if element_style_id:
                    style.update(styles.get(element_style_id, {}))
                for prop in SUPPORTED_STYLING:
                    prop_val = attrib.get(_x('tts:' + prop))
                    if prop_val:
                        style[prop] = prop_val
                if style:
                    font = ''
                    for k, v in sorted(style.items()):
                        if self._applied_styles and self._applied_styles[-1].get(k) == v:
                            continue
                        if k == 'color':
                            font += ' color="%s"' % v
                        elif k == 'fontSize':
                            font += ' size="%s"' % v
                        elif k == 'fontFamily':
                            font += ' face="%s"' % v
                        elif k == 'fontWeight' and v == 'bold':
                            self._out += '<b>'
                            unclosed_elements.append('b')
                        elif k == 'fontStyle' and v == 'italic':
                            self._out += '<i>'
                            unclosed_elements.append('i')
                        elif k == 'textDecoration' and v == 'underline':
                            self._out += '<u>'
                            unclosed_elements.append('u')
                    if font:
                        self._out += '<font' + font + '>'
                        unclosed_elements.append('font')
                    applied_style = {}
                    if self._applied_styles:
                        applied_style.update(self._applied_styles[-1])
                    applied_style.update(style)
                    self._applied_styles.append(applied_style)
                self._unclosed_elements.append(unclosed_elements)

        def end(self, tag):
            if tag not in (_x('ttml:br'), 'br'):
                unclosed_elements = self._unclosed_elements.pop()
                for element in reversed(unclosed_elements):
                    self._out += '</%s>' % element
                if unclosed_elements and self._applied_styles:
                    self._applied_styles.pop()

        def data(self, data):
            self._out += data

        def close(self):
            return self._out.strip()

    def parse_node(node):
        target = TTMLPElementParser()
        parser = xml.etree.ElementTree.XMLParser(target=target)
        parser.feed(xml.etree.ElementTree.tostring(node))
        return parser.close()

    for k, v in LEGACY_NAMESPACES:
        for ns in v:
            dfxp_data = dfxp_data.replace(ns, k)

    dfxp = compat_etree_fromstring(dfxp_data)
    out = []
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')

    if not paras:
        raise ValueError('Invalid dfxp/TTML subtitle')

    repeat = False
    while True:
        for style in dfxp.findall(_x('.//ttml:style')):
            style_id = style.get('id') or style.get(_x('xml:id'))
            if not style_id:
                continue
            parent_style_id = style.get('style')
            if parent_style_id:
                if parent_style_id not in styles:
                    repeat = True
                    continue
                styles[style_id] = styles[parent_style_id].copy()
            for prop in SUPPORTED_STYLING:
                prop_val = style.get(_x('tts:' + prop))
                if prop_val:
                    styles.setdefault(style_id, {})[prop] = prop_val
        if repeat:
            repeat = False
        else:
            break

    for p in ('body', 'div'):
        ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
        if ele is None:
            continue
        style = styles.get(ele.get('style'))
        if not style:
            continue
        default_style.update(style)

    for para, index in zip(paras, itertools.count(1)):
        begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
        dur = parse_dfxp_time_expr(para.attrib.get('dur'))
        if begin_time is None:
            continue
        if not end_time:
            if not dur:
                continue
            end_time = begin_time + dur
        out.append('%d\n%s --> %s\n%s\n\n' % (
            index,
            srt_subtitles_timecode(begin_time),
            srt_subtitles_timecode(end_time),
            parse_node(para)))

    return ''.join(out)


def cli_option(params, command_option, param):
    param = params.get(param)
    if param:
        param = compat_str(param)
    return [command_option, param] if param is not None else []


def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
    param = params.get(param)
    if param is None:
        return []
    assert isinstance(param, bool)
    if separator:
        return [command_option + separator + (true_value if param else false_value)]
    return [command_option, true_value if param else false_value]


def cli_valueless_option(params, command_option, param, expected_value=True):
    param = params.get(param)
    return [command_option] if param == expected_value else []


def cli_configuration_args(argdict, keys, default=[], use_compat=True):
    if isinstance(argdict, (list, tuple)):  # for backward compatibility
        if use_compat:
            return argdict
        else:
            argdict = None
    if argdict is None:
        return default
    assert isinstance(argdict, dict)

    assert isinstance(keys, (list, tuple))
    for key_list in keys:
        arg_list = list(filter(
            lambda x: x is not None,
            [argdict.get(key.lower()) for key in variadic(key_list)]))
        if arg_list:
            return [arg for args in arg_list for arg in args]
    return default


def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compat=True):
    main_key, exe = main_key.lower(), exe.lower()
    root_key = exe if main_key == exe else f'{main_key}+{exe}'
    keys = [f'{root_key}{k}' for k in (keys or [''])]
    if root_key in keys:
        if main_key != exe:
            keys.append((main_key, exe))
        keys.append('default')
    else:
        use_compat = False
    return cli_configuration_args(argdict, keys, default, use_compat)


class ISO639Utils(object):
    # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
    _lang_map = {
        'iw': 'heb',  # Replaced by he in 1989 revision
        'in': 'ind',  # Replaced by id in 1989 revision
        'ji': 'yid',  # Replaced by yi in 1989 revision
        # (remaining entries elided)
    }

    @classmethod
    def short2long(cls, code):
        """Convert language code from ISO 639-1 to ISO 639-2/T"""
        return cls._lang_map.get(code[:2])

    @classmethod
    def long2short(cls, code):
        """Convert language code from ISO 639-2/T to ISO 639-1"""
        for short_name, long_name in cls._lang_map.items():
            if long_name == code:
                return short_name


class ISO3166Utils(object):
    # From http://data.okfn.org/data/core/country-list
    _country_map = {
        'AF': 'Afghanistan',
        'AX': 'Åland Islands',
        'AS': 'American Samoa',
        'AG': 'Antigua and Barbuda',
        'BO': 'Bolivia, Plurinational State of',
        'BQ': 'Bonaire, Sint Eustatius and Saba',
        'BA': 'Bosnia and Herzegovina',
        'BV': 'Bouvet Island',
        'IO': 'British Indian Ocean Territory',
        'BN': 'Brunei Darussalam',
        'BF': 'Burkina Faso',
        'KY': 'Cayman Islands',
        'CF': 'Central African Republic',
        'CX': 'Christmas Island',
        'CC': 'Cocos (Keeling) Islands',
        'CD': 'Congo, the Democratic Republic of the',
        'CK': 'Cook Islands',
        'CI': 'Côte d\'Ivoire',
        'CZ': 'Czech Republic',
        'DO': 'Dominican Republic',
        'SV': 'El Salvador',
        'GQ': 'Equatorial Guinea',
        'FK': 'Falkland Islands (Malvinas)',
        'FO': 'Faroe Islands',
        'GF': 'French Guiana',
        'PF': 'French Polynesia',
        'TF': 'French Southern Territories',
        'GW': 'Guinea-Bissau',
        'HM': 'Heard Island and McDonald Islands',
        'VA': 'Holy See (Vatican City State)',
        'IR': 'Iran, Islamic Republic of',
        'IM': 'Isle of Man',
        'KP': 'Korea, Democratic People\'s Republic of',
        'KR': 'Korea, Republic of',
        'LA': 'Lao People\'s Democratic Republic',
        'LI': 'Liechtenstein',
        'MK': 'Macedonia, the Former Yugoslav Republic of',
        'MH': 'Marshall Islands',
        'FM': 'Micronesia, Federated States of',
        'MD': 'Moldova, Republic of',
        'NL': 'Netherlands',
        'NC': 'New Caledonia',
        'NZ': 'New Zealand',
        'NF': 'Norfolk Island',
        'MP': 'Northern Mariana Islands',
        'PS': 'Palestine, State of',
        'PG': 'Papua New Guinea',
        'PH': 'Philippines',
        'PR': 'Puerto Rico',
        'RU': 'Russian Federation',
        'BL': 'Saint Barthélemy',
        'SH': 'Saint Helena, Ascension and Tristan da Cunha',
        'KN': 'Saint Kitts and Nevis',
        'LC': 'Saint Lucia',
        'MF': 'Saint Martin (French part)',
        'PM': 'Saint Pierre and Miquelon',
        'VC': 'Saint Vincent and the Grenadines',
        'ST': 'Sao Tome and Principe',
        'SA': 'Saudi Arabia',
        'SL': 'Sierra Leone',
        'SX': 'Sint Maarten (Dutch part)',
        'SB': 'Solomon Islands',
        'ZA': 'South Africa',
        'GS': 'South Georgia and the South Sandwich Islands',
        'SS': 'South Sudan',
        'SJ': 'Svalbard and Jan Mayen',
        'CH': 'Switzerland',
        'SY': 'Syrian Arab Republic',
        'TW': 'Taiwan, Province of China',
        'TZ': 'Tanzania, United Republic of',
        'TL': 'Timor-Leste',
        'TT': 'Trinidad and Tobago',
        'TM': 'Turkmenistan',
        'TC': 'Turks and Caicos Islands',
        'AE': 'United Arab Emirates',
        'GB': 'United Kingdom',
        'US': 'United States',
        'UM': 'United States Minor Outlying Islands',
        'VE': 'Venezuela, Bolivarian Republic of',
        'VG': 'Virgin Islands, British',
        'VI': 'Virgin Islands, U.S.',
        'WF': 'Wallis and Futuna',
        'EH': 'Western Sahara',
        # (remaining entries elided)
    }

    @classmethod
    def short2full(cls, code):
        """Convert an ISO 3166-2 country code to the corresponding full name"""
        return cls._country_map.get(code.upper())


class GeoUtils(object):
    # Major IPv4 address blocks per country
    _country_ip_map = {
        'AD': '46.172.224.0/19',
        'AE': '94.200.0.0/13',
        'AF': '149.54.0.0/17',
        'AG': '209.59.64.0/18',
        'AI': '204.14.248.0/21',
        'AL': '46.99.0.0/16',
        'AM': '46.70.0.0/15',
        'AO': '105.168.0.0/13',
        'AP': '182.50.184.0/21',
        'AQ': '23.154.160.0/24',
        'AR': '181.0.0.0/12',
        'AS': '202.70.112.0/20',
        'AT': '77.116.0.0/14',
        'AU': '1.128.0.0/11',
        'AW': '181.41.0.0/18',
        'AX': '185.217.4.0/22',
        'AZ': '5.197.0.0/16',
        'BA': '31.176.128.0/17',
        'BB': '65.48.128.0/17',
        'BD': '114.130.0.0/16',
        'BF': '102.178.0.0/15',
        'BG': '95.42.0.0/15',
        'BH': '37.131.0.0/17',
        'BI': '154.117.192.0/18',
        'BJ': '137.255.0.0/16',
        'BL': '185.212.72.0/23',
        'BM': '196.12.64.0/18',
        'BN': '156.31.0.0/16',
        'BO': '161.56.0.0/16',
        'BQ': '161.0.80.0/20',
        'BR': '191.128.0.0/12',
        'BS': '24.51.64.0/18',
        'BT': '119.2.96.0/19',
        'BW': '168.167.0.0/16',
        'BY': '178.120.0.0/13',
        'BZ': '179.42.192.0/18',
        'CA': '99.224.0.0/11',
        'CD': '41.243.0.0/16',
        'CF': '197.242.176.0/21',
        'CG': '160.113.0.0/16',
        'CH': '85.0.0.0/13',
        'CI': '102.136.0.0/14',
        'CK': '202.65.32.0/19',
        'CL': '152.172.0.0/14',
        'CM': '102.244.0.0/14',
        'CN': '36.128.0.0/10',
        'CO': '181.240.0.0/12',
        'CR': '201.192.0.0/12',
        'CU': '152.206.0.0/15',
        'CV': '165.90.96.0/19',
        'CW': '190.88.128.0/17',
        'CY': '31.153.0.0/16',
        'CZ': '88.100.0.0/14',
        'DJ': '197.241.0.0/17',
        'DK': '87.48.0.0/12',
        'DM': '192.243.48.0/20',
        'DO': '152.166.0.0/15',
        'DZ': '41.96.0.0/12',
        'EC': '186.68.0.0/15',
        'EE': '90.190.0.0/15',
        'EG': '156.160.0.0/11',
        'ER': '196.200.96.0/20',
        'ES': '88.0.0.0/11',
        'ET': '196.188.0.0/14',
        'EU': '2.16.0.0/13',
        'FI': '91.152.0.0/13',
        'FJ': '144.120.0.0/16',
        'FK': '80.73.208.0/21',
        'FM': '119.252.112.0/20',
        'FO': '88.85.32.0/19',
        'GA': '41.158.0.0/15',
        'GD': '74.122.88.0/21',
        'GE': '31.146.0.0/16',
        'GF': '161.22.64.0/18',
        'GG': '62.68.160.0/19',
        'GH': '154.160.0.0/12',
        'GI': '95.164.0.0/16',
        'GL': '88.83.0.0/19',
        'GM': '160.182.0.0/15',
        'GN': '197.149.192.0/18',
        'GP': '104.250.0.0/19',
        'GQ': '105.235.224.0/20',
        'GR': '94.64.0.0/13',
        'GT': '168.234.0.0/16',
        'GU': '168.123.0.0/16',
        'GW': '197.214.80.0/20',
        'GY': '181.41.64.0/18',
        'HK': '113.252.0.0/14',
        'HN': '181.210.0.0/16',
        'HR': '93.136.0.0/13',
        'HT': '148.102.128.0/17',
        'HU': '84.0.0.0/14',
        'ID': '39.192.0.0/10',
        'IE': '87.32.0.0/12',
        'IL': '79.176.0.0/13',
        'IM': '5.62.80.0/20',
        'IN': '117.192.0.0/10',
        'IO': '203.83.48.0/21',
        'IQ': '37.236.0.0/14',
        'IR': '2.176.0.0/12',
        'IS': '82.221.0.0/16',
        'IT': '79.0.0.0/10',
        'JE': '87.244.64.0/18',
        'JM': '72.27.0.0/17',
        'JO': '176.29.0.0/16',
        'JP': '133.0.0.0/8',
        'KE': '105.48.0.0/12',
        'KG': '158.181.128.0/17',
        'KH': '36.37.128.0/17',
        'KI': '103.25.140.0/22',
        'KM': '197.255.224.0/20',
        'KN': '198.167.192.0/19',
        'KP': '175.45.176.0/22',
        'KR': '175.192.0.0/10',
        'KW': '37.36.0.0/14',
        'KY': '64.96.0.0/15',
        'KZ': '2.72.0.0/13',
        'LA': '115.84.64.0/18',
        'LB': '178.135.0.0/16',
        'LC': '24.92.144.0/20',
        'LI': '82.117.0.0/19',
        'LK': '112.134.0.0/15',
        'LR': '102.183.0.0/16',
        'LS': '129.232.0.0/17',
        'LT': '78.56.0.0/13',
        'LU': '188.42.0.0/16',
        'LV': '46.109.0.0/16',
        'LY': '41.252.0.0/14',
        'MA': '105.128.0.0/11',
        'MC': '88.209.64.0/18',
        'MD': '37.246.0.0/16',
        'ME': '178.175.0.0/17',
        'MF': '74.112.232.0/21',
        'MG': '154.126.0.0/17',
        'MH': '117.103.88.0/21',
        'MK': '77.28.0.0/15',
        'ML': '154.118.128.0/18',
        'MM': '37.111.0.0/17',
        'MN': '49.0.128.0/17',
        'MO': '60.246.0.0/16',
        'MP': '202.88.64.0/20',
        'MQ': '109.203.224.0/19',
        'MR': '41.188.64.0/18',
        'MS': '208.90.112.0/22',
        'MT': '46.11.0.0/16',
        'MU': '105.16.0.0/12',
        'MV': '27.114.128.0/18',
        'MW': '102.70.0.0/15',
        'MX': '187.192.0.0/11',
        'MY': '175.136.0.0/13',
        'MZ': '197.218.0.0/15',
        'NA': '41.182.0.0/16',
        'NC': '101.101.0.0/18',
        'NE': '197.214.0.0/18',
        'NF': '203.17.240.0/22',
        'NG': '105.112.0.0/12',
        'NI': '186.76.0.0/15',
        'NL': '145.96.0.0/11',
        'NO': '84.208.0.0/13',
        'NP': '36.252.0.0/15',
        'NR': '203.98.224.0/19',
        'NU': '49.156.48.0/22',
        'NZ': '49.224.0.0/14',
        'OM': '5.36.0.0/15',
        'PA': '186.72.0.0/15',
        'PE': '186.160.0.0/14',
        'PF': '123.50.64.0/18',
        'PG': '124.240.192.0/19',
        'PH': '49.144.0.0/13',
        'PK': '39.32.0.0/11',
        'PL': '83.0.0.0/11',
        'PM': '70.36.0.0/20',
        'PR': '66.50.0.0/16',
        'PS': '188.161.0.0/16',
        'PT': '85.240.0.0/13',
        'PW': '202.124.224.0/20',
        'PY': '181.120.0.0/14',
        'QA': '37.210.0.0/15',
        'RE': '102.35.0.0/16',
        'RO': '79.112.0.0/13',
        'RS': '93.86.0.0/15',
        'RU': '5.136.0.0/13',
        'RW': '41.186.0.0/16',
        'SA': '188.48.0.0/13',
        'SB': '202.1.160.0/19',
        'SC': '154.192.0.0/11',
        'SD': '102.120.0.0/13',
        'SE': '78.64.0.0/12',
        'SG': '8.128.0.0/10',
        'SI': '188.196.0.0/14',
        'SK': '78.98.0.0/15',
        'SL': '102.143.0.0/17',
        'SM': '89.186.32.0/19',
        'SN': '41.82.0.0/15',
        'SO': '154.115.192.0/18',
        'SR': '186.179.128.0/17',
        'SS': '105.235.208.0/21',
        'ST': '197.159.160.0/19',
        'SV': '168.243.0.0/16',
        'SX': '190.102.0.0/20',
        'SZ': '41.84.224.0/19',
        'TC': '65.255.48.0/20',
        'TD': '154.68.128.0/19',
        'TG': '196.168.0.0/14',
        'TH': '171.96.0.0/13',
        'TJ': '85.9.128.0/18',
        'TK': '27.96.24.0/21',
        'TL': '180.189.160.0/20',
        'TM': '95.85.96.0/19',
        'TN': '197.0.0.0/11',
        'TO': '175.176.144.0/21',
        'TR': '78.160.0.0/11',
        'TT': '186.44.0.0/15',
        'TV': '202.2.96.0/19',
        'TW': '120.96.0.0/11',
        'TZ': '156.156.0.0/14',
        'UA': '37.52.0.0/14',
        'UG': '102.80.0.0/13',
        'UY': '167.56.0.0/13',
        'UZ': '84.54.64.0/18',
        'VA': '212.77.0.0/19',
        'VC': '207.191.240.0/21',
        'VE': '186.88.0.0/13',
        'VG': '66.81.192.0/20',
        'VI': '146.226.0.0/16',
        'VN': '14.160.0.0/11',
        'VU': '202.80.32.0/20',
        'WF': '117.20.32.0/21',
        'WS': '202.4.32.0/19',
        'YE': '134.35.0.0/16',
        'YT': '41.242.116.0/22',
        'ZA': '41.0.0.0/11',
        'ZM': '102.144.0.0/13',
        'ZW': '102.177.192.0/18',
    }

    @classmethod
    def random_ipv4(cls, code_or_block):
        if len(code_or_block) == 2:
            block = cls._country_ip_map.get(code_or_block.upper())
            if not block:
                return None
        else:
            block = code_or_block
        addr, preflen = block.split('/')
        addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
        addr_max = addr_min | (0xffffffff >> int(preflen))
        return compat_str(socket.inet_ntoa(
            compat_struct_pack('!L', random.randint(addr_min, addr_max))))


class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
    def __init__(self, proxies=None):
        # Set default handlers
        for type in ('http', 'https'):
            setattr(self, '%s_open' % type,
                    lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
                        meth(r, proxy, type))
        compat_urllib_request.ProxyHandler.__init__(self, proxies)

    def proxy_open(self, req, proxy, type):
        req_proxy = req.headers.get('Ytdl-request-proxy')
        if req_proxy is not None:
            proxy = req_proxy
            del req.headers['Ytdl-request-proxy']

        if proxy == '__noproxy__':
            return None  # No Proxy
        if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
            req.add_header('Ytdl-socks-proxy', proxy)
            # yt-dlp's http/https handlers wrap the socket with SOCKS themselves
            return None
        return compat_urllib_request.ProxyHandler.proxy_open(
            self, req, proxy, type)


# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
# released into Public Domain
# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387

def long_to_bytes(n, blocksize=0):
    """long_to_bytes(n:long, blocksize:int) : string
    Convert a long integer to a byte string.

    If optional blocksize is given and greater than zero, pad the front of the
    byte string with binary zeros so that the length is a multiple of
    blocksize.
    """
    # after much testing, this algorithm was deemed to be the fastest
    s = b''
    n = int(n)
    while n > 0:
        s = compat_struct_pack('>I', n & 0xffffffff) + s
        n = n >> 32
    # strip off leading zeros
    for i in range(len(s)):
        if s[i] != b'\000'[0]:
            break
    else:
        # only happens when n == 0
        s = b'\000'
        i = 0
    s = s[i:]
    # add back some pad bytes. this could be done more efficiently w.r.t. the
    # de-padding being done above, but sigh...
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * b'\000' + s
    return s


def bytes_to_long(s):
    """bytes_to_long(string) : long
    Convert a byte string to a long integer.

    This is (essentially) the inverse of long_to_bytes().
    """
    acc = 0
    length = len(s)
    if length % 4:
        extra = (4 - length % 4)
        s = b'\000' * extra + s
        length = length + extra
    for i in range(0, length, 4):
        acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
    return acc
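
# Round-trip sketch for the two helpers above (illustrative, not part of the
# original source):
#   >>> long_to_bytes(256)
#   b'\x01\x00'
#   >>> long_to_bytes(256, blocksize=4)  # front-padded to a multiple of 4
#   b'\x00\x00\x01\x00'
#   >>> bytes_to_long(b'\x01\x00')
#   256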


def ohdave_rsa_encrypt(data, exponent, modulus):
    '''
    Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/

    Input:
        data: data to encrypt, bytes-like object
        exponent, modulus: parameter e and N of RSA algorithm, both integer
    Output: hex string of encrypted data

    Limitation: supports one block encryption only
    '''

    payload = int(binascii.hexlify(data[::-1]), 16)
    encrypted = pow(payload, exponent, modulus)
    return '%x' % encrypted


def pkcs1pad(data, length):
    """
    Padding input data with PKCS#1 scheme

    @param {int[]} data        input data
    @param {int}   length      target length
    @returns {int[]}           padded data
    """
    if len(data) > length - 11:
        raise ValueError('Input data too long for PKCS#1 padding')

    pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
    return [0, 2] + pseudo_random + [0] + data
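
# Shape check sketch (illustrative): the result is `length` ints framed as
# 0x00 0x02 <padding> 0x00 <data>, per the PKCS#1 v1.5 layout:
#   >>> padded = pkcs1pad([1, 2, 3], 16)
#   >>> len(padded), padded[:2], padded[-4:]
#   (16, [0, 2], [0, 1, 2, 3])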


def encode_base_n(num, n, table=None):
    FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if not table:
        table = FULL_TABLE[:n]

    if n > len(table):
        raise ValueError('base %d exceeds table length %d' % (n, len(table)))

    if num == 0:
        return table[0]

    ret = ''
    while num:
        ret = table[num % n] + ret
        num = num // n
    return ret
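
# Quick sanity examples (illustrative):
#   >>> encode_base_n(255, 16)
#   'ff'
#   >>> encode_base_n(255, 2)
#   '11111111'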


def decode_packed_codes(code):
    mobj = re.search(PACKED_CODES_RE, code)
    obfuscated_code, base, count, symbols = mobj.groups()
    base = int(base)
    count = int(count)
    symbols = symbols.split('|')
    symbol_table = {}

    while count:
        count -= 1
        base_n_count = encode_base_n(count, base)
        symbol_table[base_n_count] = symbols[count] or base_n_count

    return re.sub(
        r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
        obfuscated_code)


def caesar(s, alphabet, shift):
    if shift == 0:
        return s
    l = len(alphabet)
    return ''.join(
        alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
        for c in s)


def rot47(s):
    return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)
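
# rot47() is caesar() over the 94 printable ASCII characters with shift 47;
# since 47 * 2 == 94, it is its own inverse (illustrative):
#   >>> caesar('ab', 'abc', 1)
#   'bc'
#   >>> rot47(rot47('yt-dlp'))
#   'yt-dlp'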


def parse_m3u8_attributes(attrib):
    info = {}
    for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
        if val.startswith('"'):
            val = val[1:-1]
        info[key] = val
    return info
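
# Example attribute list as found in HLS playlists (illustrative):
#   >>> parse_m3u8_attributes('BANDWIDTH=1280000,CODECS="mp4a.40.2,avc1"')
#   {'BANDWIDTH': '1280000', 'CODECS': 'mp4a.40.2,avc1'}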


def urshift(val, n):
    return val >> n if val >= 0 else (val + 0x100000000) >> n
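
# Emulates JavaScript's unsigned right shift (>>>) on 32-bit values
# (illustrative):
#   >>> urshift(-1, 1) == 0x7fffffff
#   True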


# Based on png2str() written by @gdkchan and improved by @yokrysty
# Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
def decode_png(png_data):
    # Reference: https://www.w3.org/TR/PNG/
    header = png_data[8:]

    if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
        raise IOError('Not a valid PNG file.')

    int_map = {1: '>B', 2: '>H', 4: '>I'}
    unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]

    chunks = []
    while header:
        length = unpack_integer(header[:4])
        header = header[4:]

        chunk_type = header[:4]
        header = header[4:]

        chunk_data = header[:length]
        header = header[length:]

        header = header[4:]  # Skip CRC

        chunks.append({
            'type': chunk_type,
            'length': length,
            'data': chunk_data
        })

    ihdr = chunks[0]['data']

    width = unpack_integer(ihdr[:4])
    height = unpack_integer(ihdr[4:8])

    idat = b''
    for chunk in chunks:
        if chunk['type'] == b'IDAT':
            idat += chunk['data']

    if not idat:
        raise IOError('Unable to read PNG data.')

    decompressed_data = bytearray(zlib.decompress(idat))

    stride = width * 3
    pixels = []

    def _get_pixel(idx):
        x = idx % stride
        y = idx // stride
        return pixels[y][x]

    for y in range(height):
        basePos = y * (1 + stride)
        filter_type = decompressed_data[basePos]

        current_row = []
        pixels.append(current_row)

        for x in range(stride):
            color = decompressed_data[1 + basePos + x]
            basex = y * stride + x
            left = 0
            up = 0

            if x > 2:
                left = _get_pixel(basex - 3)
            if y > 0:
                up = _get_pixel(basex - stride)

            if filter_type == 1:  # Sub
                color = (color + left) & 0xff
            elif filter_type == 2:  # Up
                color = (color + up) & 0xff
            elif filter_type == 3:  # Average
                color = (color + ((left + up) >> 1)) & 0xff
            elif filter_type == 4:  # Paeth
                a = left
                b = up
                c = 0

                if x > 2 and y > 0:
                    c = _get_pixel(basex - stride - 3)

                p = a + b - c
                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)

                if pa <= pb and pa <= pc:
                    color = (color + a) & 0xff
                elif pb <= pc:
                    color = (color + b) & 0xff
                else:
                    color = (color + c) & 0xff

            current_row.append(color)

    return width, height, pixels


def write_xattr(path, key, value):
    # This mess below finds the best xattr tool for the job
    try:
        # try the pyxattr module...
        import xattr

        if hasattr(xattr, 'set'):  # pyxattr
            # Unicode arguments are not supported in python-pyxattr until
            # version 0.5.0
            # See https://github.com/ytdl-org/youtube-dl/issues/5498
            pyxattr_required_version = '0.5.0'
            if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
                # TODO: fallback to CLI tools
                raise XAttrUnavailableError(
                    'python-pyxattr is detected but is too old. '
                    'yt-dlp requires %s or above while your version is %s. '
                    'Falling back to other xattr implementations' % (
                        pyxattr_required_version, xattr.__version__))

            setxattr = xattr.set
        else:  # xattr
            setxattr = xattr.setxattr

        try:
            setxattr(path, key, value)
        except EnvironmentError as e:
            raise XAttrMetadataError(e.errno, e.strerror)

    except ImportError:
        if compat_os_name == 'nt':
            # Write xattrs to NTFS Alternate Data Streams:
            # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
            assert ':' not in key
            assert os.path.exists(path)

            ads_fn = path + ':' + key
            try:
                with open(ads_fn, 'wb') as f:
                    f.write(value)
            except EnvironmentError as e:
                raise XAttrMetadataError(e.errno, e.strerror)
        else:
            user_has_setfattr = check_executable('setfattr', ['--version'])
            user_has_xattr = check_executable('xattr', ['-h'])

            if user_has_setfattr or user_has_xattr:
                value = value.decode('utf-8')
                if user_has_setfattr:
                    executable = 'setfattr'
                    opts = ['-n', key, '-v', value]
                elif user_has_xattr:
                    executable = 'xattr'
                    opts = ['-w', key, value]

                cmd = ([encodeFilename(executable, True)]
                       + [encodeArgument(o) for o in opts]
                       + [encodeFilename(path, True)])

                try:
                    p = Popen(
                        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
                except EnvironmentError as e:
                    raise XAttrMetadataError(e.errno, e.strerror)
                stdout, stderr = p.communicate_or_kill()
                stderr = stderr.decode('utf-8', 'replace')
                if p.returncode != 0:
                    raise XAttrMetadataError(p.returncode, stderr)

            else:
                # On Unix, but neither pyxattr, setfattr nor xattr could be found.
                if sys.platform.startswith('linux'):
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'pyxattr' or 'xattr' "
                        "modules, or the GNU 'attr' package "
                        "(which contains the 'setfattr' tool).")
                else:
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'xattr' module, "
                        "or the 'xattr' binary.")


def random_birthday(year_field, month_field, day_field):
    start_date = datetime.date(1950, 1, 1)
    end_date = datetime.date(1995, 12, 31)
    offset = random.randint(0, (end_date - start_date).days)
    random_date = start_date + datetime.timedelta(offset)
    return {
        year_field: str(random_date.year),
        month_field: str(random_date.month),
        day_field: str(random_date.day),
    }


# Templates for internet shortcut files, which are plain text files.
DOT_URL_LINK_TEMPLATE = '''
[InternetShortcut]
URL=%(url)s
'''.lstrip()

DOT_WEBLOC_LINK_TEMPLATE = '''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
\t<key>URL</key>
\t<string>%(url)s</string>
</dict>
</plist>
'''.lstrip()

DOT_DESKTOP_LINK_TEMPLATE = '''
[Desktop Entry]
Encoding=UTF-8
Name=%(filename)s
Type=Link
URL=%(url)s
Icon=text-html
'''.lstrip()

LINK_TEMPLATES = {
    'url': DOT_URL_LINK_TEMPLATE,
    'desktop': DOT_DESKTOP_LINK_TEMPLATE,
    'webloc': DOT_WEBLOC_LINK_TEMPLATE,
}


def iri_to_uri(iri):
    """
    Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).

    The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact.
    """

    iri_parts = compat_urllib_parse_urlparse(iri)

    if '[' in iri_parts.netloc:
        raise ValueError('IPv6 URIs are not, yet, supported.')
        # Querying `.netloc`, when there's only one bracket, also raises a ValueError.

    # The `safe` argument values, that the following code uses, contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.

    net_location = ''
    if iri_parts.username:
        net_location += compat_urllib_parse_quote(iri_parts.username, safe=r"!$%&'()*+,~")
        if iri_parts.password is not None:
            net_location += ':' + compat_urllib_parse_quote(iri_parts.password, safe=r"!$%&'()*+,~")
        net_location += '@'

    net_location += iri_parts.hostname.encode('idna').decode('utf-8')  # Punycode for Unicode hostnames.
    # The 'idna' encoding produces ASCII text.
    if iri_parts.port is not None and iri_parts.port != 80:
        net_location += ':' + str(iri_parts.port)

    return compat_urllib_parse_urlunparse(
        (iri_parts.scheme,
            net_location,

            compat_urllib_parse_quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),

            # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
            compat_urllib_parse_quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),

            # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
            compat_urllib_parse_quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),

            compat_urllib_parse_quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))

    # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
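
# Illustrative behaviour (example values chosen here, not from the original
# source): non-ASCII path characters are percent-encoded as UTF-8, while
# anything already escaped is left alone:
#   >>> iri_to_uri('https://example.com/föö?a=b%3Cc')
#   'https://example.com/f%C3%B6%C3%B6?a=b%3Cc'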


def to_high_limit_path(path):
    if sys.platform in ['win32', 'cygwin']:
        # Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited.
        return r'\\?\ '.rstrip() + os.path.abspath(path)

    return path


def format_field(obj, field=None, template='%s', ignore=(None, ''), default='', func=None):
    val = traverse_obj(obj, *variadic(field))
    if val in ignore:
        return default
    return template % (func(val) if func else val)
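
# Illustrative (values invented for demonstration):
#   >>> format_field({'bitrate': 128}, 'bitrate', '%dkbps')
#   '128kbps'
#   >>> format_field({}, 'bitrate', '%dkbps', default='unknown')
#   'unknown'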


def clean_podcast_url(url):
    return re.sub(r'''(?x)
        (?:
            (?:
                chtbl\.com/track|
                media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
                play\.podtrac\.com
            )/[^/]+|
            (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
            flex\.acast\.com|
            pd(?:
                cn\.co| # https://podcorn.com/analytics-prefix/
                st\.fm # https://podsights.com/docs/
            )/e
        )/''', '', url)


_HEX_TABLE = '0123456789abcdef'


def random_uuidv4():
    return re.sub(r'[xy]', lambda x: _HEX_TABLE[random.randint(0, 15)], 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')


def make_dir(path, to_screen=None):
    try:
        dn = os.path.dirname(path)
        if dn and not os.path.exists(dn):
            os.makedirs(dn)
        return True
    except (OSError, IOError) as err:
        if callable(to_screen):
            to_screen('unable to create directory ' + error_to_compat_str(err))
        return False


def get_executable_path():
    from zipimport import zipimporter
    if hasattr(sys, 'frozen'):  # Running from PyInstaller
        path = os.path.dirname(sys.executable)
    elif isinstance(globals().get('__loader__'), zipimporter):  # Running from ZIP
        path = os.path.join(os.path.dirname(__file__), '../..')
    else:
        path = os.path.join(os.path.dirname(__file__), '..')
    return os.path.abspath(path)


def load_plugins(name, suffix, namespace):
    classes = {}
    try:
        plugins_spec = importlib.util.spec_from_file_location(
            name, os.path.join(get_executable_path(), 'ytdlp_plugins', name, '__init__.py'))
        plugins = importlib.util.module_from_spec(plugins_spec)
        sys.modules[plugins_spec.name] = plugins
        plugins_spec.loader.exec_module(plugins)
        for name in dir(plugins):
            if name in namespace:
                continue
            if not name.endswith(suffix):
                continue
            klass = getattr(plugins, name)
            classes[name] = namespace[name] = klass
    except FileNotFoundError:
        pass
    return classes


def traverse_obj(
        obj, *path_list, default=None, expected_type=None, get_all=True,
        casesense=True, is_user_input=False, traverse_string=False):
    ''' Traverse nested list/dict/tuple
    @param path_list        A list of paths which are checked one by one.
                            Each path is a list of keys where each key is a string,
                            a function, a tuple of strings/None or "...".
                            When a function is given, it takes the key as argument and
                            returns whether the key matches or not. When a tuple is given,
                            all the keys given in the tuple are traversed, and
                            "..." traverses all the keys in the object
                            "None" returns the object without traversal
    @param default          Default value to return
    @param expected_type    Only accept final value of this type (Can also be any callable)
    @param get_all          Return all the values obtained from a path or only the first one
    @param casesense        Whether to consider dictionary keys as case sensitive
    @param is_user_input    Whether the keys are generated from user input. If True,
                            strings are converted to int/slice if necessary
    @param traverse_string  Whether to traverse inside strings. If True, any
                            non-compatible object will also be converted into a string
    '''
    if not casesense:
        _lower = lambda k: (k.lower() if isinstance(k, str) else k)
        path_list = (map(_lower, variadic(path)) for path in path_list)

    def _traverse_obj(obj, path, _current_depth=0):
        nonlocal depth
        path = tuple(variadic(path))
        for i, key in enumerate(path):
            if None in (key, obj):
                return obj
            if isinstance(key, (list, tuple)):
                obj = [_traverse_obj(obj, sub_key, _current_depth) for sub_key in key]
                key = ...
            if key is ...:
                obj = (obj.values() if isinstance(obj, dict)
                       else obj if isinstance(obj, (list, tuple, LazyList))
                       else str(obj) if traverse_string else [])
                _current_depth += 1
                depth = max(depth, _current_depth)
                return [_traverse_obj(inner_obj, path[i + 1:], _current_depth) for inner_obj in obj]
            elif callable(key):
                if isinstance(obj, (list, tuple, LazyList)):
                    obj = enumerate(obj)
                elif isinstance(obj, dict):
                    obj = obj.items()
                else:
                    if not traverse_string:
                        return None
                    obj = str(obj)
                _current_depth += 1
                depth = max(depth, _current_depth)
                return [_traverse_obj(v, path[i + 1:], _current_depth) for k, v in obj if key(k)]
            elif isinstance(obj, dict) and not (is_user_input and key == ':'):
                obj = (obj.get(key) if casesense or (key in obj)
                       else next((v for k, v in obj.items() if _lower(k) == key), None))
            else:
                if is_user_input:
                    key = (int_or_none(key) if ':' not in key
                           else slice(*map(int_or_none, key.split(':'))))
                    if key == slice(None):
                        return _traverse_obj(obj, (..., *path[i + 1:]), _current_depth)
                if not isinstance(key, (int, slice)):
                    return None
                if not isinstance(obj, (list, tuple, LazyList)):
                    if not traverse_string:
                        return None
                    obj = str(obj)
                try:
                    obj = obj[key]
                except IndexError:
                    return None
        return obj

    if isinstance(expected_type, type):
        type_test = lambda val: val if isinstance(val, expected_type) else None
    elif expected_type is not None:
        type_test = expected_type
    else:
        type_test = lambda val: val

    for path in path_list:
        depth = 0
        val = _traverse_obj(obj, path)
        if val is not None:
            if depth:
                for _ in range(depth - 1):
                    val = itertools.chain.from_iterable(v for v in val if v is not None)
                val = [v for v in map(type_test, val) if v is not None]
                if val:
                    return val if get_all else val[0]
            else:
                val = type_test(val)
                if val is not None:
                    return val
    return default
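
# Illustrative traversal examples (data invented for demonstration):
#   >>> traverse_obj({'a': [{'b': 1}, {'b': 2}]}, ('a', ..., 'b'))
#   [1, 2]
#   >>> traverse_obj({'a': {'b': 3}}, ('a', 'c'), ('a', 'b'))  # first matching path wins
#   3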


def traverse_dict(dictn, keys, casesense=True):
    write_string('DeprecationWarning: yt_dlp.utils.traverse_dict is deprecated '
                 'and may be removed in a future version. Use yt_dlp.utils.traverse_obj instead')
    return traverse_obj(dictn, keys, casesense=casesense, is_user_input=True, traverse_string=True)


def get_first(obj, keys, **kwargs):
    return traverse_obj(obj, (..., *variadic(keys)), **kwargs, get_all=False)


def variadic(x, allowed_types=(str, bytes, dict)):
    return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,)
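
# Wraps scalars into a tuple, passes real sequences through (illustrative):
#   >>> variadic('foo')
#   ('foo',)
#   >>> variadic([1, 2])
#   [1, 2]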


def decode_base(value, digits):
    # This will convert the given base-x string to a scalar (long or int)
    table = {char: index for index, char in enumerate(digits)}
    result = 0
    base = len(digits)
    for chr in value:
        result *= base
        result += table[chr]
    return result
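
# Inverse of encode_base_n() for the same digit table (illustrative):
#   >>> decode_base('ff', '0123456789abcdef')
#   255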


def time_seconds(**kwargs):
    t = datetime.datetime.now(datetime.timezone(datetime.timedelta(**kwargs)))
    return t.timestamp()


# create a JSON Web Signature (jws) with HS256 algorithm
# the resulting format is in JWS Compact Serialization
# implemented following JWT https://www.rfc-editor.org/rfc/rfc7519.html
# implemented following JWS https://www.rfc-editor.org/rfc/rfc7515.html
def jwt_encode_hs256(payload_data, key, headers={}):
    header_data = {
        'alg': 'HS256',
        'typ': 'JWT',
    }
    if headers:
        header_data.update(headers)
    header_b64 = base64.b64encode(json.dumps(header_data).encode('utf-8'))
    payload_b64 = base64.b64encode(json.dumps(payload_data).encode('utf-8'))
    h = hmac.new(key.encode('utf-8'), header_b64 + b'.' + payload_b64, hashlib.sha256)
    signature_b64 = base64.b64encode(h.digest())
    token = header_b64 + b'.' + payload_b64 + b'.' + signature_b64
    return token
5291 def jwt_decode_hs256(jwt
):
5292 header_b64
, payload_b64
, signature_b64
= jwt
.split('.')
5293 payload_data
= json
.loads(base64
.urlsafe_b64decode(payload_b64
))


def supports_terminal_sequences(stream):
    if compat_os_name == 'nt':
        from .compat import WINDOWS_VT_MODE  # Must be imported locally
        if not WINDOWS_VT_MODE or get_windows_version() < (10, 0, 10586):
            return False
    elif not os.getenv('TERM'):
        return False
    try:
        return stream.isatty()
    except BaseException:
        return False


_terminal_sequences_re = re.compile('\033\\[[^m]+m')


def remove_terminal_sequences(string):
    return _terminal_sequences_re.sub('', string)


def number_of_digits(number):
    return len('%d' % number)


def join_nonempty(*values, delim='-', from_dict=None):
    if from_dict is not None:
        values = map(from_dict.get, values)
    return delim.join(map(str, filter(None, values)))
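
# Drops falsy parts before joining (illustrative):
#   >>> join_nonempty('1280', None, '720', delim='x')
#   '1280x720'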


def scale_thumbnails_to_max_format_width(formats, thumbnails, url_width_re):
    """
    Find the largest format dimensions in terms of video width and, for each thumbnail:
    * Modify the URL: Match the width with the provided regex and replace with the former width
    * Update dimensions

    This function is useful with video services that scale the provided thumbnails on demand
    """
    _keys = ('width', 'height')
    max_dimensions = max(
        [tuple(format.get(k) or 0 for k in _keys) for format in formats],
        default=(0, 0))
    if not max_dimensions[0]:
        return thumbnails
    return [
        merge_dicts(
            {'url': re.sub(url_width_re, str(max_dimensions[0]), thumbnail['url'])},
            dict(zip(_keys, max_dimensions)), thumbnail)
        for thumbnail in thumbnails
    ]


def parse_http_range(range):
    """ Parse value of "Range" or "Content-Range" HTTP header into tuple. """
    if not range:
        return None, None, None
    crg = re.search(r'bytes[ =](\d+)-(\d+)?(?:/(\d+))?', range)
    if not crg:
        return None, None, None
    return int(crg.group(1)), int_or_none(crg.group(2)), int_or_none(crg.group(3))
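
# Returns (start, end, total), with None for any missing part (illustrative):
#   >>> parse_http_range('bytes=0-499')
#   (0, 499, None)
#   >>> parse_http_range('bytes 0-499/1234')
#   (0, 499, 1234)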


class Config:
    own_args = None
    filename = None
    __initialized = False

    def __init__(self, parser, label=None):
        self._parser, self.label = parser, label
        self._loaded_paths, self.configs = set(), []

    def init(self, args=None, filename=None):
        assert not self.__initialized
        directory = ''
        if filename:
            location = os.path.realpath(filename)
            directory = os.path.dirname(location)
            if location in self._loaded_paths:
                return False
            self._loaded_paths.add(location)

        self.__initialized = True
        self.own_args, self.filename = args, filename
        for location in self._parser.parse_args(args)[0].config_locations or []:
            location = os.path.join(directory, expand_path(location))
            if os.path.isdir(location):
                location = os.path.join(location, 'yt-dlp.conf')
            if not os.path.exists(location):
                self._parser.error(f'config location {location} does not exist')
            self.append_config(self.read_file(location), location)
        return True

    def __str__(self):
        label = join_nonempty(
            self.label, 'config', f'"{self.filename}"' if self.filename else '',
            delim=' ')
        return join_nonempty(
            self.own_args is not None and f'{label[0].upper()}{label[1:]}: {self.hide_login_info(self.own_args)}',
            *(f'\n{c}'.replace('\n', '\n| ')[1:] for c in self.configs),
            delim='\n')

    @staticmethod
    def read_file(filename, default=[]):
        try:
            optionf = open(filename)
        except IOError:
            return default  # silently skip if file is not present
        try:
            # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
            contents = optionf.read()
            if sys.version_info < (3,):
                contents = contents.decode(preferredencoding())
            res = compat_shlex_split(contents, comments=True)
        finally:
            optionf.close()
        return res

    @staticmethod
    def hide_login_info(opts):
        PRIVATE_OPTS = set(['-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'])
        eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')

        def _scrub_eq(o):
            m = eqre.match(o)
            if m:
                return m.group('key') + '=PRIVATE'
            else:
                return o

        opts = list(map(_scrub_eq, opts))
        for idx, opt in enumerate(opts):
            if opt in PRIVATE_OPTS and idx + 1 < len(opts):
                opts[idx + 1] = 'PRIVATE'
        return opts

    def append_config(self, *args, label=None):
        config = type(self)(self._parser, label)
        config._loaded_paths = self._loaded_paths
        if config.init(*args):
            self.configs.append(config)

    @property
    def all_args(self):
        for config in reversed(self.configs):
            yield from config.all_args
        yield from self.own_args or []

    def parse_args(self):
        return self._parser.parse_args(list(self.all_args))


class WebSocketsWrapper():
    """Wraps websockets module to use in non-async scopes"""
    pool = None

    def __init__(self, url, headers=None, connect=True):
        self.loop = asyncio.events.new_event_loop()
        self.conn = compat_websockets.connect(
            url, extra_headers=headers, ping_interval=None,
            close_timeout=float('inf'), loop=self.loop, ping_timeout=float('inf'))
        if connect:
            self.__enter__()
        atexit.register(self.__exit__, None, None, None)

    def __enter__(self):
        if not self.pool:
            self.pool = self.run_with_loop(self.conn.__aenter__(), self.loop)
        return self

    def send(self, *args):
        self.run_with_loop(self.pool.send(*args), self.loop)

    def recv(self, *args):
        return self.run_with_loop(self.pool.recv(*args), self.loop)

    def __exit__(self, type, value, traceback):
        try:
            return self.run_with_loop(self.conn.__aexit__(type, value, traceback), self.loop)
        finally:
            self.loop.close()
            self._cancel_all_tasks(self.loop)

    # taken from https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py with modifications
    # for contributors: if any new library that uses asyncio needs to be run in non-async code,
    # move these functions out of this class
    @staticmethod
    def run_with_loop(main, loop):
        if not asyncio.coroutines.iscoroutine(main):
            raise ValueError(f'a coroutine was expected, got {main!r}')

        try:
            return loop.run_until_complete(main)
        finally:
            loop.run_until_complete(loop.shutdown_asyncgens())
            if hasattr(loop, 'shutdown_default_executor'):
                loop.run_until_complete(loop.shutdown_default_executor())

    @staticmethod
    def _cancel_all_tasks(loop):
        to_cancel = asyncio.tasks.all_tasks(loop)

        if not to_cancel:
            return

        for task in to_cancel:
            task.cancel()

        loop.run_until_complete(
            asyncio.tasks.gather(*to_cancel, loop=loop, return_exceptions=True))

        for task in to_cancel:
            if task.cancelled():
                continue
            if task.exception() is not None:
                loop.call_exception_handler({
                    'message': 'unhandled exception during asyncio.run() shutdown',
                    'exception': task.exception(),
                    'task': task,
                })


has_websockets = bool(compat_websockets)


def merge_headers(*dicts):
    """Merge dicts of http headers case insensitively, prioritizing the latter ones"""
    return {k.title(): v for k, v in itertools.chain.from_iterable(map(dict.items, dicts))}
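
# Later dicts win, and keys are canonicalized via str.title() (illustrative):
#   >>> merge_headers({'user-agent': 'A'}, {'User-Agent': 'B'})
#   {'User-Agent': 'B'}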


class classproperty:
    def __init__(self, f):
        self.f = f

    def __get__(self, _, cls):