#!/usr/bin/env python3
# coding: utf-8

from __future__ import unicode_literals

import base64
import binascii
import calendar
import codecs
import collections
import contextlib
import ctypes
import datetime
import email.utils
import email.header
import errno
import functools
import gzip
import hashlib
import hmac
import importlib.util
import io
import itertools
import json
import locale
import math
import operator
import os
import platform
import random
import re
import socket
import ssl
import subprocess
import sys
import tempfile
import time
import traceback
import xml.etree.ElementTree
import zlib
import mimetypes

from .compat import (
    compat_HTMLParseError,
    compat_HTMLParser,
    compat_HTTPError,
    compat_basestring,
    compat_chr,
    compat_cookiejar,
    compat_ctypes_WINFUNCTYPE,
    compat_etree_fromstring,
    compat_expanduser,
    compat_html_entities,
    compat_html_entities_html5,
    compat_http_client,
    compat_integer_types,
    compat_numeric_types,
    compat_kwargs,
    compat_os_name,
    compat_parse_qs,
    compat_shlex_split,
    compat_shlex_quote,
    compat_str,
    compat_struct_pack,
    compat_struct_unpack,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_parse_urlencode,
    compat_urllib_parse_urlparse,
    compat_urllib_parse_urlunparse,
    compat_urllib_parse_quote,
    compat_urllib_parse_quote_plus,
    compat_urllib_parse_unquote_plus,
    compat_urllib_request,
    compat_urlparse,
    compat_xpath,
)

from .socks import (
    ProxyType,
    sockssocket,
)


def register_socks_protocols():
    # "Register" SOCKS protocols
    # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
    # URLs with protocols not in urlparse.uses_netloc are not handled correctly
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in compat_urlparse.uses_netloc:
            compat_urlparse.uses_netloc.append(scheme)


# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))


def random_user_agent():
    _USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
    _CHROME_VERSIONS = (
        '90.0.4430.212',
        '90.0.4430.24',
        '90.0.4430.70',
        '90.0.4430.72',
        '90.0.4430.85',
        '90.0.4430.93',
        '91.0.4472.101',
        '91.0.4472.106',
        '91.0.4472.114',
        '91.0.4472.124',
        '91.0.4472.164',
        '91.0.4472.19',
        '91.0.4472.77',
        '92.0.4515.107',
        '92.0.4515.115',
        '92.0.4515.131',
        '92.0.4515.159',
        '92.0.4515.43',
        '93.0.4556.0',
        '93.0.4577.15',
        '93.0.4577.63',
        '93.0.4577.82',
        '94.0.4606.41',
        '94.0.4606.54',
        '94.0.4606.61',
        '94.0.4606.71',
        '94.0.4606.81',
        '94.0.4606.85',
        '95.0.4638.17',
        '95.0.4638.50',
        '95.0.4638.54',
        '95.0.4638.69',
        '95.0.4638.74',
        '96.0.4664.18',
        '96.0.4664.45',
        '96.0.4664.55',
        '96.0.4664.93',
        '97.0.4692.20',
    )
    return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)


std_headers = {
    'User-Agent': random_user_agent(),
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
    'Sec-Fetch-Mode': 'navigate',
}


USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}


NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}

KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'avi', 'divx',
    'mov',
    'asf', 'wmv', 'wma',
    '3gp', '3g2',
    'mp3',
    'flac',
    'ape',
    'wav',
    'f4f', 'f4m', 'm3u8', 'smil')

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))

DATE_FORMATS = (
    '%d %B %Y',
    '%d %b %Y',
    '%B %d %Y',
    '%B %dst %Y',
    '%B %dnd %Y',
    '%B %drd %Y',
    '%B %dth %Y',
    '%b %d %Y',
    '%b %dst %Y',
    '%b %dnd %Y',
    '%b %drd %Y',
    '%b %dth %Y',
    '%b %dst %Y %I:%M',
    '%b %dnd %Y %I:%M',
    '%b %drd %Y %I:%M',
    '%b %dth %Y %I:%M',
    '%Y %m %d',
    '%Y-%m-%d',
    '%Y.%m.%d.',
    '%Y/%m/%d',
    '%Y/%m/%d %H:%M',
    '%Y/%m/%d %H:%M:%S',
    '%Y%m%d%H%M',
    '%Y%m%d%H%M%S',
    '%Y%m%d',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S.%f',
    '%Y-%m-%d %H:%M:%S:%f',
    '%d.%m.%Y %H:%M',
    '%d.%m.%Y %H.%M',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%Y-%m-%dT%H:%M',
    '%b %d %Y at %H:%M',
    '%b %d %Y at %H:%M:%S',
    '%B %d %Y at %H:%M',
    '%B %d %Y at %H:%M:%S',
    '%H:%M %d-%b-%Y',
)

DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
])

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])

PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'


def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref


def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non-ASCII characters unless we
        # use a unicode object
        path_basename = lambda f: os.path.basename(fn).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    args = {
        'suffix': '.tmp',
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args.update({
            'mode': 'w',
            'encoding': 'utf-8',
        })

    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))

    try:
        with tf:
            json.dump(obj, tf, ensure_ascii=False)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        try:
            mask = os.umask(0)
            os.umask(mask)
            os.chmod(tf.name, 0o666 & ~mask)
        except OSError:
            pass
        os.rename(tf.name, fn)
    except Exception:
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise


if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] """
        assert re.match(r'^[a-zA-Z_-]+$', key)
        expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
        return node.find(expr)
else:
    def find_xpath_attr(node, xpath, key, val=None):
        for f in node.findall(compat_xpath(xpath)):
            if key not in f.attrib:
                continue
            if val is None or f.attrib.get(key) == val:
                return f
        return None

# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter


def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)
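

# Illustrative usage of xpath_with_ns (a sketch; the 'media' prefix and URL are made-up values):
#   >>> xpath_with_ns('media:song/media:author', {'media': 'http://example.com/'})
#   '{http://example.com/}song/{http://example.com/}author'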


def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(compat_xpath(xpath))

    if isinstance(xpath, (str, compat_str)):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n


def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text


def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = '%s[@%s]' % (xpath, key) if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]
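

# Illustrative usage of the xpath helpers (a sketch; the XML snippet is made up):
#   >>> doc = xml.etree.ElementTree.fromstring('<root><a x="1">t</a></root>')
#   >>> xpath_text(doc, './a')
#   't'
#   >>> xpath_attr(doc, './a', 'x')
#   '1'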


def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html)


def get_element_html_by_id(id, html):
    """Return the html of the tag with the specified ID in the passed HTML document"""
    return get_element_html_by_attribute('id', id, html)


def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_html_by_class(class_name, html):
    """Return the html of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_html_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_by_attribute(attribute, value, html, escape_value=True):
    retval = get_elements_by_attribute(attribute, value, html, escape_value)
    return retval[0] if retval else None


def get_element_html_by_attribute(attribute, value, html, escape_value=True):
    retval = get_elements_html_by_attribute(attribute, value, html, escape_value)
    return retval[0] if retval else None


def get_elements_by_class(class_name, html):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_html_by_class(class_name, html):
    """Return the html of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_html_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_by_attribute(*args, **kwargs):
    """Return the content of the tag with the specified attribute in the passed HTML document"""
    return [content for content, _ in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_html_by_attribute(*args, **kwargs):
    """Return the html of the tag with the specified attribute in the passed HTML document"""
    return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_text_and_html_by_attribute(attribute, value, html, escape_value=True):
    """
    Return the text (content) and the html (whole) of the tag with the specified
    attribute in the passed HTML document
    """

    value_quote_optional = '' if re.match(r'''[\s"'`=<>]''', value) else '?'

    value = re.escape(value) if escape_value else value

    partial_element_re = r'''(?x)
        <(?P<tag>[a-zA-Z0-9:._-]+)
        (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
        \s%(attribute)s\s*=\s*(?P<_q>['"]%(vqo)s)(?-x:%(value)s)(?P=_q)
        ''' % {'attribute': re.escape(attribute), 'value': value, 'vqo': value_quote_optional}

    for m in re.finditer(partial_element_re, html):
        content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():])

        yield (
            unescapeHTML(re.sub(r'^(?P<q>["\'])(?P<content>.*)(?P=q)$', r'\g<content>', content, flags=re.DOTALL)),
            whole
        )
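

# Illustrative usage of the element helpers (a sketch; the HTML snippet is made up):
#   >>> get_element_by_class('foo', '<div class="foo bar">text</div>')
#   'text'
#   >>> get_element_html_by_class('foo', '<div class="foo bar">text</div>')
#   '<div class="foo bar">text</div>'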


class HTMLBreakOnClosingTagParser(compat_HTMLParser):
    """
    HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
    closing tag for the first opening tag it has encountered, and can be used
    as a context manager
    """

    class HTMLBreakOnClosingTagException(Exception):
        pass

    def __init__(self):
        self.tagstack = collections.deque()
        compat_HTMLParser.__init__(self)

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def close(self):
        # handle_endtag does not return upon raising HTMLBreakOnClosingTagException,
        # so data remains buffered; we no longer have any interest in it, thus
        # override this method to discard it
        pass

    def handle_starttag(self, tag, _):
        self.tagstack.append(tag)

    def handle_endtag(self, tag):
        if not self.tagstack:
            raise compat_HTMLParseError('no tags in the stack')
        while self.tagstack:
            inner_tag = self.tagstack.pop()
            if inner_tag == tag:
                break
        else:
            raise compat_HTMLParseError(f'matching opening tag for closing {tag} tag not found')
        if not self.tagstack:
            raise self.HTMLBreakOnClosingTagException()


def get_element_text_and_html_by_tag(tag, html):
    """
    For the first element with the specified tag in the passed HTML document
    return its content (text) and the whole element (html)
542 """
543 def find_or_raise(haystack, needle, exc):
544 try:
545 return haystack.index(needle)
546 except ValueError:
547 raise exc
548 closing_tag = f'</{tag}>'
549 whole_start = find_or_raise(
550 html, f'<{tag}', compat_HTMLParseError(f'opening {tag} tag not found'))
551 content_start = find_or_raise(
552 html[whole_start:], '>', compat_HTMLParseError(f'malformed opening {tag} tag'))
553 content_start += whole_start + 1
554 with HTMLBreakOnClosingTagParser() as parser:
555 parser.feed(html[whole_start:content_start])
556 if not parser.tagstack or parser.tagstack[0] != tag:
557 raise compat_HTMLParseError(f'parser did not match opening {tag} tag')
558 offset = content_start
559 while offset < len(html):
560 next_closing_tag_start = find_or_raise(
561 html[offset:], closing_tag,
562 compat_HTMLParseError(f'closing {tag} tag not found'))
563 next_closing_tag_end = next_closing_tag_start + len(closing_tag)
564 try:
565 parser.feed(html[offset:offset + next_closing_tag_end])
566 offset += next_closing_tag_end
567 except HTMLBreakOnClosingTagParser.HTMLBreakOnClosingTagException:
568 return html[content_start:offset + next_closing_tag_start], \
569 html[whole_start:offset + next_closing_tag_end]
570 raise compat_HTMLParseError('unexpected end of html')
571
572
573 class HTMLAttributeParser(compat_HTMLParser):
574 """Trivial HTML parser to gather the attributes for a single element"""
575
576 def __init__(self):
577 self.attrs = {}
578 compat_HTMLParser.__init__(self)
579
580 def handle_starttag(self, tag, attrs):
581 self.attrs = dict(attrs)
582
583
584 class HTMLListAttrsParser(compat_HTMLParser):
585 """HTML parser to gather the attributes for the elements of a list"""
586
587 def __init__(self):
588 compat_HTMLParser.__init__(self)
589 self.items = []
590 self._level = 0
591
592 def handle_starttag(self, tag, attrs):
593 if tag == 'li' and self._level == 0:
594 self.items.append(dict(attrs))
595 self._level += 1
596
597 def handle_endtag(self, tag):
598 self._level -= 1
599
600
601 def extract_attributes(html_element):
602 """Given a string for an HTML element such as
603 <el
604 a="foo" B="bar" c="&98;az" d=boz
605 empty= noval entity="&amp;"
606 sq='"' dq="'"
607 >
608 Decode and return a dictionary of attributes.
609 {
610 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
611 'empty': '', 'noval': None, 'entity': '&',
612 'sq': '"', 'dq': '\''
613 }.
614 NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
615 but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
616 """
617 parser = HTMLAttributeParser()
618 try:
619 parser.feed(html_element)
620 parser.close()
621 # Older Python may throw HTMLParseError in case of malformed HTML
622 except compat_HTMLParseError:
623 pass
624 return parser.attrs
625
626
627 def parse_list(webpage):
628 """Given a string for an series of HTML <li> elements,
    return a dictionary of their attributes"""
    parser = HTMLListAttrsParser()
    parser.feed(webpage)
    parser.close()
    return parser.items


def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    html = re.sub(r'\s+', ' ', html)
    html = re.sub(r'(?u)\s?<\s?br\s?/?\s?>\s?', '\n', html)
    html = re.sub(r'(?u)<\s?/\s?p\s?>\s?<\s?p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()


def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars
        alt_filename = sanitize_path(filename)
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller
            stream = open(encodeFilename(alt_filename), open_mode)
            return (stream, alt_filename)


def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp
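

# Illustrative usage of timeconvert (a sketch; the expected value was computed by hand):
#   >>> timeconvert('Wed, 14 Mar 2018 06:53:05 +0000')
#   1521010385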


def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept
    if possible.
    """
    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        elif not restricted and char == '\n':
            return ' '
        elif char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '_-' if restricted else ' -'
        elif char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and ord(char) > 127:
            return '_'
        return char

    if s == '':
        return ''
    # Handle timestamps
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
    result = ''.join(map(replace_insane, s))
    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
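

# Illustrative usage of sanitize_filename (a sketch; inputs are made up):
#   >>> sanitize_filename('12:34:56')        # timestamps keep their digits
#   '12_34_56'
#   >>> sanitize_filename('New: Song?', restricted=True)
#   'New_-_Song'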


def sanitize_path(s, force=False):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform == 'win32':
        force = False
        drive_or_unc, _ = os.path.splitdrive(s)
        if sys.version_info < (2, 7) and not drive_or_unc:
            drive_or_unc, _ = os.path.splitunc(s)
    elif force:
        drive_or_unc = ''
    else:
        return s

    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    elif force and s[0] == os.path.sep:
        sanitized_path.insert(0, os.path.sep)
    return os.path.join(*sanitized_path)


def sanitize_url(url):
    # Prepend protocol-less URLs with `http:` scheme in order to mitigate
    # the number of unwanted failures due to missing protocol
    if url.startswith('//'):
        return 'http:%s' % url
    # Fix some common typos seen so far
    COMMON_TYPOS = (
        # https://github.com/ytdl-org/youtube-dl/issues/15649
        (r'^httpss://', r'https://'),
        # https://bx1.be/lives/direct-tv/
        (r'^rmtp([es]?)://', r'rtmp\1://'),
    )
    for mistake, fixup in COMMON_TYPOS:
        if re.match(mistake, url):
            return re.sub(mistake, fixup, url)
    return url
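

# Illustrative usage of sanitize_url (a sketch; URLs are made up):
#   >>> sanitize_url('//example.com/video')
#   'http://example.com/video'
#   >>> sanitize_url('rmtp://example.com/live')
#   'rtmp://example.com/live'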


def extract_basic_auth(url):
    parts = compat_urlparse.urlsplit(url)
    if parts.username is None:
        return url, None
    url = compat_urlparse.urlunsplit(parts._replace(netloc=(
        parts.hostname if parts.port is None
        else '%s:%d' % (parts.hostname, parts.port))))
    auth_payload = base64.b64encode(
        ('%s:%s' % (parts.username, parts.password or '')).encode('utf-8'))
    return url, 'Basic ' + auth_payload.decode('utf-8')
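

# Illustrative usage of extract_basic_auth (a sketch; credentials are made up):
#   >>> extract_basic_auth('http://user:pass@example.com/x')
#   ('http://example.com/x', 'Basic dXNlcjpwYXNz')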


def sanitized_Request(url, *args, **kwargs):
    url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
    if auth_header is not None:
        headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
        headers['Authorization'] = auth_header
    return compat_urllib_request.Request(url, *args, **kwargs)


def expand_path(s):
    """Expand shell variables and ~"""
    return os.path.expandvars(compat_expanduser(s))


def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    res = []
    for el in iterable:
        if el not in res:
            res.append(el)
    return res
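

# Illustrative usage of orderedSet (a sketch):
#   >>> orderedSet([1, 2, 1, 3, 2])
#   [1, 2, 3]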


def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in compat_html_entities_html5:
        return compat_html_entities_html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/ytdl-org/youtube-dl/issues/7518
        try:
            return compat_chr(int(numstr, base))
        except ValueError:
            pass

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert type(s) == compat_str

    return re.sub(
        r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)


def escapeHTML(text):
    return (
        text
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;')
        .replace('"', '&quot;')
        .replace("'", '&#39;')
    )
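

# Illustrative usage of escapeHTML (a sketch):
#   >>> escapeHTML('<a href="foo">')
#   '&lt;a href=&quot;foo&quot;&gt;'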


def process_communicate_or_kill(p, *args, **kwargs):
    try:
        return p.communicate(*args, **kwargs)
    except BaseException:  # Including KeyboardInterrupt
        p.kill()
        p.wait()
        raise


class Popen(subprocess.Popen):
    if sys.platform == 'win32':
        _startupinfo = subprocess.STARTUPINFO()
        _startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    else:
        _startupinfo = None

    def __init__(self, *args, **kwargs):
        super(Popen, self).__init__(*args, **kwargs, startupinfo=self._startupinfo)

    def communicate_or_kill(self, *args, **kwargs):
        return process_communicate_or_kill(self, *args, **kwargs)


def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding


def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """

    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    # Pass '' directly to use Unicode APIs on Windows 2000 and up
    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        return s

    # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
    if sys.platform.startswith('java'):
        return s

    return s.encode(get_subprocess_encoding(), 'ignore')


def decodeFilename(b, for_subprocess=False):

    if sys.version_info >= (3, 0):
        return b

    if not isinstance(b, bytes):
        return b

    return b.decode(get_subprocess_encoding(), 'ignore')


def encodeArgument(s):
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)


def decodeArgument(b):
    return decodeFilename(b, True)


def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval


_timetuple = collections.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))


def timetuple_from_msec(msec):
    secs, msec = divmod(msec, 1000)
    mins, secs = divmod(secs, 60)
    hrs, mins = divmod(mins, 60)
    return _timetuple(hrs, mins, secs, msec)


def formatSeconds(secs, delim=':', msec=False):
    time = timetuple_from_msec(secs * 1000)
    if time.hours:
        ret = '%d%s%02d%s%02d' % (time.hours, delim, time.minutes, delim, time.seconds)
    elif time.minutes:
        ret = '%d%s%02d' % (time.minutes, delim, time.seconds)
    else:
        ret = '%d' % time.seconds
    return '%s.%03d' % (ret, time.milliseconds) if msec else ret
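

# Illustrative usage of timetuple_from_msec and formatSeconds (a sketch):
#   >>> timetuple_from_msec(1234567)
#   Time(hours=0, minutes=20, seconds=34, milliseconds=567)
#   >>> formatSeconds(3661)
#   '1:01:01'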


def _ssl_load_windows_store_certs(ssl_context, storename):
    # Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
    try:
        certs = [cert for cert, encoding, trust in ssl.enum_certificates(storename)
                 if encoding == 'x509_asn' and (
                     trust is True or ssl.Purpose.SERVER_AUTH.oid in trust)]
    except PermissionError:
        return
    for cert in certs:
        try:
            ssl_context.load_verify_locations(cadata=cert)
        except ssl.SSLError:
            pass


def make_HTTPS_handler(params, **kwargs):
    opts_check_certificate = not params.get('nocheckcertificate')
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = opts_check_certificate
    if params.get('legacyserverconnect'):
        context.options |= 4  # SSL_OP_LEGACY_SERVER_CONNECT
    context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
    if opts_check_certificate:
        try:
            context.load_default_certs()
            # Work around the issue in load_default_certs when there are bad certificates. See:
            # https://github.com/yt-dlp/yt-dlp/issues/1060,
            # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
        except ssl.SSLError:
            # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
            if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
                # Create a new context to discard any certificates that were already loaded
                context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
                context.check_hostname, context.verify_mode = True, ssl.CERT_REQUIRED
                for storename in ('CA', 'ROOT'):
                    _ssl_load_windows_store_certs(context, storename)
            context.set_default_verify_paths()
    return YoutubeDLHTTPSHandler(params, context=context, **kwargs)


def bug_reports_message(before=';'):
    if ytdl_is_updateable():
        update_cmd = 'type yt-dlp -U to update'
    else:
        update_cmd = 'see https://github.com/yt-dlp/yt-dlp on how to update'
    msg = 'please report this issue on https://github.com/yt-dlp/yt-dlp .'
    msg += ' Make sure you are using the latest version; %s.' % update_cmd
    msg += ' Be sure to call yt-dlp with the --verbose flag and include its complete output.'

    before = before.rstrip()
    if not before or before.endswith(('.', '!', '?')):
        msg = msg[0].title() + msg[1:]

    return (before + ' ' if before else '') + msg


class YoutubeDLError(Exception):
    """Base exception for YoutubeDL errors."""
    msg = None

    def __init__(self, msg=None):
        if msg is not None:
            self.msg = msg
        elif self.msg is None:
            self.msg = type(self).__name__
        super().__init__(self.msg)


network_exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
    network_exceptions.append(ssl.CertificateError)
network_exceptions = tuple(network_exceptions)


class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
        """
        if sys.exc_info()[0] in network_exceptions:
            expected = True

        self.msg = str(msg)
        self.traceback = tb
        self.expected = expected
        self.cause = cause
        self.video_id = video_id
        self.ie = ie
        self.exc_info = sys.exc_info()  # preserve original exception

        super(ExtractorError, self).__init__(''.join((
            format_field(ie, template='[%s] '),
            format_field(video_id, template='%s: '),
            self.msg,
            format_field(cause, template=' (caused by %r)'),
            '' if expected else bug_reports_message())))

    def format_traceback(self):
        if self.traceback is None:
            return None
        return ''.join(traceback.format_tb(self.traceback))


class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super(UnsupportedError, self).__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class GeoRestrictedError(ExtractorError):
    """Geographic restriction Error exception.

    This exception may be thrown when a video is not available from your
    geographic location due to geographic restrictions imposed by a website.
    """

    def __init__(self, msg, countries=None, **kwargs):
        kwargs['expected'] = True
        super(GeoRestrictedError, self).__init__(msg, **kwargs)
        self.countries = countries


class DownloadError(YoutubeDLError):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info


class EntryNotInPlaylist(YoutubeDLError):
    """Entry not in playlist exception.

    This exception will be thrown by YoutubeDL when a requested entry
    is not found in the playlist info_dict
    """
    msg = 'Entry not found in info'


class SameFileError(YoutubeDLError):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    msg = 'Fixed output name but more than one file to download'

    def __init__(self, filename=None):
        if filename is not None:
            self.msg += f': {filename}'
        super().__init__(self.msg)


class PostProcessingError(YoutubeDLError):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """


class DownloadCancelled(YoutubeDLError):
    """ Exception raised when the download queue should be interrupted """
    msg = 'The download was cancelled'


class ExistingVideoReached(DownloadCancelled):
    """ --break-on-existing triggered """
    msg = 'Encountered a video that is already in the archive, stopping due to --break-on-existing'


class RejectedVideoReached(DownloadCancelled):
    """ --break-on-reject triggered """
    msg = 'Encountered a video that did not match filter, stopping due to --break-on-reject'


class MaxDownloadsReached(DownloadCancelled):
    """ --max-downloads limit has been reached. """
    msg = 'Maximum number of downloads reached, stopping due to --max-downloads'


class ReExtractInfo(YoutubeDLError):
    """ Video info needs to be re-extracted. """

    def __init__(self, msg, expected=False):
        super().__init__(msg)
        self.expected = expected


class ThrottledDownload(ReExtractInfo):
    """ Download speed below --throttled-rate. """
    msg = 'The download speed is below throttle limit'

    def __init__(self):
        super().__init__(self.msg, expected=False)


class UnavailableVideoError(YoutubeDLError):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    msg = 'Unable to download video'

    def __init__(self, err=None):
        if err is not None:
            self.msg += f': {err}'
        super().__init__(self.msg)


class ContentTooShortError(YoutubeDLError):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        super(ContentTooShortError, self).__init__(
            'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
        )
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected


class XAttrMetadataError(YoutubeDLError):
    def __init__(self, code=None, msg='Unknown error'):
        super(XAttrMetadataError, self).__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        if (self.code in (errno.ENOSPC, errno.EDQUOT)
                or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'


class XAttrUnavailableError(YoutubeDLError):
    pass


def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
    # expected HTTP responses to meet HTTP/1.0 or later (see also
    # https://github.com/ytdl-org/youtube-dl/issues/6727)
    if sys.version_info < (3, 0):
        kwargs['strict'] = True
    hc = http_class(*args, **compat_kwargs(kwargs))
    source_address = ydl_handler._params.get('source_address')

    if source_address is not None:
        # This is to workaround _create_connection() from socket where it will try all
        # address data from getaddrinfo() including IPv6. This filters the result from
        # getaddrinfo() based on the source_address value.
        # This is based on the cpython socket.create_connection() function.
        # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
        def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
            host, port = address
            err = None
            addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
            af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
            ip_addrs = [addr for addr in addrs if addr[0] == af]
            if addrs and not ip_addrs:
                ip_version = 'v4' if af == socket.AF_INET else 'v6'
                raise socket.error(
                    "No remote IP%s addresses available for connect, can't use '%s' as source address"
                    % (ip_version, source_address[0]))
            for res in ip_addrs:
                af, socktype, proto, canonname, sa = res
                sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                        sock.settimeout(timeout)
                    sock.bind(source_address)
                    sock.connect(sa)
                    err = None  # Explicitly break reference cycle
                    return sock
                except socket.error as _:
                    err = _
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            else:
                raise socket.error('getaddrinfo returns an empty list')
        if hasattr(hc, '_create_connection'):
            hc._create_connection = _create_connection
        sa = (source_address, 0)
        if hasattr(hc, 'source_address'):  # Python 2.7+
            hc.source_address = sa
        else:  # Python 2.6
            def _hc_connect(self, *args, **kwargs):
                sock = _create_connection(
                    (self.host, self.port), self.timeout, sa)
                if is_https:
                    self.sock = ssl.wrap_socket(
                        sock, self.key_file, self.cert_file,
                        ssl_version=ssl.PROTOCOL_TLSv1)
                else:
                    self.sock = sock
            hc.connect = functools.partial(_hc_connect, hc)

    return hc


def handle_youtubedl_headers(headers):
    filtered_headers = headers

    if 'Youtubedl-no-compression' in filtered_headers:
        filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
        del filtered_headers['Youtubedl-no-compression']

    return filtered_headers
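

# Illustrative usage of handle_youtubedl_headers (a sketch):
#   >>> handle_youtubedl_headers({'Youtubedl-no-compression': 'True', 'Accept-Encoding': 'gzip'})
#   {}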


class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        conn_class = compat_http_client.HTTPConnection

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        if not data:
            return data
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    def http_request(self, req):
        # According to RFC 3986, URLs cannot contain non-ASCII characters; however, this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in std_headers.items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/ytdl-org/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                if sys.version_info >= (3, 0):
                    location = location.encode('iso-8859-1').decode('utf-8')
                else:
                    location = location.decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    if sys.version_info < (3, 0):
                        location_escaped = location_escaped.encode('utf-8')
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response


def make_socks_conn_class(base_class, socks_proxy):
    assert issubclass(base_class, (
        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))

    url_components = compat_urlparse.urlparse(socks_proxy)
    if url_components.scheme.lower() == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif url_components.scheme.lower() in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif url_components.scheme.lower() == 'socks4a':
        socks_type = ProxyType.SOCKS4A

    def unquote_if_non_empty(s):
        if not s:
            return s
        return compat_urllib_parse_unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if type(self.timeout) in (int, float):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            if isinstance(self, compat_http_client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection
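

# Illustrative usage of make_socks_conn_class (a sketch; the host and port are made up):
#   >>> conn_class = make_socks_conn_class(
#   ...     compat_http_client.HTTPConnection, 'socks5://127.0.0.1:1080')
# The proxy URL follows the scheme://[user:pass@]host[:port] shape handled above.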


class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        conn_class = self._https_conn_class

        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, True),
            req, **kwargs)


class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
    """
    See [1] for cookie file format.

    1. https://curl.haxx.se/docs/http-cookies.html
    """
    _HTTPONLY_PREFIX = '#HttpOnly_'
    _ENTRY_LEN = 7
    _HEADER = '''# Netscape HTTP Cookie File
# This file is generated by yt-dlp. Do not edit.

'''
    _CookieFileEntry = collections.namedtuple(
        'CookieFileEntry',
        ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))

    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
        """
        Save cookies to a file.

        Most of the code is taken from CPython 3.8 and slightly adapted
        to support cookie files with UTF-8 in both python 2 and 3.
        """
        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)

        # Store session cookies with `expires` set to 0 instead of an empty
        # string
        for cookie in self:
            if cookie.expires is None:
                cookie.expires = 0

        with io.open(filename, 'w', encoding='utf-8') as f:
            f.write(self._HEADER)
            now = time.time()
            for cookie in self:
                if not ignore_discard and cookie.discard:
                    continue
                if not ignore_expires and cookie.is_expired(now):
                    continue
                if cookie.secure:
                    secure = 'TRUE'
                else:
                    secure = 'FALSE'
                if cookie.domain.startswith('.'):
                    initial_dot = 'TRUE'
                else:
                    initial_dot = 'FALSE'
                if cookie.expires is not None:
                    expires = compat_str(cookie.expires)
                else:
                    expires = ''
                if cookie.value is None:
                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
                    # with no name, whereas http.cookiejar regards it as a
                    # cookie with no value.
                    name = ''
                    value = cookie.name
                else:
                    name = cookie.name
                    value = cookie.value
                f.write(
                    '\t'.join([cookie.domain, initial_dot, cookie.path,
                               secure, expires, name, value]) + '\n')

    def load(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Load cookies from a file."""
        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)

        def prepare_line(line):
            if line.startswith(self._HTTPONLY_PREFIX):
                line = line[len(self._HTTPONLY_PREFIX):]
            # comments and empty lines are fine
            if line.startswith('#') or not line.strip():
                return line
            cookie_list = line.split('\t')
            if len(cookie_list) != self._ENTRY_LEN:
                raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list))
            cookie = self._CookieFileEntry(*cookie_list)
            if cookie.expires_at and not cookie.expires_at.isdigit():
                raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
            return line

        cf = io.StringIO()
        with io.open(filename, encoding='utf-8') as f:
            for line in f:
                try:
                    cf.write(prepare_line(line))
                except compat_cookiejar.LoadError as e:
                    write_string(
                        'WARNING: skipping cookie file entry due to %s: %r\n'
                        % (e, line), sys.stderr)
                    continue
        cf.seek(0)
        self._really_load(cf, filename, ignore_discard, ignore_expires)
        # Session cookies are denoted by either the `expires` field set to
        # an empty string or 0. MozillaCookieJar only recognizes the former
        # (see [1]), so we need to force the latter to be recognized as session
        # cookies on our own.
        # Session cookies may be important for cookies-based authentication,
        # e.g. usually, when a user does not check the 'Remember me' check box
        # while logging in on a site, some important cookies are stored as
        # session cookies so that not recognizing them will result in failed login.
        # 1. https://bugs.python.org/issue17164
        for cookie in self:
            # Treat `expires=0` cookies as session cookies
            if cookie.expires == 0:
                cookie.expires = None
                cookie.discard = True


class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        # Python 2 will choke on next HTTP request in row if there are non-ASCII
        # characters in Set-Cookie HTTP header of last response (see
        # https://github.com/ytdl-org/youtube-dl/issues/6769).
        # In order to at least prevent crashing we will percent encode Set-Cookie
        # header before HTTPCookieProcessor starts processing it.
        # if sys.version_info < (3, 0) and response.headers:
        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
        #         set_cookie = response.headers.get(set_cookie_header)
        #         if set_cookie:
        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
        #             if set_cookie != set_cookie_escaped:
        #                 del response.headers[set_cookie_header]
        #                 response.headers[set_cookie_header] = set_cookie_escaped
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response


class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
    """YoutubeDL redirect handler

    The code is based on HTTPRedirectHandler implementation from CPython [1].

    This redirect handler solves two issues:
     - ensures redirect URL is always unicode under python 2
     - introduces support for experimental HTTP response status code
       308 Permanent Redirect [2] used by some sites [3]

    1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
    2. https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308
    3. https://github.com/ytdl-org/youtube-dl/issues/28768
    """

    http_error_301 = http_error_303 = http_error_307 = http_error_308 = compat_urllib_request.HTTPRedirectHandler.http_error_302

    def redirect_request(self, req, fp, code, msg, headers, newurl):
        """Return a Request or None in response to a redirect.

        This is called by the http_error_30x methods when a
        redirection response is received. If a redirection should
        take place, return a new Request to allow http_error_30x to
        perform the redirect. Otherwise, raise HTTPError if no-one
        else should try to handle this url. Return None if you can't
        but another Handler might.
        """
        m = req.get_method()
        if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
                 or code in (301, 302, 303) and m == "POST")):
            raise compat_HTTPError(req.full_url, code, msg, headers, fp)
        # Strictly (according to RFC 2616), 301 or 302 in response to
        # a POST MUST NOT cause a redirection without confirmation
        # from the user (of urllib.request, in this case). In practice,
        # essentially all clients do redirect in this case, so we do
        # the same.
        # On python 2 urlh.geturl() may sometimes return the redirect URL
        # as a byte string instead of unicode. This workaround forces
        # it to always return unicode.
        if sys.version_info[0] < 3:
            newurl = compat_str(newurl)

        # Be conciliant with URIs containing a space. This is mainly
        # redundant with the more complete encoding done in http_error_302(),
        # but it is kept for compatibility with other callers.
        newurl = newurl.replace(' ', '%20')

        CONTENT_HEADERS = ("content-length", "content-type")
        # NB: don't use dict comprehension for python 2.6 compatibility
        newheaders = dict((k, v) for k, v in req.headers.items()
                          if k.lower() not in CONTENT_HEADERS)
        return compat_urllib_request.Request(
            newurl, headers=newheaders, origin_req_host=req.origin_req_host,
            unverifiable=True)


def extract_timezone(date_str):
    m = re.search(
        r'''(?x)
            ^.{8,}?                                          # >=8 char non-TZ prefix, if present
            (?P<tz>Z|                                        # just the UTC Z, or
                (?:(?<=.\b\d{4}|\b\d{2}:\d\d)|               # preceded by 4 digits or hh:mm or
                   (?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d))  # not preceded by 3 alpha word or >= 4 alpha or 2 digits
                [ ]?                                         # optional space
                (?P<sign>\+|-)                               # +/-
                (?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})   # hh[:]mm
            $)
        ''', date_str)
    if not m:
        timezone = datetime.timedelta()
    else:
        date_str = date_str[:-len(m.group('tz'))]
        if not m.group('sign'):
            timezone = datetime.timedelta()
        else:
            sign = 1 if m.group('sign') == '+' else -1
            timezone = datetime.timedelta(
                hours=sign * int(m.group('hours')),
                minutes=sign * int(m.group('minutes')))
    return timezone, date_str
1733
1734
1735 def parse_iso8601(date_str, delimiter='T', timezone=None):
1736 """ Return a UNIX timestamp from the given date """
1737
1738 if date_str is None:
1739 return None
1740
1741 date_str = re.sub(r'\.[0-9]+', '', date_str)
1742
1743 if timezone is None:
1744 timezone, date_str = extract_timezone(date_str)
1745
1746 try:
1747 date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
1748 dt = datetime.datetime.strptime(date_str, date_format) - timezone
1749 return calendar.timegm(dt.timetuple())
1750 except ValueError:
1751 pass
1752
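# Illustrative examples (editorial addition, hand-checked against the
# implementation above). The timezone offset is folded into the result:
#   parse_iso8601('1970-01-01T01:00:00+01:00')  # -> 0
#   parse_iso8601('1970-01-01T00:00:00.123Z')   # -> 0 (fractional seconds are dropped)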
1753
1754 def date_formats(day_first=True):
1755 return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST
1756
1757
1758 def unified_strdate(date_str, day_first=True):
1759 """Return a string with the date in the format YYYYMMDD"""
1760
1761 if date_str is None:
1762 return None
1763 upload_date = None
1764 # Replace commas
1765 date_str = date_str.replace(',', ' ')
1766 # Remove AM/PM + timezone
1767 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
1768 _, date_str = extract_timezone(date_str)
1769
1770 for expression in date_formats(day_first):
1771 try:
1772 upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
1773 except ValueError:
1774 pass
1775 if upload_date is None:
1776 timetuple = email.utils.parsedate_tz(date_str)
1777 if timetuple:
1778 try:
1779 upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
1780 except ValueError:
1781 pass
1782 if upload_date is not None:
1783 return compat_str(upload_date)
1784
1785
1786 def unified_timestamp(date_str, day_first=True):
1787 if date_str is None:
1788 return None
1789
1790 date_str = re.sub(r'[,|]', '', date_str)
1791
1792 pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
1793 timezone, date_str = extract_timezone(date_str)
1794
1795 # Remove AM/PM + timezone
1796 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
1797
1798 # Remove unrecognized timezones from ISO 8601 alike timestamps
1799 m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
1800 if m:
1801 date_str = date_str[:-len(m.group('tz'))]
1802
1803 # Python only supports microseconds, so remove nanoseconds
1804 m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
1805 if m:
1806 date_str = m.group(1)
1807
1808 for expression in date_formats(day_first):
1809 try:
1810 dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
1811 return calendar.timegm(dt.timetuple())
1812 except ValueError:
1813 pass
1814 timetuple = email.utils.parsedate_tz(date_str)
1815 if timetuple:
1816 return calendar.timegm(timetuple) + pm_delta * 3600
1817
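# Illustrative examples (editorial addition). Exact coverage depends on
# DATE_FORMATS/DATE_FORMATS_DAY_FIRST defined earlier in this module,
# assuming they include '%Y-%m-%d' style expressions as upstream does:
#   unified_strdate('2014-12-04')             # -> '20141204'
#   unified_timestamp('2014-12-04 09:55:34')  # -> UNIX timestamp of that UTC time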
1818
1819 def determine_ext(url, default_ext='unknown_video'):
1820 if url is None or '.' not in url:
1821 return default_ext
1822 guess = url.partition('?')[0].rpartition('.')[2]
1823 if re.match(r'^[A-Za-z0-9]+$', guess):
1824 return guess
1825 # Try to extract ext from URLs like http://example.com/foo/bar.mp4/?download
1826 elif guess.rstrip('/') in KNOWN_EXTENSIONS:
1827 return guess.rstrip('/')
1828 else:
1829 return default_ext
1830
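# Illustrative examples (editorial addition; the second one assumes 'mp4'
# is listed in KNOWN_EXTENSIONS, defined elsewhere in this module):
#   determine_ext('http://example.com/video.mp4?download=1')   # -> 'mp4'
#   determine_ext('http://example.com/foo/bar.mp4/?download')  # -> 'mp4'
#   determine_ext('http://example.com/page')                   # -> 'unknown_video'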
1831
1832 def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
1833 return replace_extension(filename, sub_lang + '.' + sub_format, expected_real_ext)
1834
1835
1836 def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
1837 """
1838 Return a datetime object from a string in the format YYYYMMDD or
1839 (now|today|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?
1840
1841 format: the strptime format used to parse date_str
1842 precision: round the time portion of a datetime object.
1843 auto|microsecond|second|minute|hour|day.
1844 auto: round to the unit provided in date_str (if applicable).
1845 """
1846 auto_precision = False
1847 if precision == 'auto':
1848 auto_precision = True
1849 precision = 'microsecond'
1850 today = datetime_round(datetime.datetime.utcnow(), precision)
1851 if date_str in ('now', 'today'):
1852 return today
1853 if date_str == 'yesterday':
1854 return today - datetime.timedelta(days=1)
1855 match = re.match(
1856 r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)(s)?',
1857 date_str)
1858 if match is not None:
1859 start_time = datetime_from_str(match.group('start'), precision, format)
1860 time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1)
1861 unit = match.group('unit')
1862 if unit == 'month' or unit == 'year':
1863 new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time)
1864 unit = 'day'
1865 else:
1866 if unit == 'week':
1867 unit = 'day'
1868 time *= 7
1869 delta = datetime.timedelta(**{unit + 's': time})
1870 new_date = start_time + delta
1871 if auto_precision:
1872 return datetime_round(new_date, unit)
1873 return new_date
1874
1875 return datetime_round(datetime.datetime.strptime(date_str, format), precision)
1876
1877
1878 def date_from_str(date_str, format='%Y%m%d'):
1879 """
1880 Return a datetime object from a string in the format YYYYMMDD or
1881 (now|today|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?
1882
1883 format: the strptime format used to parse date_str
1884 """
1885 return datetime_from_str(date_str, precision='microsecond', format=format).date()
1886
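# Illustrative examples (editorial addition):
#   date_from_str('20211231')     # -> datetime.date(2021, 12, 31)
#   date_from_str('now')          # -> today's date (UTC)
#   date_from_str('today-1week')  # -> the date 7 days ago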
1887
1888 def datetime_add_months(dt, months):
1889 """Increment/Decrement a datetime object by months."""
1890 month = dt.month + months - 1
1891 year = dt.year + month // 12
1892 month = month % 12 + 1
1893 day = min(dt.day, calendar.monthrange(year, month)[1])
1894 return dt.replace(year, month, day)
1895
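# Illustrative examples (editorial addition); note that the day of month
# is clamped to the length of the target month:
#   datetime_add_months(datetime.datetime(2020, 1, 31), 1)   # -> 2020-02-29
#   datetime_add_months(datetime.datetime(2020, 1, 15), -2)  # -> 2019-11-15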
1896
1897 def datetime_round(dt, precision='day'):
1898 """
1899 Round a datetime object's time to a specific precision
1900 """
1901 if precision == 'microsecond':
1902 return dt
1903
1904 unit_seconds = {
1905 'day': 86400,
1906 'hour': 3600,
1907 'minute': 60,
1908 'second': 1,
1909 }
1910 roundto = lambda x, n: ((x + n / 2) // n) * n
1911 timestamp = calendar.timegm(dt.timetuple())
1912 return datetime.datetime.utcfromtimestamp(roundto(timestamp, unit_seconds[precision]))
1913
1914
1915 def hyphenate_date(date_str):
1916 """
1917 Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
1918 match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
1919 if match is not None:
1920 return '-'.join(match.groups())
1921 else:
1922 return date_str
1923
1924
1925 class DateRange(object):
1926 """Represents a time interval between two dates"""
1927
1928 def __init__(self, start=None, end=None):
1929 """start and end must be strings in the format accepted by date"""
1930 if start is not None:
1931 self.start = date_from_str(start)
1932 else:
1933 self.start = datetime.datetime.min.date()
1934 if end is not None:
1935 self.end = date_from_str(end)
1936 else:
1937 self.end = datetime.datetime.max.date()
1938 if self.start > self.end:
1939 raise ValueError('Date range: "%s"; the start date must be before the end date' % self)
1940
1941 @classmethod
1942 def day(cls, day):
1943 """Returns a range that only contains the given day"""
1944 return cls(day, day)
1945
1946 def __contains__(self, date):
1947 """Check if the date is in the range"""
1948 if not isinstance(date, datetime.date):
1949 date = date_from_str(date)
1950 return self.start <= date <= self.end
1951
1952 def __str__(self):
1953 return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
1954
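# Illustrative usage (editorial addition):
#   '20200615' in DateRange('20200101', '20201231')         # -> True
#   datetime.date(2021, 1, 2) in DateRange.day('20210101')  # -> False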
1955
1956 def platform_name():
1957 """ Returns the platform name as a compat_str """
1958 res = platform.platform()
1959 if isinstance(res, bytes):
1960 res = res.decode(preferredencoding())
1961
1962 assert isinstance(res, compat_str)
1963 return res
1964
1965
1966 def get_windows_version():
1967 ''' Get Windows version. None if it's not running on Windows '''
1968 if compat_os_name == 'nt':
1969 return version_tuple(platform.win32_ver()[1])
1970 else:
1971 return None
1972
1973
1974 def _windows_write_string(s, out):
1975 """ Returns True if the string was written using special methods,
1976 False if it has yet to be written out."""
1977 # Adapted from http://stackoverflow.com/a/3259271/35070
1978
1979 import ctypes.wintypes
1980
1981 WIN_OUTPUT_IDS = {
1982 1: -11,
1983 2: -12,
1984 }
1985
1986 try:
1987 fileno = out.fileno()
1988 except AttributeError:
1989 # If the output stream doesn't have a fileno, it's virtual
1990 return False
1991 except io.UnsupportedOperation:
1992 # Some strange Windows pseudo files?
1993 return False
1994 if fileno not in WIN_OUTPUT_IDS:
1995 return False
1996
1997 GetStdHandle = compat_ctypes_WINFUNCTYPE(
1998 ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
1999 ('GetStdHandle', ctypes.windll.kernel32))
2000 h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
2001
2002 WriteConsoleW = compat_ctypes_WINFUNCTYPE(
2003 ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
2004 ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
2005 ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
2006 written = ctypes.wintypes.DWORD(0)
2007
2008 GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32))
2009 FILE_TYPE_CHAR = 0x0002
2010 FILE_TYPE_REMOTE = 0x8000
2011 GetConsoleMode = compat_ctypes_WINFUNCTYPE(
2012 ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
2013 ctypes.POINTER(ctypes.wintypes.DWORD))(
2014 ('GetConsoleMode', ctypes.windll.kernel32))
2015 INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
2016
2017 def not_a_console(handle):
2018 if handle == INVALID_HANDLE_VALUE or handle is None:
2019 return True
2020 return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
2021 or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
2022
2023 if not_a_console(h):
2024 return False
2025
2026 def next_nonbmp_pos(s):
2027 try:
2028 return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
2029 except StopIteration:
2030 return len(s)
2031
2032 while s:
2033 count = min(next_nonbmp_pos(s), 1024)
2034
2035 ret = WriteConsoleW(
2036 h, s, count if count else 2, ctypes.byref(written), None)
2037 if ret == 0:
2038 raise OSError('Failed to write string')
2039 if not count: # We just wrote a non-BMP character
2040 assert written.value == 2
2041 s = s[1:]
2042 else:
2043 assert written.value > 0
2044 s = s[written.value:]
2045 return True
2046
2047
2048 def write_string(s, out=None, encoding=None):
2049 if out is None:
2050 out = sys.stderr
2051 assert type(s) == compat_str
2052
2053 if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
2054 if _windows_write_string(s, out):
2055 return
2056
2057 if ('b' in getattr(out, 'mode', '')
2058 or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr
2059 byt = s.encode(encoding or preferredencoding(), 'ignore')
2060 out.write(byt)
2061 elif hasattr(out, 'buffer'):
2062 enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
2063 byt = s.encode(enc, 'ignore')
2064 out.buffer.write(byt)
2065 else:
2066 out.write(s)
2067 out.flush()
2068
2069
2070 def bytes_to_intlist(bs):
2071 if not bs:
2072 return []
2073 if isinstance(bs[0], int): # Python 3
2074 return list(bs)
2075 else:
2076 return [ord(c) for c in bs]
2077
2078
2079 def intlist_to_bytes(xs):
2080 if not xs:
2081 return b''
2082 return compat_struct_pack('%dB' % len(xs), *xs)
2083
2084
2085 # Cross-platform file locking
2086 if sys.platform == 'win32':
2087 import ctypes.wintypes
2088 import msvcrt
2089
2090 class OVERLAPPED(ctypes.Structure):
2091 _fields_ = [
2092 ('Internal', ctypes.wintypes.LPVOID),
2093 ('InternalHigh', ctypes.wintypes.LPVOID),
2094 ('Offset', ctypes.wintypes.DWORD),
2095 ('OffsetHigh', ctypes.wintypes.DWORD),
2096 ('hEvent', ctypes.wintypes.HANDLE),
2097 ]
2098
2099 kernel32 = ctypes.windll.kernel32
2100 LockFileEx = kernel32.LockFileEx
2101 LockFileEx.argtypes = [
2102 ctypes.wintypes.HANDLE, # hFile
2103 ctypes.wintypes.DWORD, # dwFlags
2104 ctypes.wintypes.DWORD, # dwReserved
2105 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
2106 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
2107 ctypes.POINTER(OVERLAPPED) # Overlapped
2108 ]
2109 LockFileEx.restype = ctypes.wintypes.BOOL
2110 UnlockFileEx = kernel32.UnlockFileEx
2111 UnlockFileEx.argtypes = [
2112 ctypes.wintypes.HANDLE, # hFile
2113 ctypes.wintypes.DWORD, # dwReserved
2114 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
2115 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
2116 ctypes.POINTER(OVERLAPPED) # Overlapped
2117 ]
2118 UnlockFileEx.restype = ctypes.wintypes.BOOL
2119 whole_low = 0xffffffff
2120 whole_high = 0x7fffffff
2121
2122 def _lock_file(f, exclusive):
2123 overlapped = OVERLAPPED()
2124 overlapped.Offset = 0
2125 overlapped.OffsetHigh = 0
2126 overlapped.hEvent = 0
2127 f._lock_file_overlapped_p = ctypes.pointer(overlapped)
2128 handle = msvcrt.get_osfhandle(f.fileno())
2129 if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
2130 whole_low, whole_high, f._lock_file_overlapped_p):
2131 raise OSError('Locking file failed: %r' % ctypes.FormatError())
2132
2133 def _unlock_file(f):
2134 assert f._lock_file_overlapped_p
2135 handle = msvcrt.get_osfhandle(f.fileno())
2136 if not UnlockFileEx(handle, 0,
2137 whole_low, whole_high, f._lock_file_overlapped_p):
2138 raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
2139
2140 else:
2141 # Some platforms, such as Jython, are missing fcntl
2142 try:
2143 import fcntl
2144
2145 def _lock_file(f, exclusive):
2146 fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
2147
2148 def _unlock_file(f):
2149 fcntl.flock(f, fcntl.LOCK_UN)
2150 except ImportError:
2151 UNSUPPORTED_MSG = 'file locking is not supported on this platform'
2152
2153 def _lock_file(f, exclusive):
2154 raise IOError(UNSUPPORTED_MSG)
2155
2156 def _unlock_file(f):
2157 raise IOError(UNSUPPORTED_MSG)
2158
2159
2160 class locked_file(object):
2161 def __init__(self, filename, mode, encoding=None):
2162 assert mode in ['r', 'a', 'w']
2163 self.f = io.open(filename, mode, encoding=encoding)
2164 self.mode = mode
2165
2166 def __enter__(self):
2167 exclusive = self.mode != 'r'
2168 try:
2169 _lock_file(self.f, exclusive)
2170 except IOError:
2171 self.f.close()
2172 raise
2173 return self
2174
2175 def __exit__(self, etype, value, traceback):
2176 try:
2177 _unlock_file(self.f)
2178 finally:
2179 self.f.close()
2180
2181 def __iter__(self):
2182 return iter(self.f)
2183
2184 def write(self, *args):
2185 return self.f.write(*args)
2186
2187 def read(self, *args):
2188 return self.f.read(*args)
2189
2190
2191 def get_filesystem_encoding():
2192 encoding = sys.getfilesystemencoding()
2193 return encoding if encoding is not None else 'utf-8'
2194
2195
2196 def shell_quote(args):
2197 quoted_args = []
2198 encoding = get_filesystem_encoding()
2199 for a in args:
2200 if isinstance(a, bytes):
2201 # We may get a filename encoded with 'encodeFilename'
2202 a = a.decode(encoding)
2203 quoted_args.append(compat_shlex_quote(a))
2204 return ' '.join(quoted_args)
2205
2206
2207 def smuggle_url(url, data):
2208 """ Pass additional data in a URL for internal use. """
2209
2210 url, idata = unsmuggle_url(url, {})
2211 data.update(idata)
2212 sdata = compat_urllib_parse_urlencode(
2213 {'__youtubedl_smuggle': json.dumps(data)})
2214 return url + '#' + sdata
2215
2216
2217 def unsmuggle_url(smug_url, default=None):
2218 if '#__youtubedl_smuggle' not in smug_url:
2219 return smug_url, default
2220 url, _, sdata = smug_url.rpartition('#')
2221 jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
2222 data = json.loads(jsond)
2223 return url, data
2224
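# Illustrative round trip (editorial addition); the extra data travels in
# the URL fragment and never reaches the server:
#   url = smuggle_url('https://example.com/watch', {'referer': 'https://example.com/'})
#   unsmuggle_url(url)  # -> ('https://example.com/watch', {'referer': 'https://example.com/'})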
2225
2226 def format_decimal_suffix(num, fmt='%d%s', *, factor=1000):
2227 """ Formats numbers with decimal sufixes like K, M, etc """
2228 num, factor = float_or_none(num), float(factor)
2229 if num is None:
2230 return None
2231 exponent = 0 if num == 0 else int(math.log(num, factor))
2232 suffix = ['', *'kMGTPEZY'][exponent]
2233 if factor == 1024:
2234 suffix = {'k': 'Ki', '': ''}.get(suffix, f'{suffix}i')
2235 converted = num / (factor ** exponent)
2236 return fmt % (converted, suffix)
2237
2238
2239 def format_bytes(bytes):
2240 return format_decimal_suffix(bytes, '%.2f%sB', factor=1024) or 'N/A'
2241
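# Illustrative examples (editorial addition):
#   format_decimal_suffix(1200, '%.1f%s')  # -> '1.2k'
#   format_bytes(1048576)                  # -> '1.00MiB'
#   format_bytes(None)                     # -> 'N/A'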
2242
2243 def lookup_unit_table(unit_table, s):
2244 units_re = '|'.join(re.escape(u) for u in unit_table)
2245 m = re.match(
2246 r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
2247 if not m:
2248 return None
2249 num_str = m.group('num').replace(',', '.')
2250 mult = unit_table[m.group('unit')]
2251 return int(float(num_str) * mult)
2252
2253
2254 def parse_filesize(s):
2255 if s is None:
2256 return None
2257
2258 # The lower-case forms are of course incorrect and unofficial,
2259 # but we support those too
2260 _UNIT_TABLE = {
2261 'B': 1,
2262 'b': 1,
2263 'bytes': 1,
2264 'KiB': 1024,
2265 'KB': 1000,
2266 'kB': 1024,
2267 'Kb': 1000,
2268 'kb': 1000,
2269 'kilobytes': 1000,
2270 'kibibytes': 1024,
2271 'MiB': 1024 ** 2,
2272 'MB': 1000 ** 2,
2273 'mB': 1024 ** 2,
2274 'Mb': 1000 ** 2,
2275 'mb': 1000 ** 2,
2276 'megabytes': 1000 ** 2,
2277 'mebibytes': 1024 ** 2,
2278 'GiB': 1024 ** 3,
2279 'GB': 1000 ** 3,
2280 'gB': 1024 ** 3,
2281 'Gb': 1000 ** 3,
2282 'gb': 1000 ** 3,
2283 'gigabytes': 1000 ** 3,
2284 'gibibytes': 1024 ** 3,
2285 'TiB': 1024 ** 4,
2286 'TB': 1000 ** 4,
2287 'tB': 1024 ** 4,
2288 'Tb': 1000 ** 4,
2289 'tb': 1000 ** 4,
2290 'terabytes': 1000 ** 4,
2291 'tebibytes': 1024 ** 4,
2292 'PiB': 1024 ** 5,
2293 'PB': 1000 ** 5,
2294 'pB': 1024 ** 5,
2295 'Pb': 1000 ** 5,
2296 'pb': 1000 ** 5,
2297 'petabytes': 1000 ** 5,
2298 'pebibytes': 1024 ** 5,
2299 'EiB': 1024 ** 6,
2300 'EB': 1000 ** 6,
2301 'eB': 1024 ** 6,
2302 'Eb': 1000 ** 6,
2303 'eb': 1000 ** 6,
2304 'exabytes': 1000 ** 6,
2305 'exbibytes': 1024 ** 6,
2306 'ZiB': 1024 ** 7,
2307 'ZB': 1000 ** 7,
2308 'zB': 1024 ** 7,
2309 'Zb': 1000 ** 7,
2310 'zb': 1000 ** 7,
2311 'zettabytes': 1000 ** 7,
2312 'zebibytes': 1024 ** 7,
2313 'YiB': 1024 ** 8,
2314 'YB': 1000 ** 8,
2315 'yB': 1024 ** 8,
2316 'Yb': 1000 ** 8,
2317 'yb': 1000 ** 8,
2318 'yottabytes': 1000 ** 8,
2319 'yobibytes': 1024 ** 8,
2320 }
2321
2322 return lookup_unit_table(_UNIT_TABLE, s)
2323
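# Illustrative examples (editorial addition); note the decimal/binary split:
#   parse_filesize('500 KB')   # -> 500000
#   parse_filesize('500 KiB')  # -> 512000
#   parse_filesize('1.5 GiB')  # -> 1610612736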
2324
2325 def parse_count(s):
2326 if s is None:
2327 return None
2328
2329 s = re.sub(r'^[^\d]+\s', '', s).strip()
2330
2331 if re.match(r'^[\d,.]+$', s):
2332 return str_to_int(s)
2333
2334 _UNIT_TABLE = {
2335 'k': 1000,
2336 'K': 1000,
2337 'm': 1000 ** 2,
2338 'M': 1000 ** 2,
2339 'kk': 1000 ** 2,
2340 'KK': 1000 ** 2,
2341 'b': 1000 ** 3,
2342 'B': 1000 ** 3,
2343 }
2344
2345 ret = lookup_unit_table(_UNIT_TABLE, s)
2346 if ret is not None:
2347 return ret
2348
2349 mobj = re.match(r'([\d,.]+)(?:$|\s)', s)
2350 if mobj:
2351 return str_to_int(mobj.group(1))
2352
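# Illustrative examples (editorial addition):
#   parse_count('1.2M')         # -> 1200000
#   parse_count('1,234 views')  # -> 1234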
2353
2354 def parse_resolution(s):
2355 if s is None:
2356 return {}
2357
2358 mobj = re.search(r'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s)
2359 if mobj:
2360 return {
2361 'width': int(mobj.group('w')),
2362 'height': int(mobj.group('h')),
2363 }
2364
2365 mobj = re.search(r'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s)
2366 if mobj:
2367 return {'height': int(mobj.group(1))}
2368
2369 mobj = re.search(r'\b([48])[kK]\b', s)
2370 if mobj:
2371 return {'height': int(mobj.group(1)) * 540}
2372
2373 return {}
2374
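# Illustrative examples (editorial addition):
#   parse_resolution('1920x1080')  # -> {'width': 1920, 'height': 1080}
#   parse_resolution('720p')       # -> {'height': 720}
#   parse_resolution('4K')         # -> {'height': 2160}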
2375
2376 def parse_bitrate(s):
2377 if not isinstance(s, compat_str):
2378 return
2379 mobj = re.search(r'\b(\d+)\s*kbps', s)
2380 if mobj:
2381 return int(mobj.group(1))
2382
2383
2384 def month_by_name(name, lang='en'):
2385 """ Return the number of a month by (locale-independently) English name """
2386
2387 month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
2388
2389 try:
2390 return month_names.index(name) + 1
2391 except ValueError:
2392 return None
2393
2394
2395 def month_by_abbreviation(abbrev):
2396 """ Return the number of a month by (locale-independently) English
2397 abbreviations """
2398
2399 try:
2400 return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
2401 except ValueError:
2402 return None
2403
2404
2405 def fix_xml_ampersands(xml_str):
2406 """Replace all the '&' by '&amp;' in XML"""
2407 return re.sub(
2408 r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
2409 '&amp;',
2410 xml_str)
2411
2412
2413 def setproctitle(title):
2414 assert isinstance(title, compat_str)
2415
2416 # ctypes in Jython is not complete
2417 # http://bugs.jython.org/issue2148
2418 if sys.platform.startswith('java'):
2419 return
2420
2421 try:
2422 libc = ctypes.cdll.LoadLibrary('libc.so.6')
2423 except OSError:
2424 return
2425 except TypeError:
2426 # LoadLibrary in Windows Python 2.7.13 only expects
2427 # a bytestring, but since unicode_literals turns
2428 # every string into a unicode string, it fails.
2429 return
2430 title_bytes = title.encode('utf-8')
2431 buf = ctypes.create_string_buffer(len(title_bytes))
2432 buf.value = title_bytes
2433 try:
2434 libc.prctl(15, buf, 0, 0, 0)
2435 except AttributeError:
2436 return # Strange libc, just skip this
2437
2438
2439 def remove_start(s, start):
2440 return s[len(start):] if s is not None and s.startswith(start) else s
2441
2442
2443 def remove_end(s, end):
2444 return s[:-len(end)] if s is not None and s.endswith(end) else s
2445
2446
2447 def remove_quotes(s):
2448 if s is None or len(s) < 2:
2449 return s
2450 for quote in ('"', "'", ):
2451 if s[0] == quote and s[-1] == quote:
2452 return s[1:-1]
2453 return s
2454
2455
2456 def get_domain(url):
2457 domain = re.match(r'(?:https?:\/\/)?(?:www\.)?(?P<domain>[^\n\/]+\.[^\n\/]+)(?:\/(.*))?', url)
2458 return domain.group('domain') if domain else None
2459
2460
2461 def url_basename(url):
2462 path = compat_urlparse.urlparse(url).path
2463 return path.strip('/').split('/')[-1]
2464
2465
2466 def base_url(url):
2467 return re.match(r'https?://[^?#&]+/', url).group()
2468
2469
2470 def urljoin(base, path):
2471 if isinstance(path, bytes):
2472 path = path.decode('utf-8')
2473 if not isinstance(path, compat_str) or not path:
2474 return None
2475 if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
2476 return path
2477 if isinstance(base, bytes):
2478 base = base.decode('utf-8')
2479 if not isinstance(base, compat_str) or not re.match(
2480 r'^(?:https?:)?//', base):
2481 return None
2482 return compat_urlparse.urljoin(base, path)
2483
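# Illustrative examples (editorial addition):
#   urljoin('https://example.com/a/', 'b')                 # -> 'https://example.com/a/b'
#   urljoin('https://example.com', '//cdn.example.com/x')  # -> '//cdn.example.com/x'
#   urljoin(None, 'relative/path')                         # -> None (no usable base)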
2484
2485 class HEADRequest(compat_urllib_request.Request):
2486 def get_method(self):
2487 return 'HEAD'
2488
2489
2490 class PUTRequest(compat_urllib_request.Request):
2491 def get_method(self):
2492 return 'PUT'
2493
2494
2495 def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
2496 if get_attr and v is not None:
2497 v = getattr(v, get_attr, None)
2498 try:
2499 return int(v) * invscale // scale
2500 except (ValueError, TypeError, OverflowError):
2501 return default
2502
2503
2504 def str_or_none(v, default=None):
2505 return default if v is None else compat_str(v)
2506
2507
2508 def str_to_int(int_str):
2509 """ A more relaxed version of int_or_none """
2510 if isinstance(int_str, compat_integer_types):
2511 return int_str
2512 elif isinstance(int_str, compat_str):
2513 int_str = re.sub(r'[,\.\+]', '', int_str)
2514 return int_or_none(int_str)
2515
2516
2517 def float_or_none(v, scale=1, invscale=1, default=None):
2518 if v is None:
2519 return default
2520 try:
2521 return float(v) * invscale / scale
2522 except (ValueError, TypeError):
2523 return default
2524
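# Illustrative examples for the coercion helpers above (editorial addition):
#   int_or_none('42')                    # -> 42
#   int_or_none('n/a')                   # -> None
#   str_to_int('1,234')                  # -> 1234
#   float_or_none('1.5', invscale=1000)  # -> 1500.0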
2525
2526 def bool_or_none(v, default=None):
2527 return v if isinstance(v, bool) else default
2528
2529
2530 def strip_or_none(v, default=None):
2531 return v.strip() if isinstance(v, compat_str) else default
2532
2533
2534 def url_or_none(url):
2535 if not url or not isinstance(url, compat_str):
2536 return None
2537 url = url.strip()
2538 return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None
2539
2540
2541 def strftime_or_none(timestamp, date_format, default=None):
2542 datetime_object = None
2543 try:
2544 if isinstance(timestamp, compat_numeric_types): # unix timestamp
2545 datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
2546 elif isinstance(timestamp, compat_str): # assume YYYYMMDD
2547 datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
2548 return datetime_object.strftime(date_format)
2549 except (ValueError, TypeError, AttributeError):
2550 return default
2551
2552
2553 def parse_duration(s):
2554 if not isinstance(s, compat_basestring):
2555 return None
2556 s = s.strip()
2557 if not s:
2558 return None
2559
2560 days, hours, mins, secs, ms = [None] * 5
2561 m = re.match(r'''(?x)
2562 (?P<before_secs>
2563 (?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?
2564 (?P<secs>(?(before_secs)[0-9]{1,2}|[0-9]+))
2565 (?P<ms>[.:][0-9]+)?Z?$
2566 ''', s)
2567 if m:
2568 days, hours, mins, secs, ms = m.group('days', 'hours', 'mins', 'secs', 'ms')
2569 else:
2570 m = re.match(
2571 r'''(?ix)(?:P?
2572 (?:
2573 [0-9]+\s*y(?:ears?)?\s*
2574 )?
2575 (?:
2576 [0-9]+\s*m(?:onths?)?\s*
2577 )?
2578 (?:
2579 [0-9]+\s*w(?:eeks?)?\s*
2580 )?
2581 (?:
2582 (?P<days>[0-9]+)\s*d(?:ays?)?\s*
2583 )?
2584 T)?
2585 (?:
2586 (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
2587 )?
2588 (?:
2589 (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
2590 )?
2591 (?:
2592 (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
2593 )?Z?$''', s)
2594 if m:
2595 days, hours, mins, secs, ms = m.groups()
2596 else:
2597 m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
2598 if m:
2599 hours, mins = m.groups()
2600 else:
2601 return None
2602
2603 duration = 0
2604 if secs:
2605 duration += float(secs)
2606 if mins:
2607 duration += float(mins) * 60
2608 if hours:
2609 duration += float(hours) * 60 * 60
2610 if days:
2611 duration += float(days) * 24 * 60 * 60
2612 if ms:
2613 duration += float(ms.replace(':', '.'))
2614 return duration
2615
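# Illustrative examples (editorial addition):
#   parse_duration('1:23:45')  # -> 5025.0
#   parse_duration('PT3M20S')  # -> 200.0 (ISO 8601 style)
#   parse_duration('3 min')    # -> 180.0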
2616
2617 def prepend_extension(filename, ext, expected_real_ext=None):
2618 name, real_ext = os.path.splitext(filename)
2619 return (
2620 '{0}.{1}{2}'.format(name, ext, real_ext)
2621 if not expected_real_ext or real_ext[1:] == expected_real_ext
2622 else '{0}.{1}'.format(filename, ext))
2623
2624
2625 def replace_extension(filename, ext, expected_real_ext=None):
2626 name, real_ext = os.path.splitext(filename)
2627 return '{0}.{1}'.format(
2628 name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
2629 ext)
2630
2631
2632 def check_executable(exe, args=[]):
2633 """ Checks if the given binary is installed somewhere in PATH, and returns its name.
2634 args can be a list of arguments for a short output (like -version) """
2635 try:
2636 Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate_or_kill()
2637 except OSError:
2638 return False
2639 return exe
2640
2641
2642 def _get_exe_version_output(exe, args):
2643 try:
2644 # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
2645 # SIGTTOU if yt-dlp is run in the background.
2646 # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
2647 out, _ = Popen(
2648 [encodeArgument(exe)] + args, stdin=subprocess.PIPE,
2649 stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate_or_kill()
2650 except OSError:
2651 return False
2652 if isinstance(out, bytes): # Python 2.x
2653 out = out.decode('ascii', 'ignore')
2654 return out
2655
2656
2657 def detect_exe_version(output, version_re=None, unrecognized='present'):
2658 assert isinstance(output, compat_str)
2659 if version_re is None:
2660 version_re = r'version\s+([-0-9._a-zA-Z]+)'
2661 m = re.search(version_re, output)
2662 if m:
2663 return m.group(1)
2664 else:
2665 return unrecognized
2666
2667
2668 def get_exe_version(exe, args=['--version'],
2669 version_re=None, unrecognized='present'):
2670 """ Returns the version of the specified executable,
2671 or False if the executable is not present """
2672 out = _get_exe_version_output(exe, args)
2673 return detect_exe_version(out, version_re, unrecognized) if out else False
2674
2675
2676 class LazyList(collections.abc.Sequence):
2677 ''' Lazy immutable list from an iterable
2678 Note that slices of a LazyList are lists and not LazyList'''
2679
2680 class IndexError(IndexError):
2681 pass
2682
2683 def __init__(self, iterable, *, reverse=False, _cache=None):
2684 self.__iterable = iter(iterable)
2685 self.__cache = [] if _cache is None else _cache
2686 self.__reversed = reverse
2687
2688 def __iter__(self):
2689 if self.__reversed:
2690 # We need to consume the entire iterable to iterate in reverse
2691 yield from self.exhaust()
2692 return
2693 yield from self.__cache
2694 for item in self.__iterable:
2695 self.__cache.append(item)
2696 yield item
2697
2698 def __exhaust(self):
2699 self.__cache.extend(self.__iterable)
2700 # Discard the emptied iterable to make it pickle-able
2701 self.__iterable = []
2702 return self.__cache
2703
2704 def exhaust(self):
2705 ''' Evaluate the entire iterable '''
2706 return self.__exhaust()[::-1 if self.__reversed else 1]
2707
2708 @staticmethod
2709 def __reverse_index(x):
2710 return None if x is None else -(x + 1)
2711
2712 def __getitem__(self, idx):
2713 if isinstance(idx, slice):
2714 if self.__reversed:
2715 idx = slice(self.__reverse_index(idx.start), self.__reverse_index(idx.stop), -(idx.step or 1))
2716 start, stop, step = idx.start, idx.stop, idx.step or 1
2717 elif isinstance(idx, int):
2718 if self.__reversed:
2719 idx = self.__reverse_index(idx)
2720 start, stop, step = idx, idx, 0
2721 else:
2722 raise TypeError('indices must be integers or slices')
2723 if ((start or 0) < 0 or (stop or 0) < 0
2724 or (start is None and step < 0)
2725 or (stop is None and step > 0)):
2726 # We need to consume the entire iterable to be able to slice from the end
2727 # Obviously, never use this with infinite iterables
2728 self.__exhaust()
2729 try:
2730 return self.__cache[idx]
2731 except IndexError as e:
2732 raise self.IndexError(e) from e
2733 n = max(start or 0, stop or 0) - len(self.__cache) + 1
2734 if n > 0:
2735 self.__cache.extend(itertools.islice(self.__iterable, n))
2736 try:
2737 return self.__cache[idx]
2738 except IndexError as e:
2739 raise self.IndexError(e) from e
2740
2741 def __bool__(self):
2742 try:
2743 self[-1] if self.__reversed else self[0]
2744 except self.IndexError:
2745 return False
2746 return True
2747
2748 def __len__(self):
2749 self.__exhaust()
2750 return len(self.__cache)
2751
2752 def __reversed__(self):
2753 return type(self)(self.__iterable, reverse=not self.__reversed, _cache=self.__cache)
2754
2755 def __copy__(self):
2756 return type(self)(self.__iterable, reverse=self.__reversed, _cache=self.__cache)
2757
2758 def __repr__(self):
2759 # repr and str should mimic a list. So we exhaust the iterable
2760 return repr(self.exhaust())
2761
2762 def __str__(self):
2763 return repr(self.exhaust())
2764
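# Illustrative usage (editorial addition); safe even on infinite iterables
# as long as only forward slices/indices are requested:
#   l = LazyList(itertools.count())
#   l[5]   # -> 5 (consumes only the first six items)
#   l[:3]  # -> [0, 1, 2] (slices are plain lists)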
2765
2766 class PagedList:
2767
2768 class IndexError(IndexError):
2769 pass
2770
2771 def __len__(self):
2772 # This is only useful for tests
2773 return len(self.getslice())
2774
2775 def __init__(self, pagefunc, pagesize, use_cache=True):
2776 self._pagefunc = pagefunc
2777 self._pagesize = pagesize
2778 self._use_cache = use_cache
2779 self._cache = {}
2780
2781 def getpage(self, pagenum):
2782 page_results = self._cache.get(pagenum)
2783 if page_results is None:
2784 page_results = list(self._pagefunc(pagenum))
2785 if self._use_cache:
2786 self._cache[pagenum] = page_results
2787 return page_results
2788
2789 def getslice(self, start=0, end=None):
2790 return list(self._getslice(start, end))
2791
2792 def _getslice(self, start, end):
2793 raise NotImplementedError('This method must be implemented by subclasses')
2794
2795 def __getitem__(self, idx):
2796 # NOTE: cache must be enabled if this is used
2797 if not isinstance(idx, int) or idx < 0:
2798 raise TypeError('indices must be non-negative integers')
2799 entries = self.getslice(idx, idx + 1)
2800 if not entries:
2801 raise self.IndexError()
2802 return entries[0]
2803
2804
2805 class OnDemandPagedList(PagedList):
2806 def _getslice(self, start, end):
2807 for pagenum in itertools.count(start // self._pagesize):
2808 firstid = pagenum * self._pagesize
2809 nextfirstid = pagenum * self._pagesize + self._pagesize
2810 if start >= nextfirstid:
2811 continue
2812
2813 startv = (
2814 start % self._pagesize
2815 if firstid <= start < nextfirstid
2816 else 0)
2817 endv = (
2818 ((end - 1) % self._pagesize) + 1
2819 if (end is not None and firstid <= end <= nextfirstid)
2820 else None)
2821
2822 page_results = self.getpage(pagenum)
2823 if startv != 0 or endv is not None:
2824 page_results = page_results[startv:endv]
2825 yield from page_results
2826
2827 # A little optimization - if the current page is not "full", i.e. does
2828 # not contain page_size videos, then we can assume that this page
2829 # is the last one - there are no more ids on further pages -
2830 # i.e. no need to query again.
2831 if len(page_results) + startv < self._pagesize:
2832 break
2833
2834 # If we got the whole page, but the next page is not interesting,
2835 # break out early as well
2836 if end == nextfirstid:
2837 break
2838
2839
2840 class InAdvancePagedList(PagedList):
2841 def __init__(self, pagefunc, pagecount, pagesize):
2842 self._pagecount = pagecount
2843 PagedList.__init__(self, pagefunc, pagesize, True)
2844
2845 def _getslice(self, start, end):
2846 start_page = start // self._pagesize
2847 end_page = self._pagecount if end is None else min(self._pagecount, end // self._pagesize + 1)
2848 skip_elems = start - start_page * self._pagesize
2849 only_more = None if end is None else end - start
2850 for pagenum in range(start_page, end_page):
2851 page_results = self.getpage(pagenum)
2852 if skip_elems:
2853 page_results = page_results[skip_elems:]
2854 skip_elems = None
2855 if only_more is not None:
2856 if len(page_results) < only_more:
2857 only_more -= len(page_results)
2858 else:
2859 yield from page_results[:only_more]
2860 break
2861 yield from page_results
2862
2863
2864 def uppercase_escape(s):
2865 unicode_escape = codecs.getdecoder('unicode_escape')
2866 return re.sub(
2867 r'\\U[0-9a-fA-F]{8}',
2868 lambda m: unicode_escape(m.group(0))[0],
2869 s)
2870
2871
2872 def lowercase_escape(s):
2873 unicode_escape = codecs.getdecoder('unicode_escape')
2874 return re.sub(
2875 r'\\u[0-9a-fA-F]{4}',
2876 lambda m: unicode_escape(m.group(0))[0],
2877 s)
2878
2879
2880 def escape_rfc3986(s):
2881 """Escape non-ASCII characters as suggested by RFC 3986"""
2882 if sys.version_info < (3, 0) and isinstance(s, compat_str):
2883 s = s.encode('utf-8')
2884 return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
2885
2886
2887 def escape_url(url):
2888 """Escape URL as suggested by RFC 3986"""
2889 url_parsed = compat_urllib_parse_urlparse(url)
2890 return url_parsed._replace(
2891 netloc=url_parsed.netloc.encode('idna').decode('ascii'),
2892 path=escape_rfc3986(url_parsed.path),
2893 params=escape_rfc3986(url_parsed.params),
2894 query=escape_rfc3986(url_parsed.query),
2895 fragment=escape_rfc3986(url_parsed.fragment)
2896 ).geturl()
2897
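# Illustrative example (editorial addition):
#   escape_url('https://example.com/a b?q=ä')  # -> 'https://example.com/a%20b?q=%C3%A4'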
2898
2899 def parse_qs(url):
2900 return compat_parse_qs(compat_urllib_parse_urlparse(url).query)
2901
2902
2903 def read_batch_urls(batch_fd):
2904 def fixup(url):
2905 if not isinstance(url, compat_str):
2906 url = url.decode('utf-8', 'replace')
2907 BOM_UTF8 = ('\xef\xbb\xbf', '\ufeff')
2908 for bom in BOM_UTF8:
2909 if url.startswith(bom):
2910 url = url[len(bom):]
2911 url = url.lstrip()
2912 if not url or url.startswith(('#', ';', ']')):
2913 return False
2914 # "#" cannot be stripped out since it is part of the URI
2915 # However, it can be safely stripped out if following a whitespace
2916 return re.split(r'\s#', url, 1)[0].rstrip()
2917
2918 with contextlib.closing(batch_fd) as fd:
2919 return [url for url in map(fixup, fd) if url]
2920
2921
2922 def urlencode_postdata(*args, **kargs):
2923 return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
2924
2925
2926 def update_url_query(url, query):
2927 if not query:
2928 return url
2929 parsed_url = compat_urlparse.urlparse(url)
2930 qs = compat_parse_qs(parsed_url.query)
2931 qs.update(query)
2932 return compat_urlparse.urlunparse(parsed_url._replace(
2933 query=compat_urllib_parse_urlencode(qs, True)))
2934
2935
2936 def update_Request(req, url=None, data=None, headers={}, query={}):
2937 req_headers = req.headers.copy()
2938 req_headers.update(headers)
2939 req_data = data or req.data
2940 req_url = update_url_query(url or req.get_full_url(), query)
2941 req_get_method = req.get_method()
2942 if req_get_method == 'HEAD':
2943 req_type = HEADRequest
2944 elif req_get_method == 'PUT':
2945 req_type = PUTRequest
2946 else:
2947 req_type = compat_urllib_request.Request
2948 new_req = req_type(
2949 req_url, data=req_data, headers=req_headers,
2950 origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
2951 if hasattr(req, 'timeout'):
2952 new_req.timeout = req.timeout
2953 return new_req
2954
2955
2956 def _multipart_encode_impl(data, boundary):
2957 content_type = 'multipart/form-data; boundary=%s' % boundary
2958
2959 out = b''
2960 for k, v in data.items():
2961 out += b'--' + boundary.encode('ascii') + b'\r\n'
2962 if isinstance(k, compat_str):
2963 k = k.encode('utf-8')
2964 if isinstance(v, compat_str):
2965 v = v.encode('utf-8')
2966 # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
2967 # suggests sending UTF-8 directly. Firefox sends UTF-8, too
2968 content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
2969 if boundary.encode('ascii') in content:
2970 raise ValueError('Boundary overlaps with data')
2971 out += content
2972
2973 out += b'--' + boundary.encode('ascii') + b'--\r\n'
2974
2975 return out, content_type
2976
2977
2978 def multipart_encode(data, boundary=None):
2979 '''
2980 Encode a dict to RFC 7578-compliant form-data
2981
2982 data:
2983 A dict where keys and values can be either Unicode or bytes-like
2984 objects.
2985 boundary:
2986 If specified, it must be a Unicode object to be used as the boundary. Otherwise
2987 a random boundary is generated.
2988
2989 Reference: https://tools.ietf.org/html/rfc7578
2990 '''
2991 has_specified_boundary = boundary is not None
2992
2993 while True:
2994 if boundary is None:
2995 boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
2996
2997 try:
2998 out, content_type = _multipart_encode_impl(data, boundary)
2999 break
3000 except ValueError:
3001 if has_specified_boundary:
3002 raise
3003 boundary = None
3004
3005 return out, content_type
3006
3007
3008 def dict_get(d, key_or_keys, default=None, skip_false_values=True):
3009 if isinstance(key_or_keys, (list, tuple)):
3010 for key in key_or_keys:
3011 if key not in d or d[key] is None or skip_false_values and not d[key]:
3012 continue
3013 return d[key]
3014 return default
3015 return d.get(key_or_keys, default)
3016
3017
3018 def try_get(src, getter, expected_type=None):
3019 for get in variadic(getter):
3020 try:
3021 v = get(src)
3022 except (AttributeError, KeyError, TypeError, IndexError):
3023 pass
3024 else:
3025 if expected_type is None or isinstance(v, expected_type):
3026 return v
3027
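# Illustrative examples (editorial addition):
#   dict_get({'a': None, 'b': 2}, ('a', 'b'))      # -> 2 (None/falsy values are skipped)
#   try_get({'x': [1]}, lambda d: d['x'][0], int)  # -> 1
#   try_get({}, lambda d: d['x'][0])               # -> None (KeyError is swallowed)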
3028
3029 def merge_dicts(*dicts):
3030 merged = {}
3031 for a_dict in dicts:
3032 for k, v in a_dict.items():
3033 if v is None:
3034 continue
3035 if (k not in merged
3036 or (isinstance(v, compat_str) and v
3037 and isinstance(merged[k], compat_str)
3038 and not merged[k])):
3039 merged[k] = v
3040 return merged
3041
3042
3043 def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
3044 return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
3045
3046
3047 US_RATINGS = {
3048 'G': 0,
3049 'PG': 10,
3050 'PG-13': 13,
3051 'R': 16,
3052 'NC': 18,
3053 }
3054
3055
3056 TV_PARENTAL_GUIDELINES = {
3057 'TV-Y': 0,
3058 'TV-Y7': 7,
3059 'TV-G': 0,
3060 'TV-PG': 0,
3061 'TV-14': 14,
3062 'TV-MA': 17,
3063 }
3064
3065
3066 def parse_age_limit(s):
3067 if type(s) == int:
3068 return s if 0 <= s <= 21 else None
3069 if not isinstance(s, compat_basestring):
3070 return None
3071 m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
3072 if m:
3073 return int(m.group('age'))
3074 s = s.upper()
3075 if s in US_RATINGS:
3076 return US_RATINGS[s]
3077 m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
3078 if m:
3079 return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
3080 return None
3081
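# Illustrative examples (editorial addition):
#   parse_age_limit(18)       # -> 18
#   parse_age_limit('18+')    # -> 18
#   parse_age_limit('PG-13')  # -> 13
#   parse_age_limit('TV-MA')  # -> 17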
3082
3083 def strip_jsonp(code):
3084 return re.sub(
3085 r'''(?sx)^
3086 (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
3087 (?:\s*&&\s*(?P=func_name))?
3088 \s*\(\s*(?P<callback_data>.*)\);?
3089 \s*?(?://[^\n]*)*$''',
3090 r'\g<callback_data>', code)
3091
3092
3093 def js_to_json(code, vars={}):
3094 # vars is a dict of var, val pairs to substitute
3095 COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
3096 SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
3097 INTEGER_TABLE = (
3098 (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
3099 (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
3100 )
3101
3102 def fix_kv(m):
3103 v = m.group(0)
3104 if v in ('true', 'false', 'null'):
3105 return v
3106 elif v in ('undefined', 'void 0'):
3107 return 'null'
3108 elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
3109 return ""
3110
3111 if v[0] in ("'", '"'):
3112 v = re.sub(r'(?s)\\.|"', lambda m: {
3113 '"': '\\"',
3114 "\\'": "'",
3115 '\\\n': '',
3116 '\\x': '\\u00',
3117 }.get(m.group(0), m.group(0)), v[1:-1])
3118 else:
3119 for regex, base in INTEGER_TABLE:
3120 im = re.match(regex, v)
3121 if im:
3122 i = int(im.group(1), base)
3123 return '"%d":' % i if v.endswith(':') else '%d' % i
3124
3125 if v in vars:
3126 return vars[v]
3127
3128 return '"%s"' % v
3129
3130 return re.sub(r'''(?sx)
3131 "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
3132 '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
3133 {comment}|,(?={skip}[\]}}])|
3134 void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
3135 \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
3136 [0-9]+(?={skip}:)|
3137 !+
3138 '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
3139
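# Illustrative example (editorial addition); the result is valid JSON:
#   js_to_json("{foo: 'bar', baz: 0x10}")  # -> '{"foo": "bar", "baz": 16}'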
3140
3141 def qualities(quality_ids):
3142 """ Get a numeric quality value out of a list of possible values """
3143 def q(qid):
3144 try:
3145 return quality_ids.index(qid)
3146 except ValueError:
3147 return -1
3148 return q
3149
3150
3151 POSTPROCESS_WHEN = {'pre_process', 'before_dl', 'after_move', 'post_process', 'after_video', 'playlist'}
3152
3153
3154 DEFAULT_OUTTMPL = {
3155 'default': '%(title)s [%(id)s].%(ext)s',
3156 'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
3157 }
3158 OUTTMPL_TYPES = {
3159 'chapter': None,
3160 'subtitle': None,
3161 'thumbnail': None,
3162 'description': 'description',
3163 'annotation': 'annotations.xml',
3164 'infojson': 'info.json',
3165 'link': None,
3166 'pl_video': None,
3167 'pl_thumbnail': None,
3168 'pl_description': 'description',
3169 'pl_infojson': 'info.json',
3170 }
3171
3172 # As of [1] format syntax is:
3173 # %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
3174 # 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
3175 STR_FORMAT_RE_TMPL = r'''(?x)
3176 (?<!%)(?P<prefix>(?:%%)*)
3177 %
3178 (?P<has_key>\((?P<key>{0})\))?
3179 (?P<format>
3180 (?P<conversion>[#0\-+ ]+)?
3181 (?P<min_width>\d+)?
3182 (?P<precision>\.\d+)?
3183 (?P<len_mod>[hlL])? # unused in python
3184 {1} # conversion type
3185 )
3186 '''
3187
3188
3189 STR_FORMAT_TYPES = 'diouxXeEfFgGcrs'
3190
3191
3192 def limit_length(s, length):
3193 """ Add ellipses to overly long strings """
3194 if s is None:
3195 return None
3196 ELLIPSES = '...'
3197 if len(s) > length:
3198 return s[:length - len(ELLIPSES)] + ELLIPSES
3199 return s
3200
3201
3202 def version_tuple(v):
3203 return tuple(int(e) for e in re.split(r'[-.]', v))
3204
3205
3206 def is_outdated_version(version, limit, assume_new=True):
3207 if not version:
3208 return not assume_new
3209 try:
3210 return version_tuple(version) < version_tuple(limit)
3211 except ValueError:
3212 return not assume_new
3213
3214
3215 def ytdl_is_updateable():
3216 """ Returns if yt-dlp can be updated with -U """
3217
3218 from .update import is_non_updateable
3219
3220 return not is_non_updateable()
3221
3222
3223 def args_to_str(args):
3224 # Get a short string representation for a subprocess command
3225 return ' '.join(compat_shlex_quote(a) for a in args)
3226
3227
3228 def error_to_compat_str(err):
3229 err_str = str(err)
3230 # On python 2 error byte string must be decoded with proper
3231 # encoding rather than ascii
3232 if sys.version_info[0] < 3:
3233 err_str = err_str.decode(preferredencoding())
3234 return err_str
3235
3236
3237 def mimetype2ext(mt):
3238 if mt is None:
3239 return None
3240
3241 mt, _, params = mt.partition(';')
3242 mt = mt.strip()
3243
3244 FULL_MAP = {
3245 'audio/mp4': 'm4a',
3246 # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. We use .mp3
3247 # here as it's the most popular one
3248 'audio/mpeg': 'mp3',
3249 'audio/x-wav': 'wav',
3250 'audio/wav': 'wav',
3251 'audio/wave': 'wav',
3252 }
3253
3254 ext = FULL_MAP.get(mt)
3255 if ext is not None:
3256 return ext
3257
3258 SUBTYPE_MAP = {
3259 '3gpp': '3gp',
3260 'smptett+xml': 'tt',
3261 'ttaf+xml': 'dfxp',
3262 'ttml+xml': 'ttml',
3263 'x-flv': 'flv',
3264 'x-mp4-fragmented': 'mp4',
3265 'x-ms-sami': 'sami',
3266 'x-ms-wmv': 'wmv',
3267 'mpegurl': 'm3u8',
3268 'x-mpegurl': 'm3u8',
3269 'vnd.apple.mpegurl': 'm3u8',
3270 'dash+xml': 'mpd',
3271 'f4m+xml': 'f4m',
3272 'hds+xml': 'f4m',
3273 'vnd.ms-sstr+xml': 'ism',
3274 'quicktime': 'mov',
3275 'mp2t': 'ts',
3276 'x-wav': 'wav',
3277 'filmstrip+json': 'fs',
3278 'svg+xml': 'svg',
3279 }
3280
3281 _, _, subtype = mt.rpartition('/')
3282 ext = SUBTYPE_MAP.get(subtype.lower())
3283 if ext is not None:
3284 return ext
3285
3286 SUFFIX_MAP = {
3287 'json': 'json',
3288 'xml': 'xml',
3289 'zip': 'zip',
3290 'gzip': 'gz',
3291 }
3292
3293 _, _, suffix = subtype.partition('+')
3294 ext = SUFFIX_MAP.get(suffix)
3295 if ext is not None:
3296 return ext
3297
3298 return subtype.replace('+', '.')
3299
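# Illustrative examples (editorial addition):
#   mimetype2ext('audio/mpeg')                       # -> 'mp3'
#   mimetype2ext('application/vnd.apple.mpegurl')    # -> 'm3u8'
#   mimetype2ext('video/mp4; codecs="avc1.42E01E"')  # -> 'mp4'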
3300
3301 def ext2mimetype(ext_or_url):
3302 if not ext_or_url:
3303 return None
3304 if '.' not in ext_or_url:
3305 ext_or_url = f'file.{ext_or_url}'
3306 return mimetypes.guess_type(ext_or_url)[0]
3307
3308
3309 def parse_codecs(codecs_str):
3310 # http://tools.ietf.org/html/rfc6381
3311 if not codecs_str:
3312 return {}
3313 split_codecs = list(filter(None, map(
3314 str.strip, codecs_str.strip().strip(',').split(','))))
3315 vcodec, acodec, tcodec, hdr = None, None, None, None
3316 for full_codec in split_codecs:
3317 parts = full_codec.split('.')
3318 codec = parts[0].replace('0', '')
3319 if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
3320 'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
3321 if not vcodec:
3322 vcodec = '.'.join(parts[:4]) if codec in ('vp9', 'av1', 'hvc1') else full_codec
3323 if codec in ('dvh1', 'dvhe'):
3324 hdr = 'DV'
3325 elif codec == 'av1' and len(parts) > 3 and parts[3] == '10':
3326 hdr = 'HDR10'
3327 elif full_codec.replace('0', '').startswith('vp9.2'):
3328 hdr = 'HDR10'
3329 elif codec in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
3330 if not acodec:
3331 acodec = full_codec
3332 elif codec in ('stpp', 'wvtt',):
3333 if not tcodec:
3334 tcodec = full_codec
3335 else:
3336 write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
3337 if vcodec or acodec or tcodec:
3338 return {
3339 'vcodec': vcodec or 'none',
3340 'acodec': acodec or 'none',
3341 'dynamic_range': hdr,
3342 **({'tcodec': tcodec} if tcodec is not None else {}),
3343 }
3344 elif len(split_codecs) == 2:
3345 return {
3346 'vcodec': split_codecs[0],
3347 'acodec': split_codecs[1],
3348 }
3349 return {}
3350
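# Illustrative example (editorial addition):
#   parse_codecs('avc1.64001f, mp4a.40.2')
#   # -> {'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2', 'dynamic_range': None}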
3351
3352 def urlhandle_detect_ext(url_handle):
3353 getheader = url_handle.headers.get
3354
3355 cd = getheader('Content-Disposition')
3356 if cd:
3357 m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
3358 if m:
3359 e = determine_ext(m.group('filename'), default_ext=None)
3360 if e:
3361 return e
3362
3363 return mimetype2ext(getheader('Content-Type'))
3364
3365
3366 def encode_data_uri(data, mime_type):
3367 return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
3368
3369
3370 def age_restricted(content_limit, age_limit):
3371 """ Returns True iff the content should be blocked """
3372
3373 if age_limit is None: # No limit set
3374 return False
3375 if content_limit is None:
3376 return False # Content available for everyone
3377 return age_limit < content_limit
3378
3379
3380 def is_html(first_bytes):
3381 """ Detect whether a file contains HTML by examining its first bytes. """
3382
3383 BOMS = [
3384 (b'\xef\xbb\xbf', 'utf-8'),
3385 (b'\x00\x00\xfe\xff', 'utf-32-be'),
3386 (b'\xff\xfe\x00\x00', 'utf-32-le'),
3387 (b'\xff\xfe', 'utf-16-le'),
3388 (b'\xfe\xff', 'utf-16-be'),
3389 ]
3390 for bom, enc in BOMS:
3391 if first_bytes.startswith(bom):
3392 s = first_bytes[len(bom):].decode(enc, 'replace')
3393 break
3394 else:
3395 s = first_bytes.decode('utf-8', 'replace')
3396
3397 return re.match(r'^\s*<', s)
3398
3399
3400 def determine_protocol(info_dict):
3401 protocol = info_dict.get('protocol')
3402 if protocol is not None:
3403 return protocol
3404
3405 url = sanitize_url(info_dict['url'])
3406 if url.startswith('rtmp'):
3407 return 'rtmp'
3408 elif url.startswith('mms'):
3409 return 'mms'
3410 elif url.startswith('rtsp'):
3411 return 'rtsp'
3412
3413 ext = determine_ext(url)
3414 if ext == 'm3u8':
3415 return 'm3u8'
3416 elif ext == 'f4m':
3417 return 'f4m'
3418
3419 return compat_urllib_parse_urlparse(url).scheme
3420
3421
3422 def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False):
3423 """ Render a list of rows, each as a list of values.
3424 Text after a \t will be right aligned """
3425 def width(string):
3426 return len(remove_terminal_sequences(string).replace('\t', ''))
3427
3428 def get_max_lens(table):
3429 return [max(width(str(v)) for v in col) for col in zip(*table)]
3430
3431 def filter_using_list(row, filterArray):
3432 return [col for take, col in itertools.zip_longest(filterArray, row, fillvalue=True) if take]
3433
3434 max_lens = get_max_lens(data) if hide_empty else []
3435 header_row = filter_using_list(header_row, max_lens)
3436 data = [filter_using_list(row, max_lens) for row in data]
3437
3438 table = [header_row] + data
3439 max_lens = get_max_lens(table)
3440 extra_gap += 1
3441 if delim:
3442 table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data
3443 table[1][-1] = table[1][-1][:-extra_gap] # Remove extra_gap from end of delimiter
3444 for row in table:
3445 for pos, text in enumerate(map(str, row)):
3446 if '\t' in text:
3447 row[pos] = text.replace('\t', ' ' * (max_lens[pos] - width(text))) + ' ' * extra_gap
3448 else:
3449 row[pos] = text + ' ' * (max_lens[pos] - width(text) + extra_gap)
3450 ret = '\n'.join(''.join(row).rstrip() for row in table)
3451 return ret
3452
3453
3454 def _match_one(filter_part, dct, incomplete):
3455 # TODO: Generalize code with YoutubeDL._build_format_filter
3456 STRING_OPERATORS = {
3457 '*=': operator.contains,
3458 '^=': lambda attr, value: attr.startswith(value),
3459 '$=': lambda attr, value: attr.endswith(value),
3460 '~=': lambda attr, value: re.search(value, attr),
3461 }
3462 COMPARISON_OPERATORS = {
3463 **STRING_OPERATORS,
3464 '<=': operator.le, # "<=" must be defined above "<"
3465 '<': operator.lt,
3466 '>=': operator.ge,
3467 '>': operator.gt,
3468 '=': operator.eq,
3469 }
3470
3471 operator_rex = re.compile(r'''(?x)\s*
3472 (?P<key>[a-z_]+)
3473 \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
3474 (?:
3475 (?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
3476 (?P<strval>.+?)
3477 )
3478 \s*$
3479 ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
3480 m = operator_rex.search(filter_part)
3481 if m:
3482 m = m.groupdict()
3483 unnegated_op = COMPARISON_OPERATORS[m['op']]
3484 if m['negation']:
3485 op = lambda attr, value: not unnegated_op(attr, value)
3486 else:
3487 op = unnegated_op
3488 comparison_value = m['quotedstrval'] or m['strval']  # the regex above defines no 'intval' group
3489 if m['quote']:
3490 comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote'])
3491 actual_value = dct.get(m['key'])
3492 numeric_comparison = None
3493 if isinstance(actual_value, compat_numeric_types):
3494 # If the original field is a string and matching comparison value is
3495 # a number we should respect the origin of the original field
3496 # and process comparison value as a string (see
3497 # https://github.com/ytdl-org/youtube-dl/issues/11082)
3498 try:
3499 numeric_comparison = int(comparison_value)
3500 except ValueError:
3501 numeric_comparison = parse_filesize(comparison_value)
3502 if numeric_comparison is None:
3503 numeric_comparison = parse_filesize(f'{comparison_value}B')
3504 if numeric_comparison is None:
3505 numeric_comparison = parse_duration(comparison_value)
3506 if numeric_comparison is not None and m['op'] in STRING_OPERATORS:
3507 raise ValueError('Operator %s only supports string values!' % m['op'])
3508 if actual_value is None:
3509 return incomplete or m['none_inclusive']
3510 return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)
3511
3512 UNARY_OPERATORS = {
3513 '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
3514 '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
3515 }
3516 operator_rex = re.compile(r'''(?x)\s*
3517 (?P<op>%s)\s*(?P<key>[a-z_]+)
3518 \s*$
3519 ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
3520 m = operator_rex.search(filter_part)
3521 if m:
3522 op = UNARY_OPERATORS[m.group('op')]
3523 actual_value = dct.get(m.group('key'))
3524 if incomplete and actual_value is None:
3525 return True
3526 return op(actual_value)
3527
3528 raise ValueError('Invalid filter part %r' % filter_part)
3529
3530
3531 def match_str(filter_str, dct, incomplete=False):
3532 """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false
3533 When incomplete, all conditions passes on missing fields
3534 """
3535 return all(
3536 _match_one(filter_part.replace(r'\&', '&'), dct, incomplete)
3537 for filter_part in re.split(r'(?<!\\)&', filter_str))
3538
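# Illustrative examples (editorial addition):
#   match_str('duration > 60 & like_count >? 100', {'duration': 120})  # -> True
#   match_str('!is_live & title *= "news"',
#             {'is_live': False, 'title': 'evening news'})             # -> True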
3539
3540 def match_filter_func(filter_str):
3541 def _match_func(info_dict, *args, **kwargs):
3542 if match_str(filter_str, info_dict, *args, **kwargs):
3543 return None
3544 else:
3545 video_title = info_dict.get('title', info_dict.get('id', 'video'))
3546 return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
3547 return _match_func
3548
3549
3550 def parse_dfxp_time_expr(time_expr):
3551 if not time_expr:
3552 return
3553
3554 mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
3555 if mobj:
3556 return float(mobj.group('time_offset'))
3557
3558 mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
3559 if mobj:
3560 return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
3561
3562
3563 def srt_subtitles_timecode(seconds):
3564 return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)
3565
3566
3567 def ass_subtitles_timecode(seconds):
3568 time = timetuple_from_msec(seconds * 1000)
3569 return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)
3570
3571
3572 def dfxp2srt(dfxp_data):
3573 '''
3574 @param dfxp_data A bytes-like object containing DFXP data
3575 @returns A unicode object containing converted SRT data
3576 '''
3577 LEGACY_NAMESPACES = (
3578 (b'http://www.w3.org/ns/ttml', [
3579 b'http://www.w3.org/2004/11/ttaf1',
3580 b'http://www.w3.org/2006/04/ttaf1',
3581 b'http://www.w3.org/2006/10/ttaf1',
3582 ]),
3583 (b'http://www.w3.org/ns/ttml#styling', [
3584 b'http://www.w3.org/ns/ttml#style',
3585 ]),
3586 )
3587
3588 SUPPORTED_STYLING = [
3589 'color',
3590 'fontFamily',
3591 'fontSize',
3592 'fontStyle',
3593 'fontWeight',
3594 'textDecoration'
3595 ]
3596
3597 _x = functools.partial(xpath_with_ns, ns_map={
3598 'xml': 'http://www.w3.org/XML/1998/namespace',
3599 'ttml': 'http://www.w3.org/ns/ttml',
3600 'tts': 'http://www.w3.org/ns/ttml#styling',
3601 })
3602
3603 styles = {}
3604 default_style = {}
3605
3606 class TTMLPElementParser(object):
3607 def __init__(self):
3608 # Initialize per instance: mutable class-level lists would be shared between parser instances
3609 self._out, self._unclosed_elements, self._applied_styles = '', [], []
3610
3611 def start(self, tag, attrib):
3612 if tag in (_x('ttml:br'), 'br'):
3613 self._out += '\n'
3614 else:
3615 unclosed_elements = []
3616 style = {}
3617 element_style_id = attrib.get('style')
3618 if default_style:
3619 style.update(default_style)
3620 if element_style_id:
3621 style.update(styles.get(element_style_id, {}))
3622 for prop in SUPPORTED_STYLING:
3623 prop_val = attrib.get(_x('tts:' + prop))
3624 if prop_val:
3625 style[prop] = prop_val
3626 if style:
3627 font = ''
3628 for k, v in sorted(style.items()):
3629 if self._applied_styles and self._applied_styles[-1].get(k) == v:
3630 continue
3631 if k == 'color':
3632 font += ' color="%s"' % v
3633 elif k == 'fontSize':
3634 font += ' size="%s"' % v
3635 elif k == 'fontFamily':
3636 font += ' face="%s"' % v
3637 elif k == 'fontWeight' and v == 'bold':
3638 self._out += '<b>'
3639 unclosed_elements.append('b')
3640 elif k == 'fontStyle' and v == 'italic':
3641 self._out += '<i>'
3642 unclosed_elements.append('i')
3643 elif k == 'textDecoration' and v == 'underline':
3644 self._out += '<u>'
3645 unclosed_elements.append('u')
3646 if font:
3647 self._out += '<font' + font + '>'
3648 unclosed_elements.append('font')
3649 applied_style = {}
3650 if self._applied_styles:
3651 applied_style.update(self._applied_styles[-1])
3652 applied_style.update(style)
3653 self._applied_styles.append(applied_style)
3654 self._unclosed_elements.append(unclosed_elements)
3655
3656 def end(self, tag):
3657 if tag not in (_x('ttml:br'), 'br'):
3658 unclosed_elements = self._unclosed_elements.pop()
3659 for element in reversed(unclosed_elements):
3660 self._out += '</%s>' % element
3661 if unclosed_elements and self._applied_styles:
3662 self._applied_styles.pop()
3663
3664 def data(self, data):
3665 self._out += data
3666
3667 def close(self):
3668 return self._out.strip()
3669
3670 def parse_node(node):
3671 target = TTMLPElementParser()
3672 parser = xml.etree.ElementTree.XMLParser(target=target)
3673 parser.feed(xml.etree.ElementTree.tostring(node))
3674 return parser.close()
3675
3676 for k, v in LEGACY_NAMESPACES:
3677 for ns in v:
3678 dfxp_data = dfxp_data.replace(ns, k)
3679
3680 dfxp = compat_etree_fromstring(dfxp_data)
3681 out = []
3682 paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
3683
3684 if not paras:
3685 raise ValueError('Invalid dfxp/TTML subtitle')
3686
3687 repeat = False
3688 while True:
3689 for style in dfxp.findall(_x('.//ttml:style')):
3690 style_id = style.get('id') or style.get(_x('xml:id'))
3691 if not style_id:
3692 continue
3693 parent_style_id = style.get('style')
3694 if parent_style_id:
3695 if parent_style_id not in styles:
3696 repeat = True
3697 continue
3698 styles[style_id] = styles[parent_style_id].copy()
3699 for prop in SUPPORTED_STYLING:
3700 prop_val = style.get(_x('tts:' + prop))
3701 if prop_val:
3702 styles.setdefault(style_id, {})[prop] = prop_val
3703 if repeat:
3704 repeat = False
3705 else:
3706 break
3707
3708 for p in ('body', 'div'):
3709 ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
3710 if ele is None:
3711 continue
3712 style = styles.get(ele.get('style'))
3713 if not style:
3714 continue
3715 default_style.update(style)
3716
3717 for para, index in zip(paras, itertools.count(1)):
3718 begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
3719 end_time = parse_dfxp_time_expr(para.attrib.get('end'))
3720 dur = parse_dfxp_time_expr(para.attrib.get('dur'))
3721 if begin_time is None:
3722 continue
3723 if not end_time:
3724 if not dur:
3725 continue
3726 end_time = begin_time + dur
3727 out.append('%d\n%s --> %s\n%s\n\n' % (
3728 index,
3729 srt_subtitles_timecode(begin_time),
3730 srt_subtitles_timecode(end_time),
3731 parse_node(para)))
3732
3733 return ''.join(out)
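# Minimal illustrative conversion (added; a hypothetical one-cue TTML document):
#   >>> dfxp2srt(b'<tt xmlns="http://www.w3.org/ns/ttml"><body><div>'
#   ...          b'<p begin="0s" end="1.5s">Hello</p></div></body></tt>')
#   '1\n00:00:00,000 --> 00:00:01,500\nHello\n\n'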
3734
3735
3736 def cli_option(params, command_option, param):
3737 param = params.get(param)
3738 if param is not None:  # convert even falsy values (e.g. 0) so subprocess always gets a string
3739 param = compat_str(param)
3740 return [command_option, param] if param is not None else []
3741
3742
3743 def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
3744 param = params.get(param)
3745 if param is None:
3746 return []
3747 assert isinstance(param, bool)
3748 if separator:
3749 return [command_option + separator + (true_value if param else false_value)]
3750 return [command_option, true_value if param else false_value]
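# Illustrative examples (added) for the CLI helpers above:
#   >>> cli_option({'proxy': 'socks5://127.0.0.1:1080'}, '--proxy', 'proxy')
#   ['--proxy', 'socks5://127.0.0.1:1080']
#   >>> cli_bool_option({'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate')
#   ['--no-check-certificate', 'true']
#   >>> cli_bool_option({'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '=')
#   ['--check-certificate=false']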
3751
3752
3753 def cli_valueless_option(params, command_option, param, expected_value=True):
3754 param = params.get(param)
3755 return [command_option] if param == expected_value else []
3756
3757
3758 def cli_configuration_args(argdict, keys, default=[], use_compat=True):
3759 if isinstance(argdict, (list, tuple)): # for backward compatibility
3760 if use_compat:
3761 return argdict
3762 else:
3763 argdict = None
3764 if argdict is None:
3765 return default
3766 assert isinstance(argdict, dict)
3767
3768 assert isinstance(keys, (list, tuple))
3769 for key_list in keys:
3770 arg_list = list(filter(
3771 lambda x: x is not None,
3772 [argdict.get(key.lower()) for key in variadic(key_list)]))
3773 if arg_list:
3774 return [arg for args in arg_list for arg in args]
3775 return default
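# Illustrative example (added; hypothetical argdict): the first key list that
# yields any arguments wins:
#   >>> cli_configuration_args({'ffmpeg': ['-threads', '2'], 'default': ['-v']}, ['ffmpeg', 'default'])
#   ['-threads', '2']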
3776
3777
3778 def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compat=True):
3779 main_key, exe = main_key.lower(), exe.lower()
3780 root_key = exe if main_key == exe else f'{main_key}+{exe}'
3781 keys = [f'{root_key}{k}' for k in (keys or [''])]
3782 if root_key in keys:
3783 if main_key != exe:
3784 keys.append((main_key, exe))
3785 keys.append('default')
3786 else:
3787 use_compat = False
3788 return cli_configuration_args(argdict, keys, default, use_compat)
3789
3790
3791 class ISO639Utils(object):
3792 # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
3793 _lang_map = {
3794 'aa': 'aar',
3795 'ab': 'abk',
3796 'ae': 'ave',
3797 'af': 'afr',
3798 'ak': 'aka',
3799 'am': 'amh',
3800 'an': 'arg',
3801 'ar': 'ara',
3802 'as': 'asm',
3803 'av': 'ava',
3804 'ay': 'aym',
3805 'az': 'aze',
3806 'ba': 'bak',
3807 'be': 'bel',
3808 'bg': 'bul',
3809 'bh': 'bih',
3810 'bi': 'bis',
3811 'bm': 'bam',
3812 'bn': 'ben',
3813 'bo': 'bod',
3814 'br': 'bre',
3815 'bs': 'bos',
3816 'ca': 'cat',
3817 'ce': 'che',
3818 'ch': 'cha',
3819 'co': 'cos',
3820 'cr': 'cre',
3821 'cs': 'ces',
3822 'cu': 'chu',
3823 'cv': 'chv',
3824 'cy': 'cym',
3825 'da': 'dan',
3826 'de': 'deu',
3827 'dv': 'div',
3828 'dz': 'dzo',
3829 'ee': 'ewe',
3830 'el': 'ell',
3831 'en': 'eng',
3832 'eo': 'epo',
3833 'es': 'spa',
3834 'et': 'est',
3835 'eu': 'eus',
3836 'fa': 'fas',
3837 'ff': 'ful',
3838 'fi': 'fin',
3839 'fj': 'fij',
3840 'fo': 'fao',
3841 'fr': 'fra',
3842 'fy': 'fry',
3843 'ga': 'gle',
3844 'gd': 'gla',
3845 'gl': 'glg',
3846 'gn': 'grn',
3847 'gu': 'guj',
3848 'gv': 'glv',
3849 'ha': 'hau',
3850 'he': 'heb',
3851 'iw': 'heb', # Replaced by he in 1989 revision
3852 'hi': 'hin',
3853 'ho': 'hmo',
3854 'hr': 'hrv',
3855 'ht': 'hat',
3856 'hu': 'hun',
3857 'hy': 'hye',
3858 'hz': 'her',
3859 'ia': 'ina',
3860 'id': 'ind',
3861 'in': 'ind', # Replaced by id in 1989 revision
3862 'ie': 'ile',
3863 'ig': 'ibo',
3864 'ii': 'iii',
3865 'ik': 'ipk',
3866 'io': 'ido',
3867 'is': 'isl',
3868 'it': 'ita',
3869 'iu': 'iku',
3870 'ja': 'jpn',
3871 'jv': 'jav',
3872 'ka': 'kat',
3873 'kg': 'kon',
3874 'ki': 'kik',
3875 'kj': 'kua',
3876 'kk': 'kaz',
3877 'kl': 'kal',
3878 'km': 'khm',
3879 'kn': 'kan',
3880 'ko': 'kor',
3881 'kr': 'kau',
3882 'ks': 'kas',
3883 'ku': 'kur',
3884 'kv': 'kom',
3885 'kw': 'cor',
3886 'ky': 'kir',
3887 'la': 'lat',
3888 'lb': 'ltz',
3889 'lg': 'lug',
3890 'li': 'lim',
3891 'ln': 'lin',
3892 'lo': 'lao',
3893 'lt': 'lit',
3894 'lu': 'lub',
3895 'lv': 'lav',
3896 'mg': 'mlg',
3897 'mh': 'mah',
3898 'mi': 'mri',
3899 'mk': 'mkd',
3900 'ml': 'mal',
3901 'mn': 'mon',
3902 'mr': 'mar',
3903 'ms': 'msa',
3904 'mt': 'mlt',
3905 'my': 'mya',
3906 'na': 'nau',
3907 'nb': 'nob',
3908 'nd': 'nde',
3909 'ne': 'nep',
3910 'ng': 'ndo',
3911 'nl': 'nld',
3912 'nn': 'nno',
3913 'no': 'nor',
3914 'nr': 'nbl',
3915 'nv': 'nav',
3916 'ny': 'nya',
3917 'oc': 'oci',
3918 'oj': 'oji',
3919 'om': 'orm',
3920 'or': 'ori',
3921 'os': 'oss',
3922 'pa': 'pan',
3923 'pi': 'pli',
3924 'pl': 'pol',
3925 'ps': 'pus',
3926 'pt': 'por',
3927 'qu': 'que',
3928 'rm': 'roh',
3929 'rn': 'run',
3930 'ro': 'ron',
3931 'ru': 'rus',
3932 'rw': 'kin',
3933 'sa': 'san',
3934 'sc': 'srd',
3935 'sd': 'snd',
3936 'se': 'sme',
3937 'sg': 'sag',
3938 'si': 'sin',
3939 'sk': 'slk',
3940 'sl': 'slv',
3941 'sm': 'smo',
3942 'sn': 'sna',
3943 'so': 'som',
3944 'sq': 'sqi',
3945 'sr': 'srp',
3946 'ss': 'ssw',
3947 'st': 'sot',
3948 'su': 'sun',
3949 'sv': 'swe',
3950 'sw': 'swa',
3951 'ta': 'tam',
3952 'te': 'tel',
3953 'tg': 'tgk',
3954 'th': 'tha',
3955 'ti': 'tir',
3956 'tk': 'tuk',
3957 'tl': 'tgl',
3958 'tn': 'tsn',
3959 'to': 'ton',
3960 'tr': 'tur',
3961 'ts': 'tso',
3962 'tt': 'tat',
3963 'tw': 'twi',
3964 'ty': 'tah',
3965 'ug': 'uig',
3966 'uk': 'ukr',
3967 'ur': 'urd',
3968 'uz': 'uzb',
3969 've': 'ven',
3970 'vi': 'vie',
3971 'vo': 'vol',
3972 'wa': 'wln',
3973 'wo': 'wol',
3974 'xh': 'xho',
3975 'yi': 'yid',
3976 'ji': 'yid', # Replaced by yi in 1989 revision
3977 'yo': 'yor',
3978 'za': 'zha',
3979 'zh': 'zho',
3980 'zu': 'zul',
3981 }
3982
3983 @classmethod
3984 def short2long(cls, code):
3985 """Convert language code from ISO 639-1 to ISO 639-2/T"""
3986 return cls._lang_map.get(code[:2])
3987
3988 @classmethod
3989 def long2short(cls, code):
3990 """Convert language code from ISO 639-2/T to ISO 639-1"""
3991 for short_name, long_name in cls._lang_map.items():
3992 if long_name == code:
3993 return short_name
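# Illustrative examples (added):
#   >>> ISO639Utils.short2long('en')
#   'eng'
#   >>> ISO639Utils.long2short('deu')
#   'de'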
3994
3995
3996 class ISO3166Utils(object):
3997 # From http://data.okfn.org/data/core/country-list
3998 _country_map = {
3999 'AF': 'Afghanistan',
4000 'AX': 'Åland Islands',
4001 'AL': 'Albania',
4002 'DZ': 'Algeria',
4003 'AS': 'American Samoa',
4004 'AD': 'Andorra',
4005 'AO': 'Angola',
4006 'AI': 'Anguilla',
4007 'AQ': 'Antarctica',
4008 'AG': 'Antigua and Barbuda',
4009 'AR': 'Argentina',
4010 'AM': 'Armenia',
4011 'AW': 'Aruba',
4012 'AU': 'Australia',
4013 'AT': 'Austria',
4014 'AZ': 'Azerbaijan',
4015 'BS': 'Bahamas',
4016 'BH': 'Bahrain',
4017 'BD': 'Bangladesh',
4018 'BB': 'Barbados',
4019 'BY': 'Belarus',
4020 'BE': 'Belgium',
4021 'BZ': 'Belize',
4022 'BJ': 'Benin',
4023 'BM': 'Bermuda',
4024 'BT': 'Bhutan',
4025 'BO': 'Bolivia, Plurinational State of',
4026 'BQ': 'Bonaire, Sint Eustatius and Saba',
4027 'BA': 'Bosnia and Herzegovina',
4028 'BW': 'Botswana',
4029 'BV': 'Bouvet Island',
4030 'BR': 'Brazil',
4031 'IO': 'British Indian Ocean Territory',
4032 'BN': 'Brunei Darussalam',
4033 'BG': 'Bulgaria',
4034 'BF': 'Burkina Faso',
4035 'BI': 'Burundi',
4036 'KH': 'Cambodia',
4037 'CM': 'Cameroon',
4038 'CA': 'Canada',
4039 'CV': 'Cape Verde',
4040 'KY': 'Cayman Islands',
4041 'CF': 'Central African Republic',
4042 'TD': 'Chad',
4043 'CL': 'Chile',
4044 'CN': 'China',
4045 'CX': 'Christmas Island',
4046 'CC': 'Cocos (Keeling) Islands',
4047 'CO': 'Colombia',
4048 'KM': 'Comoros',
4049 'CG': 'Congo',
4050 'CD': 'Congo, the Democratic Republic of the',
4051 'CK': 'Cook Islands',
4052 'CR': 'Costa Rica',
4053 'CI': 'Côte d\'Ivoire',
4054 'HR': 'Croatia',
4055 'CU': 'Cuba',
4056 'CW': 'Curaçao',
4057 'CY': 'Cyprus',
4058 'CZ': 'Czech Republic',
4059 'DK': 'Denmark',
4060 'DJ': 'Djibouti',
4061 'DM': 'Dominica',
4062 'DO': 'Dominican Republic',
4063 'EC': 'Ecuador',
4064 'EG': 'Egypt',
4065 'SV': 'El Salvador',
4066 'GQ': 'Equatorial Guinea',
4067 'ER': 'Eritrea',
4068 'EE': 'Estonia',
4069 'ET': 'Ethiopia',
4070 'FK': 'Falkland Islands (Malvinas)',
4071 'FO': 'Faroe Islands',
4072 'FJ': 'Fiji',
4073 'FI': 'Finland',
4074 'FR': 'France',
4075 'GF': 'French Guiana',
4076 'PF': 'French Polynesia',
4077 'TF': 'French Southern Territories',
4078 'GA': 'Gabon',
4079 'GM': 'Gambia',
4080 'GE': 'Georgia',
4081 'DE': 'Germany',
4082 'GH': 'Ghana',
4083 'GI': 'Gibraltar',
4084 'GR': 'Greece',
4085 'GL': 'Greenland',
4086 'GD': 'Grenada',
4087 'GP': 'Guadeloupe',
4088 'GU': 'Guam',
4089 'GT': 'Guatemala',
4090 'GG': 'Guernsey',
4091 'GN': 'Guinea',
4092 'GW': 'Guinea-Bissau',
4093 'GY': 'Guyana',
4094 'HT': 'Haiti',
4095 'HM': 'Heard Island and McDonald Islands',
4096 'VA': 'Holy See (Vatican City State)',
4097 'HN': 'Honduras',
4098 'HK': 'Hong Kong',
4099 'HU': 'Hungary',
4100 'IS': 'Iceland',
4101 'IN': 'India',
4102 'ID': 'Indonesia',
4103 'IR': 'Iran, Islamic Republic of',
4104 'IQ': 'Iraq',
4105 'IE': 'Ireland',
4106 'IM': 'Isle of Man',
4107 'IL': 'Israel',
4108 'IT': 'Italy',
4109 'JM': 'Jamaica',
4110 'JP': 'Japan',
4111 'JE': 'Jersey',
4112 'JO': 'Jordan',
4113 'KZ': 'Kazakhstan',
4114 'KE': 'Kenya',
4115 'KI': 'Kiribati',
4116 'KP': 'Korea, Democratic People\'s Republic of',
4117 'KR': 'Korea, Republic of',
4118 'KW': 'Kuwait',
4119 'KG': 'Kyrgyzstan',
4120 'LA': 'Lao People\'s Democratic Republic',
4121 'LV': 'Latvia',
4122 'LB': 'Lebanon',
4123 'LS': 'Lesotho',
4124 'LR': 'Liberia',
4125 'LY': 'Libya',
4126 'LI': 'Liechtenstein',
4127 'LT': 'Lithuania',
4128 'LU': 'Luxembourg',
4129 'MO': 'Macao',
4130 'MK': 'Macedonia, the Former Yugoslav Republic of',
4131 'MG': 'Madagascar',
4132 'MW': 'Malawi',
4133 'MY': 'Malaysia',
4134 'MV': 'Maldives',
4135 'ML': 'Mali',
4136 'MT': 'Malta',
4137 'MH': 'Marshall Islands',
4138 'MQ': 'Martinique',
4139 'MR': 'Mauritania',
4140 'MU': 'Mauritius',
4141 'YT': 'Mayotte',
4142 'MX': 'Mexico',
4143 'FM': 'Micronesia, Federated States of',
4144 'MD': 'Moldova, Republic of',
4145 'MC': 'Monaco',
4146 'MN': 'Mongolia',
4147 'ME': 'Montenegro',
4148 'MS': 'Montserrat',
4149 'MA': 'Morocco',
4150 'MZ': 'Mozambique',
4151 'MM': 'Myanmar',
4152 'NA': 'Namibia',
4153 'NR': 'Nauru',
4154 'NP': 'Nepal',
4155 'NL': 'Netherlands',
4156 'NC': 'New Caledonia',
4157 'NZ': 'New Zealand',
4158 'NI': 'Nicaragua',
4159 'NE': 'Niger',
4160 'NG': 'Nigeria',
4161 'NU': 'Niue',
4162 'NF': 'Norfolk Island',
4163 'MP': 'Northern Mariana Islands',
4164 'NO': 'Norway',
4165 'OM': 'Oman',
4166 'PK': 'Pakistan',
4167 'PW': 'Palau',
4168 'PS': 'Palestine, State of',
4169 'PA': 'Panama',
4170 'PG': 'Papua New Guinea',
4171 'PY': 'Paraguay',
4172 'PE': 'Peru',
4173 'PH': 'Philippines',
4174 'PN': 'Pitcairn',
4175 'PL': 'Poland',
4176 'PT': 'Portugal',
4177 'PR': 'Puerto Rico',
4178 'QA': 'Qatar',
4179 'RE': 'Réunion',
4180 'RO': 'Romania',
4181 'RU': 'Russian Federation',
4182 'RW': 'Rwanda',
4183 'BL': 'Saint Barthélemy',
4184 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
4185 'KN': 'Saint Kitts and Nevis',
4186 'LC': 'Saint Lucia',
4187 'MF': 'Saint Martin (French part)',
4188 'PM': 'Saint Pierre and Miquelon',
4189 'VC': 'Saint Vincent and the Grenadines',
4190 'WS': 'Samoa',
4191 'SM': 'San Marino',
4192 'ST': 'Sao Tome and Principe',
4193 'SA': 'Saudi Arabia',
4194 'SN': 'Senegal',
4195 'RS': 'Serbia',
4196 'SC': 'Seychelles',
4197 'SL': 'Sierra Leone',
4198 'SG': 'Singapore',
4199 'SX': 'Sint Maarten (Dutch part)',
4200 'SK': 'Slovakia',
4201 'SI': 'Slovenia',
4202 'SB': 'Solomon Islands',
4203 'SO': 'Somalia',
4204 'ZA': 'South Africa',
4205 'GS': 'South Georgia and the South Sandwich Islands',
4206 'SS': 'South Sudan',
4207 'ES': 'Spain',
4208 'LK': 'Sri Lanka',
4209 'SD': 'Sudan',
4210 'SR': 'Suriname',
4211 'SJ': 'Svalbard and Jan Mayen',
4212 'SZ': 'Swaziland',
4213 'SE': 'Sweden',
4214 'CH': 'Switzerland',
4215 'SY': 'Syrian Arab Republic',
4216 'TW': 'Taiwan, Province of China',
4217 'TJ': 'Tajikistan',
4218 'TZ': 'Tanzania, United Republic of',
4219 'TH': 'Thailand',
4220 'TL': 'Timor-Leste',
4221 'TG': 'Togo',
4222 'TK': 'Tokelau',
4223 'TO': 'Tonga',
4224 'TT': 'Trinidad and Tobago',
4225 'TN': 'Tunisia',
4226 'TR': 'Turkey',
4227 'TM': 'Turkmenistan',
4228 'TC': 'Turks and Caicos Islands',
4229 'TV': 'Tuvalu',
4230 'UG': 'Uganda',
4231 'UA': 'Ukraine',
4232 'AE': 'United Arab Emirates',
4233 'GB': 'United Kingdom',
4234 'US': 'United States',
4235 'UM': 'United States Minor Outlying Islands',
4236 'UY': 'Uruguay',
4237 'UZ': 'Uzbekistan',
4238 'VU': 'Vanuatu',
4239 'VE': 'Venezuela, Bolivarian Republic of',
4240 'VN': 'Viet Nam',
4241 'VG': 'Virgin Islands, British',
4242 'VI': 'Virgin Islands, U.S.',
4243 'WF': 'Wallis and Futuna',
4244 'EH': 'Western Sahara',
4245 'YE': 'Yemen',
4246 'ZM': 'Zambia',
4247 'ZW': 'Zimbabwe',
4248 }
4249
4250 @classmethod
4251 def short2full(cls, code):
4252 """Convert an ISO 3166-2 country code to the corresponding full name"""
4253 return cls._country_map.get(code.upper())
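# Illustrative example (added):
#   >>> ISO3166Utils.short2full('DE')
#   'Germany'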
4254
4255
4256 class GeoUtils(object):
4257 # Major IPv4 address blocks per country
4258 _country_ip_map = {
4259 'AD': '46.172.224.0/19',
4260 'AE': '94.200.0.0/13',
4261 'AF': '149.54.0.0/17',
4262 'AG': '209.59.64.0/18',
4263 'AI': '204.14.248.0/21',
4264 'AL': '46.99.0.0/16',
4265 'AM': '46.70.0.0/15',
4266 'AO': '105.168.0.0/13',
4267 'AP': '182.50.184.0/21',
4268 'AQ': '23.154.160.0/24',
4269 'AR': '181.0.0.0/12',
4270 'AS': '202.70.112.0/20',
4271 'AT': '77.116.0.0/14',
4272 'AU': '1.128.0.0/11',
4273 'AW': '181.41.0.0/18',
4274 'AX': '185.217.4.0/22',
4275 'AZ': '5.197.0.0/16',
4276 'BA': '31.176.128.0/17',
4277 'BB': '65.48.128.0/17',
4278 'BD': '114.130.0.0/16',
4279 'BE': '57.0.0.0/8',
4280 'BF': '102.178.0.0/15',
4281 'BG': '95.42.0.0/15',
4282 'BH': '37.131.0.0/17',
4283 'BI': '154.117.192.0/18',
4284 'BJ': '137.255.0.0/16',
4285 'BL': '185.212.72.0/23',
4286 'BM': '196.12.64.0/18',
4287 'BN': '156.31.0.0/16',
4288 'BO': '161.56.0.0/16',
4289 'BQ': '161.0.80.0/20',
4290 'BR': '191.128.0.0/12',
4291 'BS': '24.51.64.0/18',
4292 'BT': '119.2.96.0/19',
4293 'BW': '168.167.0.0/16',
4294 'BY': '178.120.0.0/13',
4295 'BZ': '179.42.192.0/18',
4296 'CA': '99.224.0.0/11',
4297 'CD': '41.243.0.0/16',
4298 'CF': '197.242.176.0/21',
4299 'CG': '160.113.0.0/16',
4300 'CH': '85.0.0.0/13',
4301 'CI': '102.136.0.0/14',
4302 'CK': '202.65.32.0/19',
4303 'CL': '152.172.0.0/14',
4304 'CM': '102.244.0.0/14',
4305 'CN': '36.128.0.0/10',
4306 'CO': '181.240.0.0/12',
4307 'CR': '201.192.0.0/12',
4308 'CU': '152.206.0.0/15',
4309 'CV': '165.90.96.0/19',
4310 'CW': '190.88.128.0/17',
4311 'CY': '31.153.0.0/16',
4312 'CZ': '88.100.0.0/14',
4313 'DE': '53.0.0.0/8',
4314 'DJ': '197.241.0.0/17',
4315 'DK': '87.48.0.0/12',
4316 'DM': '192.243.48.0/20',
4317 'DO': '152.166.0.0/15',
4318 'DZ': '41.96.0.0/12',
4319 'EC': '186.68.0.0/15',
4320 'EE': '90.190.0.0/15',
4321 'EG': '156.160.0.0/11',
4322 'ER': '196.200.96.0/20',
4323 'ES': '88.0.0.0/11',
4324 'ET': '196.188.0.0/14',
4325 'EU': '2.16.0.0/13',
4326 'FI': '91.152.0.0/13',
4327 'FJ': '144.120.0.0/16',
4328 'FK': '80.73.208.0/21',
4329 'FM': '119.252.112.0/20',
4330 'FO': '88.85.32.0/19',
4331 'FR': '90.0.0.0/9',
4332 'GA': '41.158.0.0/15',
4333 'GB': '25.0.0.0/8',
4334 'GD': '74.122.88.0/21',
4335 'GE': '31.146.0.0/16',
4336 'GF': '161.22.64.0/18',
4337 'GG': '62.68.160.0/19',
4338 'GH': '154.160.0.0/12',
4339 'GI': '95.164.0.0/16',
4340 'GL': '88.83.0.0/19',
4341 'GM': '160.182.0.0/15',
4342 'GN': '197.149.192.0/18',
4343 'GP': '104.250.0.0/19',
4344 'GQ': '105.235.224.0/20',
4345 'GR': '94.64.0.0/13',
4346 'GT': '168.234.0.0/16',
4347 'GU': '168.123.0.0/16',
4348 'GW': '197.214.80.0/20',
4349 'GY': '181.41.64.0/18',
4350 'HK': '113.252.0.0/14',
4351 'HN': '181.210.0.0/16',
4352 'HR': '93.136.0.0/13',
4353 'HT': '148.102.128.0/17',
4354 'HU': '84.0.0.0/14',
4355 'ID': '39.192.0.0/10',
4356 'IE': '87.32.0.0/12',
4357 'IL': '79.176.0.0/13',
4358 'IM': '5.62.80.0/20',
4359 'IN': '117.192.0.0/10',
4360 'IO': '203.83.48.0/21',
4361 'IQ': '37.236.0.0/14',
4362 'IR': '2.176.0.0/12',
4363 'IS': '82.221.0.0/16',
4364 'IT': '79.0.0.0/10',
4365 'JE': '87.244.64.0/18',
4366 'JM': '72.27.0.0/17',
4367 'JO': '176.29.0.0/16',
4368 'JP': '133.0.0.0/8',
4369 'KE': '105.48.0.0/12',
4370 'KG': '158.181.128.0/17',
4371 'KH': '36.37.128.0/17',
4372 'KI': '103.25.140.0/22',
4373 'KM': '197.255.224.0/20',
4374 'KN': '198.167.192.0/19',
4375 'KP': '175.45.176.0/22',
4376 'KR': '175.192.0.0/10',
4377 'KW': '37.36.0.0/14',
4378 'KY': '64.96.0.0/15',
4379 'KZ': '2.72.0.0/13',
4380 'LA': '115.84.64.0/18',
4381 'LB': '178.135.0.0/16',
4382 'LC': '24.92.144.0/20',
4383 'LI': '82.117.0.0/19',
4384 'LK': '112.134.0.0/15',
4385 'LR': '102.183.0.0/16',
4386 'LS': '129.232.0.0/17',
4387 'LT': '78.56.0.0/13',
4388 'LU': '188.42.0.0/16',
4389 'LV': '46.109.0.0/16',
4390 'LY': '41.252.0.0/14',
4391 'MA': '105.128.0.0/11',
4392 'MC': '88.209.64.0/18',
4393 'MD': '37.246.0.0/16',
4394 'ME': '178.175.0.0/17',
4395 'MF': '74.112.232.0/21',
4396 'MG': '154.126.0.0/17',
4397 'MH': '117.103.88.0/21',
4398 'MK': '77.28.0.0/15',
4399 'ML': '154.118.128.0/18',
4400 'MM': '37.111.0.0/17',
4401 'MN': '49.0.128.0/17',
4402 'MO': '60.246.0.0/16',
4403 'MP': '202.88.64.0/20',
4404 'MQ': '109.203.224.0/19',
4405 'MR': '41.188.64.0/18',
4406 'MS': '208.90.112.0/22',
4407 'MT': '46.11.0.0/16',
4408 'MU': '105.16.0.0/12',
4409 'MV': '27.114.128.0/18',
4410 'MW': '102.70.0.0/15',
4411 'MX': '187.192.0.0/11',
4412 'MY': '175.136.0.0/13',
4413 'MZ': '197.218.0.0/15',
4414 'NA': '41.182.0.0/16',
4415 'NC': '101.101.0.0/18',
4416 'NE': '197.214.0.0/18',
4417 'NF': '203.17.240.0/22',
4418 'NG': '105.112.0.0/12',
4419 'NI': '186.76.0.0/15',
4420 'NL': '145.96.0.0/11',
4421 'NO': '84.208.0.0/13',
4422 'NP': '36.252.0.0/15',
4423 'NR': '203.98.224.0/19',
4424 'NU': '49.156.48.0/22',
4425 'NZ': '49.224.0.0/14',
4426 'OM': '5.36.0.0/15',
4427 'PA': '186.72.0.0/15',
4428 'PE': '186.160.0.0/14',
4429 'PF': '123.50.64.0/18',
4430 'PG': '124.240.192.0/19',
4431 'PH': '49.144.0.0/13',
4432 'PK': '39.32.0.0/11',
4433 'PL': '83.0.0.0/11',
4434 'PM': '70.36.0.0/20',
4435 'PR': '66.50.0.0/16',
4436 'PS': '188.161.0.0/16',
4437 'PT': '85.240.0.0/13',
4438 'PW': '202.124.224.0/20',
4439 'PY': '181.120.0.0/14',
4440 'QA': '37.210.0.0/15',
4441 'RE': '102.35.0.0/16',
4442 'RO': '79.112.0.0/13',
4443 'RS': '93.86.0.0/15',
4444 'RU': '5.136.0.0/13',
4445 'RW': '41.186.0.0/16',
4446 'SA': '188.48.0.0/13',
4447 'SB': '202.1.160.0/19',
4448 'SC': '154.192.0.0/11',
4449 'SD': '102.120.0.0/13',
4450 'SE': '78.64.0.0/12',
4451 'SG': '8.128.0.0/10',
4452 'SI': '188.196.0.0/14',
4453 'SK': '78.98.0.0/15',
4454 'SL': '102.143.0.0/17',
4455 'SM': '89.186.32.0/19',
4456 'SN': '41.82.0.0/15',
4457 'SO': '154.115.192.0/18',
4458 'SR': '186.179.128.0/17',
4459 'SS': '105.235.208.0/21',
4460 'ST': '197.159.160.0/19',
4461 'SV': '168.243.0.0/16',
4462 'SX': '190.102.0.0/20',
4463 'SY': '5.0.0.0/16',
4464 'SZ': '41.84.224.0/19',
4465 'TC': '65.255.48.0/20',
4466 'TD': '154.68.128.0/19',
4467 'TG': '196.168.0.0/14',
4468 'TH': '171.96.0.0/13',
4469 'TJ': '85.9.128.0/18',
4470 'TK': '27.96.24.0/21',
4471 'TL': '180.189.160.0/20',
4472 'TM': '95.85.96.0/19',
4473 'TN': '197.0.0.0/11',
4474 'TO': '175.176.144.0/21',
4475 'TR': '78.160.0.0/11',
4476 'TT': '186.44.0.0/15',
4477 'TV': '202.2.96.0/19',
4478 'TW': '120.96.0.0/11',
4479 'TZ': '156.156.0.0/14',
4480 'UA': '37.52.0.0/14',
4481 'UG': '102.80.0.0/13',
4482 'US': '6.0.0.0/8',
4483 'UY': '167.56.0.0/13',
4484 'UZ': '84.54.64.0/18',
4485 'VA': '212.77.0.0/19',
4486 'VC': '207.191.240.0/21',
4487 'VE': '186.88.0.0/13',
4488 'VG': '66.81.192.0/20',
4489 'VI': '146.226.0.0/16',
4490 'VN': '14.160.0.0/11',
4491 'VU': '202.80.32.0/20',
4492 'WF': '117.20.32.0/21',
4493 'WS': '202.4.32.0/19',
4494 'YE': '134.35.0.0/16',
4495 'YT': '41.242.116.0/22',
4496 'ZA': '41.0.0.0/11',
4497 'ZM': '102.144.0.0/13',
4498 'ZW': '102.177.192.0/18',
4499 }
4500
4501 @classmethod
4502 def random_ipv4(cls, code_or_block):
4503 if len(code_or_block) == 2:
4504 block = cls._country_ip_map.get(code_or_block.upper())
4505 if not block:
4506 return None
4507 else:
4508 block = code_or_block
4509 addr, preflen = block.split('/')
4510 addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
4511 addr_max = addr_min | (0xffffffff >> int(preflen))
4512 return compat_str(socket.inet_ntoa(
4513 compat_struct_pack('!L', random.randint(addr_min, addr_max))))
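# For example (added note), GeoUtils.random_ipv4('DE') returns a random address
# inside 53.0.0.0/8, while GeoUtils.random_ipv4('203.0.113.0/24') samples the
# given block directly; the output is random, so no fixed value is shown here.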
4514
4515
4516 class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
4517 def __init__(self, proxies=None):
4518 # Set default handlers
4519 for type in ('http', 'https'):
4520 setattr(self, '%s_open' % type,
4521 lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
4522 meth(r, proxy, type))
4523 compat_urllib_request.ProxyHandler.__init__(self, proxies)
4524
4525 def proxy_open(self, req, proxy, type):
4526 req_proxy = req.headers.get('Ytdl-request-proxy')
4527 if req_proxy is not None:
4528 proxy = req_proxy
4529 del req.headers['Ytdl-request-proxy']
4530
4531 if proxy == '__noproxy__':
4532 return None # No Proxy
4533 if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
4534 req.add_header('Ytdl-socks-proxy', proxy)
4535 # yt-dlp's http/https handlers do the actual wrapping of the socket with SOCKS
4536 return None
4537 return compat_urllib_request.ProxyHandler.proxy_open(
4538 self, req, proxy, type)
4539
4540
4541 # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
4542 # released into Public Domain
4543 # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
4544
4545 def long_to_bytes(n, blocksize=0):
4546 """long_to_bytes(n:long, blocksize:int) : string
4547 Convert a long integer to a byte string.
4548
4549 If optional blocksize is given and greater than zero, pad the front of the
4550 byte string with binary zeros so that the length is a multiple of
4551 blocksize.
4552 """
4553 # after much testing, this algorithm was deemed to be the fastest
4554 s = b''
4555 n = int(n)
4556 while n > 0:
4557 s = compat_struct_pack('>I', n & 0xffffffff) + s
4558 n = n >> 32
4559 # strip off leading zeros
4560 for i in range(len(s)):
4561 if s[i] != b'\000'[0]:
4562 break
4563 else:
4564 # only happens when n == 0
4565 s = b'\000'
4566 i = 0
4567 s = s[i:]
4568 # add back some pad bytes. this could be done more efficiently w.r.t. the
4569 # de-padding being done above, but sigh...
4570 if blocksize > 0 and len(s) % blocksize:
4571 s = (blocksize - len(s) % blocksize) * b'\000' + s
4572 return s
4573
4574
4575 def bytes_to_long(s):
4576 """bytes_to_long(string) : long
4577 Convert a byte string to a long integer.
4578
4579 This is (essentially) the inverse of long_to_bytes().
4580 """
4581 acc = 0
4582 length = len(s)
4583 if length % 4:
4584 extra = (4 - length % 4)
4585 s = b'\000' * extra + s
4586 length = length + extra
4587 for i in range(0, length, 4):
4588 acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
4589 return acc
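# Illustrative round trip (added):
#   >>> long_to_bytes(256)
#   b'\x01\x00'
#   >>> bytes_to_long(b'\x01\x00')
#   256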
4590
4591
4592 def ohdave_rsa_encrypt(data, exponent, modulus):
4593 '''
4594 Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
4595
4596 Input:
4597 data: data to encrypt, bytes-like object
4598 exponent, modulus: parameter e and N of RSA algorithm, both integer
4599 Output: hex string of encrypted data
4600
4601 Limitation: supports one block encryption only
4602 '''
4603
4604 payload = int(binascii.hexlify(data[::-1]), 16)
4605 encrypted = pow(payload, exponent, modulus)
4606 return '%x' % encrypted
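# Illustrative example (added; toy parameters e=7, N=33). Note that the input
# bytes are interpreted in reverse (little-endian) order:
#   >>> ohdave_rsa_encrypt(b'\x02', 7, 33)  # pow(2, 7, 33) == 29 == 0x1d
#   '1d'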
4607
4608
4609 def pkcs1pad(data, length):
4610 """
4611 Padding input data with PKCS#1 scheme
4612
4613 @param {int[]} data input data
4614 @param {int} length target length
4615 @returns {int[]} padded data
4616 """
4617 if len(data) > length - 11:
4618 raise ValueError('Input data too long for PKCS#1 padding')
4619
4620 pseudo_random = [random.randint(1, 255) for _ in range(length - len(data) - 3)]  # PKCS#1 v1.5 requires nonzero padding bytes
4621 return [0, 2] + pseudo_random + [0] + data
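# Illustrative example (added): only the deterministic parts of the padded
# block are shown, [0, 2] at the front and the 0 separator before the data:
#   >>> padded = pkcs1pad([65, 66], 24)
#   >>> padded[:2], padded[-3:], len(padded)
#   ([0, 2], [0, 65, 66], 24)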
4622
4623
4624 def encode_base_n(num, n, table=None):
4625 FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
4626 if not table:
4627 table = FULL_TABLE[:n]
4628
4629 if n > len(table):
4630 raise ValueError('base %d exceeds table length %d' % (n, len(table)))
4631
4632 if num == 0:
4633 return table[0]
4634
4635 ret = ''
4636 while num:
4637 ret = table[num % n] + ret
4638 num = num // n
4639 return ret
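# Illustrative examples (added):
#   >>> encode_base_n(255, 16)
#   'ff'
#   >>> encode_base_n(5, 2, 'ox')  # custom two-symbol table: 5 == 0b101
#   'xox'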
4640
4641
4642 def decode_packed_codes(code):
4643 mobj = re.search(PACKED_CODES_RE, code)
4644 obfuscated_code, base, count, symbols = mobj.groups()
4645 base = int(base)
4646 count = int(count)
4647 symbols = symbols.split('|')
4648 symbol_table = {}
4649
4650 while count:
4651 count -= 1
4652 base_n_count = encode_base_n(count, base)
4653 symbol_table[base_n_count] = symbols[count] or base_n_count
4654
4655 return re.sub(
4656 r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
4657 obfuscated_code)
4658
4659
4660 def caesar(s, alphabet, shift):
4661 if shift == 0:
4662 return s
4663 l = len(alphabet)
4664 return ''.join(
4665 alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
4666 for c in s)
4667
4668
4669 def rot47(s):
4670 return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)
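# Illustrative examples (added). rot47 is an involution (two shifts of 47 walk
# the full 94-character alphabet), so applying it again decodes:
#   >>> caesar('abc', 'abcdefghijklmnopqrstuvwxyz', 2)
#   'cde'
#   >>> rot47('foo')
#   '7@@'
#   >>> rot47('7@@')
#   'foo'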
4671
4672
4673 def parse_m3u8_attributes(attrib):
4674 info = {}
4675 for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
4676 if val.startswith('"'):
4677 val = val[1:-1]
4678 info[key] = val
4679 return info
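# Illustrative example (added): quoted values may contain commas:
#   >>> parse_m3u8_attributes('BANDWIDTH=1280000,CODECS="avc1.4d401f,mp4a.40.2"')
#   {'BANDWIDTH': '1280000', 'CODECS': 'avc1.4d401f,mp4a.40.2'}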
4680
4681
4682 def urshift(val, n):
4683 return val >> n if val >= 0 else (val + 0x100000000) >> n
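# Illustrative example (added): an unsigned right shift, like JavaScript's >>>
# operator on 32-bit values:
#   >>> urshift(-1, 28)
#   15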
4684
4685
4686 # Based on png2str() written by @gdkchan and improved by @yokrysty
4687 # Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
4688 def decode_png(png_data):
4689 # Reference: https://www.w3.org/TR/PNG/
4690 header = png_data[8:]
4691
4692 if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
4693 raise IOError('Not a valid PNG file.')
4694
4695 int_map = {1: '>B', 2: '>H', 4: '>I'}
4696 unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]
4697
4698 chunks = []
4699
4700 while header:
4701 length = unpack_integer(header[:4])
4702 header = header[4:]
4703
4704 chunk_type = header[:4]
4705 header = header[4:]
4706
4707 chunk_data = header[:length]
4708 header = header[length:]
4709
4710 header = header[4:] # Skip CRC
4711
4712 chunks.append({
4713 'type': chunk_type,
4714 'length': length,
4715 'data': chunk_data
4716 })
4717
4718 ihdr = chunks[0]['data']
4719
4720 width = unpack_integer(ihdr[:4])
4721 height = unpack_integer(ihdr[4:8])
4722
4723 idat = b''
4724
4725 for chunk in chunks:
4726 if chunk['type'] == b'IDAT':
4727 idat += chunk['data']
4728
4729 if not idat:
4730 raise IOError('Unable to read PNG data.')
4731
4732 decompressed_data = bytearray(zlib.decompress(idat))
4733
4734 stride = width * 3
4735 pixels = []
4736
4737 def _get_pixel(idx):
4738 x = idx % stride
4739 y = idx // stride
4740 return pixels[y][x]
4741
4742 for y in range(height):
4743 basePos = y * (1 + stride)
4744 filter_type = decompressed_data[basePos]
4745
4746 current_row = []
4747
4748 pixels.append(current_row)
4749
4750 for x in range(stride):
4751 color = decompressed_data[1 + basePos + x]
4752 basex = y * stride + x
4753 left = 0
4754 up = 0
4755
4756 if x > 2:
4757 left = _get_pixel(basex - 3)
4758 if y > 0:
4759 up = _get_pixel(basex - stride)
4760
4761 if filter_type == 1: # Sub
4762 color = (color + left) & 0xff
4763 elif filter_type == 2: # Up
4764 color = (color + up) & 0xff
4765 elif filter_type == 3: # Average
4766 color = (color + ((left + up) >> 1)) & 0xff
4767 elif filter_type == 4: # Paeth
4768 a = left
4769 b = up
4770 c = 0
4771
4772 if x > 2 and y > 0:
4773 c = _get_pixel(basex - stride - 3)
4774
4775 p = a + b - c
4776
4777 pa = abs(p - a)
4778 pb = abs(p - b)
4779 pc = abs(p - c)
4780
4781 if pa <= pb and pa <= pc:
4782 color = (color + a) & 0xff
4783 elif pb <= pc:
4784 color = (color + b) & 0xff
4785 else:
4786 color = (color + c) & 0xff
4787
4788 current_row.append(color)
4789
4790 return width, height, pixels
4791
4792
4793 def write_xattr(path, key, value):
4794 # This mess below finds the best xattr tool for the job
4795 try:
4796 # try the pyxattr module...
4797 import xattr
4798
4799 if hasattr(xattr, 'set'): # pyxattr
4800 # Unicode arguments are not supported in python-pyxattr until
4801 # version 0.5.0
4802 # See https://github.com/ytdl-org/youtube-dl/issues/5498
4803 pyxattr_required_version = '0.5.0'
4804 if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
4805 # TODO: fallback to CLI tools
4806 raise XAttrUnavailableError(
4807 'python-pyxattr is detected but is too old. '
4808 'yt-dlp requires %s or above while your version is %s. '
4809 'Falling back to other xattr implementations' % (
4810 pyxattr_required_version, xattr.__version__))
4811
4812 setxattr = xattr.set
4813 else: # xattr
4814 setxattr = xattr.setxattr
4815
4816 try:
4817 setxattr(path, key, value)
4818 except EnvironmentError as e:
4819 raise XAttrMetadataError(e.errno, e.strerror)
4820
4821 except ImportError:
4822 if compat_os_name == 'nt':
4823 # Write xattrs to NTFS Alternate Data Streams:
4824 # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
4825 assert ':' not in key
4826 assert os.path.exists(path)
4827
4828 ads_fn = path + ':' + key
4829 try:
4830 with open(ads_fn, 'wb') as f:
4831 f.write(value)
4832 except EnvironmentError as e:
4833 raise XAttrMetadataError(e.errno, e.strerror)
4834 else:
4835 user_has_setfattr = check_executable('setfattr', ['--version'])
4836 user_has_xattr = check_executable('xattr', ['-h'])
4837
4838 if user_has_setfattr or user_has_xattr:
4839
4840 value = value.decode('utf-8')
4841 if user_has_setfattr:
4842 executable = 'setfattr'
4843 opts = ['-n', key, '-v', value]
4844 elif user_has_xattr:
4845 executable = 'xattr'
4846 opts = ['-w', key, value]
4847
4848 cmd = ([encodeFilename(executable, True)]
4849 + [encodeArgument(o) for o in opts]
4850 + [encodeFilename(path, True)])
4851
4852 try:
4853 p = Popen(
4854 cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
4855 except EnvironmentError as e:
4856 raise XAttrMetadataError(e.errno, e.strerror)
4857 stdout, stderr = p.communicate_or_kill()
4858 stderr = stderr.decode('utf-8', 'replace')
4859 if p.returncode != 0:
4860 raise XAttrMetadataError(p.returncode, stderr)
4861
4862 else:
4863 # On Unix, but we couldn't find pyxattr, setfattr or xattr.
4864 if sys.platform.startswith('linux'):
4865 raise XAttrUnavailableError(
4866 "Couldn't find a tool to set the xattrs. "
4867 "Install either the python 'pyxattr' or 'xattr' "
4868 "modules, or the GNU 'attr' package "
4869 "(which contains the 'setfattr' tool).")
4870 else:
4871 raise XAttrUnavailableError(
4872 "Couldn't find a tool to set the xattrs. "
4873 "Install either the python 'xattr' module, "
4874 "or the 'xattr' binary.")
4875
4876
4877 def random_birthday(year_field, month_field, day_field):
4878 start_date = datetime.date(1950, 1, 1)
4879 end_date = datetime.date(1995, 12, 31)
4880 offset = random.randint(0, (end_date - start_date).days)
4881 random_date = start_date + datetime.timedelta(offset)
4882 return {
4883 year_field: str(random_date.year),
4884 month_field: str(random_date.month),
4885 day_field: str(random_date.day),
4886 }
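# Illustrative note (added): random_birthday('birth_year', 'birth_month', 'birth_day')
# might return e.g. {'birth_year': '1987', 'birth_month': '6', 'birth_day': '14'};
# the date is random, so no fixed value is shown here.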
4887
4888
4889 # Templates for internet shortcut files, which are plain text files.
4890 DOT_URL_LINK_TEMPLATE = '''
4891 [InternetShortcut]
4892 URL=%(url)s
4893 '''.lstrip()
4894
4895 DOT_WEBLOC_LINK_TEMPLATE = '''
4896 <?xml version="1.0" encoding="UTF-8"?>
4897 <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
4898 <plist version="1.0">
4899 <dict>
4900 \t<key>URL</key>
4901 \t<string>%(url)s</string>
4902 </dict>
4903 </plist>
4904 '''.lstrip()
4905
4906 DOT_DESKTOP_LINK_TEMPLATE = '''
4907 [Desktop Entry]
4908 Encoding=UTF-8
4909 Name=%(filename)s
4910 Type=Link
4911 URL=%(url)s
4912 Icon=text-html
4913 '''.lstrip()
4914
4915 LINK_TEMPLATES = {
4916 'url': DOT_URL_LINK_TEMPLATE,
4917 'desktop': DOT_DESKTOP_LINK_TEMPLATE,
4918 'webloc': DOT_WEBLOC_LINK_TEMPLATE,
4919 }
4920
4921
4922 def iri_to_uri(iri):
4923 """
4924 Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).
4925
4926 The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes (with an underlying UTF-8 encoding) only the characters that are not already escaped, leaving the rest of the URI intact.
4927 """
4928
4929 iri_parts = compat_urllib_parse_urlparse(iri)
4930
4931 if '[' in iri_parts.netloc:
4932 raise ValueError('IPv6 URIs are not yet supported.')
4933 # Querying `.netloc` when there is only one bracket also raises a ValueError.
4934
4935 # The `safe` argument values, that the following code uses, contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.
4936
4937 net_location = ''
4938 if iri_parts.username:
4939 net_location += compat_urllib_parse_quote(iri_parts.username, safe=r"!$%&'()*+,~")
4940 if iri_parts.password is not None:
4941 net_location += ':' + compat_urllib_parse_quote(iri_parts.password, safe=r"!$%&'()*+,~")
4942 net_location += '@'
4943
4944 net_location += iri_parts.hostname.encode('idna').decode('utf-8') # Punycode for Unicode hostnames.
4945 # The 'idna' encoding produces ASCII text.
4946 if iri_parts.port is not None and iri_parts.port != 80:
4947 net_location += ':' + str(iri_parts.port)
4948
4949 return compat_urllib_parse_urlunparse(
4950 (iri_parts.scheme,
4951 net_location,
4952
4953 compat_urllib_parse_quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),
4954
4955 # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
4956 compat_urllib_parse_quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),
4957
4958 # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
4959 compat_urllib_parse_quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),
4960
4961 compat_urllib_parse_quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))
4962
4963 # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
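# Illustrative example (added):
#   >>> iri_to_uri('https://example.com/föö?bär=báz')
#   'https://example.com/f%C3%B6%C3%B6?b%C3%A4r=b%C3%A1z'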
4964
4965
4966 def to_high_limit_path(path):
4967 if sys.platform in ['win32', 'cygwin']:
4968 # Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited.
4969 return r'\\?\ '.rstrip() + os.path.abspath(path)
4970
4971 return path
4972
4973
4974 def format_field(obj, field=None, template='%s', ignore=(None, ''), default='', func=None):
4975 val = traverse_obj(obj, *variadic(field))
4976 if val in ignore:
4977 return default
4978 return template % (func(val) if func else val)
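# Illustrative examples (added; hypothetical fields):
#   >>> format_field({'width': 1280}, 'width', '%dpx')
#   '1280px'
#   >>> format_field({}, 'width', '%dpx', default='unknown')
#   'unknown'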
4979
4980
4981 def clean_podcast_url(url):
4982 return re.sub(r'''(?x)
4983 (?:
4984 (?:
4985 chtbl\.com/track|
4986 media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
4987 play\.podtrac\.com
4988 )/[^/]+|
4989 (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
4990 flex\.acast\.com|
4991 pd(?:
4992 cn\.co| # https://podcorn.com/analytics-prefix/
4993 st\.fm # https://podsights.com/docs/
4994 )/e
4995 )/''', '', url)
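# Illustrative example (added; hypothetical URL):
#   >>> clean_podcast_url('https://chtbl.com/track/12345/example.com/episode.mp3')
#   'https://example.com/episode.mp3'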
4996
4997
4998 _HEX_TABLE = '0123456789abcdef'
4999
5000
5001 def random_uuidv4():
5002 return re.sub(r'[xy]', lambda x: _HEX_TABLE[random.randint(0, 15)], 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')
5003
5004
5005 def make_dir(path, to_screen=None):
5006 try:
5007 dn = os.path.dirname(path)
5008 if dn and not os.path.exists(dn):
5009 os.makedirs(dn)
5010 return True
5011 except (OSError, IOError) as err:
5012 if callable(to_screen):
5013 to_screen('unable to create directory ' + error_to_compat_str(err))
5014 return False
5015
5016
5017 def get_executable_path():
5018 from zipimport import zipimporter
5019 if hasattr(sys, 'frozen'): # Running from PyInstaller
5020 path = os.path.dirname(sys.executable)
5021 elif isinstance(globals().get('__loader__'), zipimporter): # Running from ZIP
5022 path = os.path.join(os.path.dirname(__file__), '../..')
5023 else:
5024 path = os.path.join(os.path.dirname(__file__), '..')
5025 return os.path.abspath(path)
5026
5027
5028 def load_plugins(name, suffix, namespace):
5029 classes = {}
5030 try:
5031 plugins_spec = importlib.util.spec_from_file_location(
5032 name, os.path.join(get_executable_path(), 'ytdlp_plugins', name, '__init__.py'))
5033 plugins = importlib.util.module_from_spec(plugins_spec)
5034 sys.modules[plugins_spec.name] = plugins
5035 plugins_spec.loader.exec_module(plugins)
5036 for name in dir(plugins):
5037 if name in namespace:
5038 continue
5039 if not name.endswith(suffix):
5040 continue
5041 klass = getattr(plugins, name)
5042 classes[name] = namespace[name] = klass
5043 except FileNotFoundError:
5044 pass
5045 return classes
5046
5047
5048 def traverse_obj(
5049 obj, *path_list, default=None, expected_type=None, get_all=True,
5050 casesense=True, is_user_input=False, traverse_string=False):
5051 ''' Traverse nested list/dict/tuple
5052 @param path_list A list of paths which are checked one by one.
5053 Each path is a list of keys where each key is a string,
5054 a function, a tuple of strings/None or "...".
5055 When a function is given, it takes the key as argument and
5056 returns whether the key matches or not. When a tuple is given,
5057 all the keys given in the tuple are traversed, and
5058 "..." traverses all the keys in the object
5059 "None" returns the object without traversal
5060 @param default Default value to return
5061 @param expected_type Only accept final value of this type (Can also be any callable)
5062 @param get_all Return all the values obtained from a path or only the first one
5063 @param casesense Whether to consider dictionary keys as case sensitive
5064 @param is_user_input Whether the keys are generated from user input. If True,
5065 strings are converted to int/slice if necessary
5066 @param traverse_string Whether to traverse inside strings. If True, any
5067 non-compatible object will also be converted into a string
5068 # TODO: Write tests
5069 '''
5070 if not casesense:
5071 _lower = lambda k: (k.lower() if isinstance(k, str) else k)
5072 path_list = (map(_lower, variadic(path)) for path in path_list)
5073
5074 def _traverse_obj(obj, path, _current_depth=0):
5075 nonlocal depth
5076 path = tuple(variadic(path))
5077 for i, key in enumerate(path):
5078 if None in (key, obj):
5079 return obj
5080 if isinstance(key, (list, tuple)):
5081 obj = [_traverse_obj(obj, sub_key, _current_depth) for sub_key in key]
5082 key = ...
5083 if key is ...:
5084 obj = (obj.values() if isinstance(obj, dict)
5085 else obj if isinstance(obj, (list, tuple, LazyList))
5086 else str(obj) if traverse_string else [])
5087 _current_depth += 1
5088 depth = max(depth, _current_depth)
5089 return [_traverse_obj(inner_obj, path[i + 1:], _current_depth) for inner_obj in obj]
5090 elif callable(key):
5091 if isinstance(obj, (list, tuple, LazyList)):
5092 obj = enumerate(obj)
5093 elif isinstance(obj, dict):
5094 obj = obj.items()
5095 else:
5096 if not traverse_string:
5097 return None
5098 obj = str(obj)
5099 _current_depth += 1
5100 depth = max(depth, _current_depth)
5101 return [_traverse_obj(v, path[i + 1:], _current_depth) for k, v in obj if key(k)]
5102 elif isinstance(obj, dict) and not (is_user_input and key == ':'):
5103 obj = (obj.get(key) if casesense or (key in obj)
5104 else next((v for k, v in obj.items() if _lower(k) == key), None))
5105 else:
5106 if is_user_input:
5107 key = (int_or_none(key) if ':' not in key
5108 else slice(*map(int_or_none, key.split(':'))))
5109 if key == slice(None):
5110 return _traverse_obj(obj, (..., *path[i + 1:]), _current_depth)
5111 if not isinstance(key, (int, slice)):
5112 return None
5113 if not isinstance(obj, (list, tuple, LazyList)):
5114 if not traverse_string:
5115 return None
5116 obj = str(obj)
5117 try:
5118 obj = obj[key]
5119 except IndexError:
5120 return None
5121 return obj
5122
5123 if isinstance(expected_type, type):
5124 type_test = lambda val: val if isinstance(val, expected_type) else None
5125 elif expected_type is not None:
5126 type_test = expected_type
5127 else:
5128 type_test = lambda val: val
5129
5130 for path in path_list:
5131 depth = 0
5132 val = _traverse_obj(obj, path)
5133 if val is not None:
5134 if depth:
5135 for _ in range(depth - 1):
5136 val = itertools.chain.from_iterable(v for v in val if v is not None)
5137 val = [v for v in map(type_test, val) if v is not None]
5138 if val:
5139 return val if get_all else val[0]
5140 else:
5141 val = type_test(val)
5142 if val is not None:
5143 return val
5144 return default
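# Illustrative examples (added): '...' branches into every element, and later
# paths act as fallbacks when an earlier path resolves to None:
#   >>> traverse_obj({'a': [{'b': 1}, {'b': 2}]}, ('a', ..., 'b'))
#   [1, 2]
#   >>> traverse_obj({'a': {'b': None}}, ('a', 'b'), ('a',))
#   {'b': None}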
5145
5146
5147 def traverse_dict(dictn, keys, casesense=True):
5148 write_string('DeprecationWarning: yt_dlp.utils.traverse_dict is deprecated '
5149 'and may be removed in a future version. Use yt_dlp.utils.traverse_obj instead')
5150 return traverse_obj(dictn, keys, casesense=casesense, is_user_input=True, traverse_string=True)
5151
5152
5153 def variadic(x, allowed_types=(str, bytes, dict)):
5154 return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,)
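# Illustrative examples (added): non-iterables and instances of allowed_types
# are wrapped in a tuple; other iterables are returned unchanged:
#   >>> variadic('spam')
#   ('spam',)
#   >>> variadic(['spam', 'eggs'])
#   ['spam', 'eggs']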
5155
5156
5157 # create a JSON Web Signature (jws) with HS256 algorithm
5158 # the resulting format is in JWS Compact Serialization
5159 # implemented following JWT https://www.rfc-editor.org/rfc/rfc7519.html
5160 # implemented following JWS https://www.rfc-editor.org/rfc/rfc7515.html
5161 def jwt_encode_hs256(payload_data, key, headers={}):
5162 header_data = {
5163 'alg': 'HS256',
5164 'typ': 'JWT',
5165 }
5166 if headers:
5167 header_data.update(headers)
5168 header_b64 = base64.b64encode(json.dumps(header_data).encode('utf-8'))
5169 payload_b64 = base64.b64encode(json.dumps(payload_data).encode('utf-8'))
5170 h = hmac.new(key.encode('utf-8'), header_b64 + b'.' + payload_b64, hashlib.sha256)
5171 signature_b64 = base64.b64encode(h.digest())
5172 token = header_b64 + b'.' + payload_b64 + b'.' + signature_b64
5173 return token
5174
5175
5176 # can be extended in the future to verify the signature and parse the header, and to return the algorithm used if it's not HS256
5177 def jwt_decode_hs256(jwt):
5178 header_b64, payload_b64, signature_b64 = jwt.split('.')
5179 payload_data = json.loads(base64.urlsafe_b64decode(payload_b64))
5180 return payload_data
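# Illustrative round trip (added; hypothetical payload and key):
#   >>> token = jwt_encode_hs256({'id': 1}, 'secret')
#   >>> jwt_decode_hs256(token.decode('utf-8'))
#   {'id': 1}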
5181
5182
5183 def supports_terminal_sequences(stream):
5184 if compat_os_name == 'nt':
5185 from .compat import WINDOWS_VT_MODE # Must be imported locally
5186 if not WINDOWS_VT_MODE or get_windows_version() < (10, 0, 10586):
5187 return False
5188 elif not os.getenv('TERM'):
5189 return False
5190 try:
5191 return stream.isatty()
5192 except BaseException:
5193 return False
5194
5195
5196 _terminal_sequences_re = re.compile('\033\\[[^m]+m')
5197
5198
5199 def remove_terminal_sequences(string):
5200 return _terminal_sequences_re.sub('', string)
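# Illustrative example (added):
#   >>> remove_terminal_sequences('\033[31mred\033[0m')
#   'red'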
5201
5202
5203 def number_of_digits(number):
5204 return len('%d' % number)
5205
5206
5207 def join_nonempty(*values, delim='-', from_dict=None):
5208 if from_dict is not None:
5209 values = map(from_dict.get, values)
5210 return delim.join(map(str, filter(None, values)))
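# Illustrative examples (added):
#   >>> join_nonempty('en', None, 'US')
#   'en-US'
#   >>> join_nonempty('title', 'id', from_dict={'id': 'abc123'})
#   'abc123'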
5211
5212
5213 class Config:
5214 own_args = None
5215 filename = None
5216 __initialized = False
5217
5218 def __init__(self, parser, label=None):
5219 self._parser, self.label = parser, label
5220 self._loaded_paths, self.configs = set(), []
5221
5222 def init(self, args=None, filename=None):
5223 assert not self.__initialized
5224 directory = ''
5225 if filename:
5226 location = os.path.realpath(filename)
5227 directory = os.path.dirname(location)
5228 if location in self._loaded_paths:
5229 return False
5230 self._loaded_paths.add(location)
5231
5232 self.__initialized = True
5233 self.own_args, self.filename = args, filename
5234 for location in self._parser.parse_args(args)[0].config_locations or []:
5235 location = os.path.join(directory, expand_path(location))
5236 if os.path.isdir(location):
5237 location = os.path.join(location, 'yt-dlp.conf')
5238 if not os.path.exists(location):
5239 self._parser.error(f'config location {location} does not exist')
5240 self.append_config(self.read_file(location), location)
5241 return True
5242
5243 def __str__(self):
5244 label = join_nonempty(
5245 self.label, 'config', f'"{self.filename}"' if self.filename else '',
5246 delim=' ')
5247 return join_nonempty(
5248 self.own_args is not None and f'{label[0].upper()}{label[1:]}: {self.hide_login_info(self.own_args)}',
5249 *(f'\n{c}'.replace('\n', '\n| ')[1:] for c in self.configs),
5250 delim='\n')
5251
5252 @staticmethod
5253 def read_file(filename, default=[]):
5254 try:
5255 optionf = open(filename)
5256 except IOError:
5257 return default # silently skip if file is not present
5258 try:
5259 # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
5260 contents = optionf.read()
5261 if sys.version_info < (3,):
5262 contents = contents.decode(preferredencoding())
5263 res = compat_shlex_split(contents, comments=True)
5264 finally:
5265 optionf.close()
5266 return res
5267
5268 @staticmethod
5269 def hide_login_info(opts):
5270 PRIVATE_OPTS = {'-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'}
5271 eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')
5272
5273 def _scrub_eq(o):
5274 m = eqre.match(o)
5275 if m:
5276 return m.group('key') + '=PRIVATE'
5277 else:
5278 return o
5279
5280 opts = list(map(_scrub_eq, opts))
5281 for idx, opt in enumerate(opts):
5282 if opt in PRIVATE_OPTS and idx + 1 < len(opts):
5283 opts[idx + 1] = 'PRIVATE'
5284 return opts
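# Illustrative examples (added): both '--opt value' and '--opt=value' forms are scrubbed:
#   >>> Config.hide_login_info(['-u', 'me@example.com', '-p', 'secret'])
#   ['-u', 'PRIVATE', '-p', 'PRIVATE']
#   >>> Config.hide_login_info(['--username=me@example.com'])
#   ['--username=PRIVATE']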
5285
5286 def append_config(self, *args, label=None):
5287 config = type(self)(self._parser, label)
5288 config._loaded_paths = self._loaded_paths
5289 if config.init(*args):
5290 self.configs.append(config)
5291
5292 @property
5293 def all_args(self):
5294 for config in reversed(self.configs):
5295 yield from config.all_args
5296 yield from self.own_args or []
5297
5298 def parse_args(self):
5299 return self._parser.parse_args(list(self.all_args))