#!/usr/bin/env python3
import atexit
import base64
import binascii
import calendar
import codecs
import collections
import contextlib
import ctypes
import datetime
import email.header
import email.utils
import errno
import functools
import gzip
import hashlib
import hmac
import importlib.util
import io
import itertools
import json
import locale
import math
import mimetypes
import operator
import os
import platform
import random
import re
import shlex
import socket
import ssl
import subprocess
import sys
import tempfile
import time
import traceback
import urllib.parse
import xml.etree.ElementTree
import zlib

from .compat import (
    asyncio,
    compat_chr,
    compat_cookiejar,
    compat_etree_fromstring,
    compat_expanduser,
    compat_html_entities,
    compat_html_entities_html5,
    compat_HTMLParseError,
    compat_HTMLParser,
    compat_http_client,
    compat_HTTPError,
    compat_os_name,
    compat_parse_qs,
    compat_shlex_quote,
    compat_str,
    compat_struct_pack,
    compat_struct_unpack,
    compat_urllib_error,
    compat_urllib_parse_unquote_plus,
    compat_urllib_parse_urlencode,
    compat_urllib_parse_urlparse,
    compat_urllib_request,
    compat_urlparse,
)
from .dependencies import brotli, certifi, websockets
from .socks import ProxyType, sockssocket


def register_socks_protocols():
    # "Register" SOCKS protocols
    # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
    # URLs with protocols not in urlparse.uses_netloc are not handled correctly
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in compat_urlparse.uses_netloc:
            compat_urlparse.uses_netloc.append(scheme)


# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))


def random_user_agent():
    _USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
    _CHROME_VERSIONS = (
        '90.0.4430.212',
        '90.0.4430.24',
        '90.0.4430.70',
        '90.0.4430.72',
        '90.0.4430.85',
        '90.0.4430.93',
        '91.0.4472.101',
        '91.0.4472.106',
        '91.0.4472.114',
        '91.0.4472.124',
        '91.0.4472.164',
        '91.0.4472.19',
        '91.0.4472.77',
        '92.0.4515.107',
        '92.0.4515.115',
        '92.0.4515.131',
        '92.0.4515.159',
        '92.0.4515.43',
        '93.0.4556.0',
        '93.0.4577.15',
        '93.0.4577.63',
        '93.0.4577.82',
        '94.0.4606.41',
        '94.0.4606.54',
        '94.0.4606.61',
        '94.0.4606.71',
        '94.0.4606.81',
        '94.0.4606.85',
        '95.0.4638.17',
        '95.0.4638.50',
        '95.0.4638.54',
        '95.0.4638.69',
        '95.0.4638.74',
        '96.0.4664.18',
        '96.0.4664.45',
        '96.0.4664.55',
        '96.0.4664.93',
        '97.0.4692.20',
    )
    return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)


SUPPORTED_ENCODINGS = [
    'gzip', 'deflate'
]
if brotli:
    SUPPORTED_ENCODINGS.append('br')

std_headers = {
    'User-Agent': random_user_agent(),
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-us,en;q=0.5',
    'Sec-Fetch-Mode': 'navigate',
}


USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}


NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}

KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'avi', 'divx',
    'mov',
    'asf', 'wmv', 'wma',
    '3gp', '3g2',
    'mp3',
    'flac',
    'ape',
    'wav',
    'f4f', 'f4m', 'm3u8', 'smil')

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))

DATE_FORMATS = (
    '%d %B %Y',
    '%d %b %Y',
    '%B %d %Y',
    '%B %dst %Y',
    '%B %dnd %Y',
    '%B %drd %Y',
    '%B %dth %Y',
    '%b %d %Y',
    '%b %dst %Y',
    '%b %dnd %Y',
    '%b %drd %Y',
    '%b %dth %Y',
    '%b %dst %Y %I:%M',
    '%b %dnd %Y %I:%M',
    '%b %drd %Y %I:%M',
    '%b %dth %Y %I:%M',
    '%Y %m %d',
    '%Y-%m-%d',
    '%Y.%m.%d.',
    '%Y/%m/%d',
    '%Y/%m/%d %H:%M',
    '%Y/%m/%d %H:%M:%S',
    '%Y%m%d%H%M',
    '%Y%m%d%H%M%S',
    '%Y%m%d',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S.%f',
    '%Y-%m-%d %H:%M:%S:%f',
    '%d.%m.%Y %H:%M',
    '%d.%m.%Y %H.%M',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%Y-%m-%dT%H:%M',
    '%b %d %Y at %H:%M',
    '%b %d %Y at %H:%M:%S',
    '%B %d %Y at %H:%M',
    '%B %d %Y at %H:%M:%S',
    '%H:%M %d-%b-%Y',
)

DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
])

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])

PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'

NUMBER_RE = r'\d+(?:\.\d+)?'


def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref


def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    tf = tempfile.NamedTemporaryFile(
        prefix=f'{os.path.basename(fn)}.', dir=os.path.dirname(fn),
        suffix='.tmp', delete=False, mode='w', encoding='utf-8')

    try:
        with tf:
            json.dump(obj, tf, ensure_ascii=False)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            with contextlib.suppress(OSError):
                os.unlink(fn)
        with contextlib.suppress(OSError):
            mask = os.umask(0)
            os.umask(mask)
            os.chmod(tf.name, 0o666 & ~mask)
        os.rename(tf.name, fn)
    except Exception:
        with contextlib.suppress(OSError):
            os.remove(tf.name)
        raise


def find_xpath_attr(node, xpath, key, val=None):
    """ Find the xpath xpath[@key=val] """
    assert re.match(r'^[a-zA-Z_-]+$', key)
    expr = xpath + ('[@%s]' % key if val is None else f"[@{key}='{val}']")
    return node.find(expr)

# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter


def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)
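
# Illustrative example (the namespace URI here is hypothetical):
#   xpath_with_ns('media:song/media:url', {'media': 'http://example.com/ns'})
#   ->  '{http://example.com/ns}song/{http://example.com/ns}url'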


def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(xpath)

    if isinstance(xpath, (str, compat_str)):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n


def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text


def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = f'{xpath}[@{key}]' if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]


def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html)


def get_element_html_by_id(id, html):
    """Return the html of the tag with the specified ID in the passed HTML document"""
    return get_element_html_by_attribute('id', id, html)


def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None
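
# Illustrative usage (a quick sketch, not part of the module's tests):
#   get_element_by_class('foo', '<span class="foo bar">nice</span>')  ->  'nice'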


def get_element_html_by_class(class_name, html):
    """Return the html of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_html_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_by_attribute(attribute, value, html, escape_value=True):
    retval = get_elements_by_attribute(attribute, value, html, escape_value)
    return retval[0] if retval else None


def get_element_html_by_attribute(attribute, value, html, escape_value=True):
    retval = get_elements_html_by_attribute(attribute, value, html, escape_value)
    return retval[0] if retval else None


def get_elements_by_class(class_name, html):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_html_by_class(class_name, html):
    """Return the html of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_html_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_by_attribute(*args, **kwargs):
    """Return the content of all tags with the specified attribute in the passed HTML document as a list"""
    return [content for content, _ in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_html_by_attribute(*args, **kwargs):
    """Return the html of all tags with the specified attribute in the passed HTML document as a list"""
    return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_text_and_html_by_attribute(attribute, value, html, escape_value=True):
    """
    Return the text (content) and the html (whole) of each tag with the specified
    attribute in the passed HTML document
    """

    quote = '' if re.match(r'''[\s"'`=<>]''', value) else '?'

    value = re.escape(value) if escape_value else value

    partial_element_re = rf'''(?x)
        <(?P<tag>[a-zA-Z0-9:._-]+)
         (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
         \s{re.escape(attribute)}\s*=\s*(?P<_q>['"]{quote})(?-x:{value})(?P=_q)
        '''

    for m in re.finditer(partial_element_re, html):
        content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():])

        yield (
            unescapeHTML(re.sub(r'^(?P<q>["\'])(?P<content>.*)(?P=q)$', r'\g<content>', content, flags=re.DOTALL)),
            whole
        )


class HTMLBreakOnClosingTagParser(compat_HTMLParser):
    """
    HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
    closing tag for the first opening tag it has encountered, and can be used
    as a context manager
    """

    class HTMLBreakOnClosingTagException(Exception):
        pass

    def __init__(self):
        self.tagstack = collections.deque()
        compat_HTMLParser.__init__(self)

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def close(self):
        # handle_endtag does not return upon raising HTMLBreakOnClosingTagException,
        # so data remains buffered; we no longer have any interest in it, thus
        # override this method to discard it
        pass

    def handle_starttag(self, tag, _):
        self.tagstack.append(tag)

    def handle_endtag(self, tag):
        if not self.tagstack:
            raise compat_HTMLParseError('no tags in the stack')
        while self.tagstack:
            inner_tag = self.tagstack.pop()
            if inner_tag == tag:
                break
        else:
            raise compat_HTMLParseError(f'matching opening tag for closing {tag} tag not found')
        if not self.tagstack:
            raise self.HTMLBreakOnClosingTagException()

def get_element_text_and_html_by_tag(tag, html):
    """
    For the first element with the specified tag in the passed HTML document
    return its content (text) and the whole element (html)
    """
    def find_or_raise(haystack, needle, exc):
        try:
            return haystack.index(needle)
        except ValueError:
            raise exc
    closing_tag = f'</{tag}>'
    whole_start = find_or_raise(
        html, f'<{tag}', compat_HTMLParseError(f'opening {tag} tag not found'))
    content_start = find_or_raise(
        html[whole_start:], '>', compat_HTMLParseError(f'malformed opening {tag} tag'))
    content_start += whole_start + 1
    with HTMLBreakOnClosingTagParser() as parser:
        parser.feed(html[whole_start:content_start])
        if not parser.tagstack or parser.tagstack[0] != tag:
            raise compat_HTMLParseError(f'parser did not match opening {tag} tag')
        offset = content_start
        while offset < len(html):
            next_closing_tag_start = find_or_raise(
                html[offset:], closing_tag,
                compat_HTMLParseError(f'closing {tag} tag not found'))
            next_closing_tag_end = next_closing_tag_start + len(closing_tag)
            try:
                parser.feed(html[offset:offset + next_closing_tag_end])
                offset += next_closing_tag_end
            except HTMLBreakOnClosingTagParser.HTMLBreakOnClosingTagException:
                return html[content_start:offset + next_closing_tag_start], \
                    html[whole_start:offset + next_closing_tag_end]
        raise compat_HTMLParseError('unexpected end of html')


class HTMLAttributeParser(compat_HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""

    def __init__(self):
        self.attrs = {}
        compat_HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)


class HTMLListAttrsParser(compat_HTMLParser):
    """HTML parser to gather the attributes for the elements of a list"""

    def __init__(self):
        compat_HTMLParser.__init__(self)
        self.items = []
        self._level = 0

    def handle_starttag(self, tag, attrs):
        if tag == 'li' and self._level == 0:
            self.items.append(dict(attrs))
        self._level += 1

    def handle_endtag(self, tag):
        self._level -= 1


def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    """
    parser = HTMLAttributeParser()
    with contextlib.suppress(compat_HTMLParseError):
        parser.feed(html_element)
        parser.close()
    return parser.attrs


def parse_list(webpage):
    """Given a string for a series of HTML <li> elements,
    return a list of dictionaries of their attributes"""
    parser = HTMLListAttrsParser()
    parser.feed(webpage)
    parser.close()
    return parser.items


def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    html = re.sub(r'\s+', ' ', html)
    html = re.sub(r'(?u)\s?<\s?br\s?/?\s?>\s?', '\n', html)
    html = re.sub(r'(?u)<\s?/\s?p\s?>\s?<\s?p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()
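
# Illustrative example (a rough sketch of the behaviour, not exhaustive):
#   clean_html('<b>Hello</b><br/>world')  ->  'Hello\nworld'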


def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    if filename == '-':
        if sys.platform == 'win32':
            import msvcrt
            msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)

    for attempt in range(2):
        try:
            try:
                if sys.platform == 'win32':
                    # FIXME: An exclusive lock also locks the file from being read.
                    # Since windows locks are mandatory, don't lock the file on windows (for now).
                    # Ref: https://github.com/yt-dlp/yt-dlp/issues/3124
                    raise LockingUnsupportedError()
                stream = locked_file(filename, open_mode, block=False).__enter__()
            except LockingUnsupportedError:
                stream = open(filename, open_mode)
            return (stream, filename)
        except OSError as err:
            if attempt or err.errno in (errno.EACCES,):
                raise
            old_filename, filename = filename, sanitize_path(filename)
            if old_filename == filename:
                raise


def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp


def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
    """Sanitizes a string so it could be used as part of a filename.
    @param restricted   Use a stricter subset of allowed characters
    @param is_id        Whether this is an ID that should be kept unchanged if possible.
                        If unset, yt-dlp's new sanitization rules are in effect
    """
    if s == '':
        return ''

    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        elif not restricted and char == '\n':
            return '\0 '
        elif char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '\0_\0-' if restricted else '\0 \0-'
        elif char in '\\/|*<>':
            return '\0_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace() or ord(char) > 127):
            return '\0_'
        return char

    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)  # Handle timestamps
    result = ''.join(map(replace_insane, s))
    if is_id is NO_DEFAULT:
        result = re.sub('(\0.)(?:(?=\\1)..)+', r'\1', result)  # Remove repeated substitute chars
        STRIP_RE = '(?:\0.|[ _-])*'
        result = re.sub(f'^\0.{STRIP_RE}|{STRIP_RE}\0.$', '', result)  # Remove substitute chars from start/end
    result = result.replace('\0', '') or '_'

    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
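
# Illustrative example (a sketch of the default rules; restricted mode also
# transliterates accented characters via ACCENT_CHARS):
#   sanitize_filename('abc/de')  ->  'abc_de'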


def sanitize_path(s, force=False):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform == 'win32':
        force = False
        drive_or_unc, _ = os.path.splitdrive(s)
    elif force:
        drive_or_unc = ''
    else:
        return s

    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    elif force and s and s[0] == os.path.sep:
        sanitized_path.insert(0, os.path.sep)
    return os.path.join(*sanitized_path)


def sanitize_url(url):
    # Prepend `http:` to protocol-less URLs to reduce the number of
    # unwanted failures caused by a missing protocol
    if url.startswith('//'):
        return 'http:%s' % url
    # Fix some common typos seen so far
    COMMON_TYPOS = (
        # https://github.com/ytdl-org/youtube-dl/issues/15649
        (r'^httpss://', r'https://'),
        # https://bx1.be/lives/direct-tv/
        (r'^rmtp([es]?)://', r'rtmp\1://'),
    )
    for mistake, fixup in COMMON_TYPOS:
        if re.match(mistake, url):
            return re.sub(mistake, fixup, url)
    return url
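
# Illustrative examples (sketches of the rules above):
#   sanitize_url('//example.com/x')    ->  'http://example.com/x'
#   sanitize_url('rmtp://host/stream') ->  'rtmp://host/stream'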


def extract_basic_auth(url):
    parts = compat_urlparse.urlsplit(url)
    if parts.username is None:
        return url, None
    url = compat_urlparse.urlunsplit(parts._replace(netloc=(
        parts.hostname if parts.port is None
        else '%s:%d' % (parts.hostname, parts.port))))
    auth_payload = base64.b64encode(
        ('%s:%s' % (parts.username, parts.password or '')).encode('utf-8'))
    return url, 'Basic ' + auth_payload.decode('utf-8')
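
# Illustrative example ('user:pass' base64-encodes to 'dXNlcjpwYXNz'):
#   extract_basic_auth('http://user:pass@example.com/x')
#   ->  ('http://example.com/x', 'Basic dXNlcjpwYXNz')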


def sanitized_Request(url, *args, **kwargs):
    url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
    if auth_header is not None:
        headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
        headers['Authorization'] = auth_header
    return compat_urllib_request.Request(url, *args, **kwargs)


def expand_path(s):
    """Expand shell variables and ~"""
    return os.path.expandvars(compat_expanduser(s))


def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    res = []
    for el in iterable:
        if el not in res:
            res.append(el)
    return res
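
# Illustrative example (first-seen order is preserved):
#   orderedSet([1, 2, 1, 3, 2])  ->  [1, 2, 3]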


def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in compat_html_entities_html5:
        return compat_html_entities_html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/ytdl-org/youtube-dl/issues/7518
        with contextlib.suppress(ValueError):
            return compat_chr(int(numstr, base))

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert isinstance(s, str)

    return re.sub(
        r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
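
# Illustrative examples (named and numeric entities):
#   unescapeHTML('&amp;')   ->  '&'
#   unescapeHTML('&#x61;')  ->  'a'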


def escapeHTML(text):
    return (
        text
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;')
        .replace('"', '&quot;')
        .replace("'", '&#39;')
    )


def process_communicate_or_kill(p, *args, **kwargs):
    try:
        return p.communicate(*args, **kwargs)
    except BaseException:  # Including KeyboardInterrupt
        p.kill()
        p.wait()
        raise


class Popen(subprocess.Popen):
    if sys.platform == 'win32':
        _startupinfo = subprocess.STARTUPINFO()
        _startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    else:
        _startupinfo = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs, startupinfo=self._startupinfo)

    def communicate_or_kill(self, *args, **kwargs):
        return process_communicate_or_kill(self, *args, **kwargs)


def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding


def encodeFilename(s, for_subprocess=False):
    assert isinstance(s, str)
    return s


def decodeFilename(b, for_subprocess=False):
    return b


def encodeArgument(s):
    # Legacy code that uses byte strings
    # Uncomment the following line after fixing all post processors
    # assert isinstance(s, str), 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
    return s if isinstance(s, str) else s.decode('ascii')


def decodeArgument(b):
    return b


def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval


_timetuple = collections.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))


def timetuple_from_msec(msec):
    secs, msec = divmod(msec, 1000)
    mins, secs = divmod(secs, 60)
    hrs, mins = divmod(mins, 60)
    return _timetuple(hrs, mins, secs, msec)


def formatSeconds(secs, delim=':', msec=False):
    time = timetuple_from_msec(secs * 1000)
    if time.hours:
        ret = '%d%s%02d%s%02d' % (time.hours, delim, time.minutes, delim, time.seconds)
    elif time.minutes:
        ret = '%d%s%02d' % (time.minutes, delim, time.seconds)
    else:
        ret = '%d' % time.seconds
    return '%s.%03d' % (ret, time.milliseconds) if msec else ret
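
# Illustrative examples:
#   formatSeconds(91)                 ->  '1:31'
#   formatSeconds(3661.5, msec=True)  ->  '1:01:01.500'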


def _ssl_load_windows_store_certs(ssl_context, storename):
    # Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
    try:
        certs = [cert for cert, encoding, trust in ssl.enum_certificates(storename)
                 if encoding == 'x509_asn' and (
                     trust is True or ssl.Purpose.SERVER_AUTH.oid in trust)]
    except PermissionError:
        return
    for cert in certs:
        with contextlib.suppress(ssl.SSLError):
            ssl_context.load_verify_locations(cadata=cert)


def make_HTTPS_handler(params, **kwargs):
    opts_check_certificate = not params.get('nocheckcertificate')
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = opts_check_certificate
    if params.get('legacyserverconnect'):
        context.options |= 4  # SSL_OP_LEGACY_SERVER_CONNECT
    context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
    if opts_check_certificate:
        if has_certifi and 'no-certifi' not in params.get('compat_opts', []):
            context.load_verify_locations(cafile=certifi.where())
        else:
            try:
                context.load_default_certs()
                # Work around the issue in load_default_certs when there are bad certificates. See:
                # https://github.com/yt-dlp/yt-dlp/issues/1060,
                # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
            except ssl.SSLError:
                # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
                if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
                    # Create a new context to discard any certificates that were already loaded
                    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
                    context.check_hostname, context.verify_mode = True, ssl.CERT_REQUIRED
                    for storename in ('CA', 'ROOT'):
                        _ssl_load_windows_store_certs(context, storename)
                context.set_default_verify_paths()
    client_certfile = params.get('client_certificate')
    if client_certfile:
        try:
            context.load_cert_chain(
                client_certfile, keyfile=params.get('client_certificate_key'),
                password=params.get('client_certificate_password'))
        except ssl.SSLError:
            raise YoutubeDLError('Unable to load client certificate')
    return YoutubeDLHTTPSHandler(params, context=context, **kwargs)


def bug_reports_message(before=';'):
    msg = ('please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , '
           'filling out the appropriate issue template. '
           'Confirm you are on the latest version using yt-dlp -U')

    before = before.rstrip()
    if not before or before.endswith(('.', '!', '?')):
        msg = msg[0].title() + msg[1:]

    return (before + ' ' if before else '') + msg


class YoutubeDLError(Exception):
    """Base exception for YoutubeDL errors."""
    msg = None

    def __init__(self, msg=None):
        if msg is not None:
            self.msg = msg
        elif self.msg is None:
            self.msg = type(self).__name__
        super().__init__(self.msg)


network_exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
    network_exceptions.append(ssl.CertificateError)
network_exceptions = tuple(network_exceptions)


class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
        """
        if sys.exc_info()[0] in network_exceptions:
            expected = True

        self.orig_msg = str(msg)
        self.traceback = tb
        self.expected = expected
        self.cause = cause
        self.video_id = video_id
        self.ie = ie
        self.exc_info = sys.exc_info()  # preserve original exception

        super().__init__(''.join((
            format_field(ie, template='[%s] '),
            format_field(video_id, template='%s: '),
            msg,
            format_field(cause, template=' (caused by %r)'),
            '' if expected else bug_reports_message())))

    def format_traceback(self):
        return join_nonempty(
            self.traceback and ''.join(traceback.format_tb(self.traceback)),
            self.cause and ''.join(traceback.format_exception(None, self.cause, self.cause.__traceback__)[1:]),
            delim='\n') or None


class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super().__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class GeoRestrictedError(ExtractorError):
    """Geographic restriction Error exception.

    This exception may be thrown when a video is not available from your
    geographic location due to geographic restrictions imposed by a website.
    """

    def __init__(self, msg, countries=None, **kwargs):
        kwargs['expected'] = True
        super().__init__(msg, **kwargs)
        self.countries = countries


class DownloadError(YoutubeDLError):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super().__init__(msg)
        self.exc_info = exc_info


class EntryNotInPlaylist(YoutubeDLError):
    """Entry not in playlist exception.

    This exception will be thrown by YoutubeDL when a requested entry
    is not found in the playlist info_dict
    """
    msg = 'Entry not found in info'


class SameFileError(YoutubeDLError):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    msg = 'Fixed output name but more than one file to download'

    def __init__(self, filename=None):
        if filename is not None:
            self.msg += f': {filename}'
        super().__init__(self.msg)


class PostProcessingError(YoutubeDLError):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """


class DownloadCancelled(YoutubeDLError):
    """ Exception raised when the download queue should be interrupted """
    msg = 'The download was cancelled'


class ExistingVideoReached(DownloadCancelled):
    """ --break-on-existing triggered """
    msg = 'Encountered a video that is already in the archive, stopping due to --break-on-existing'


class RejectedVideoReached(DownloadCancelled):
    """ --break-on-reject triggered """
    msg = 'Encountered a video that did not match filter, stopping due to --break-on-reject'


class MaxDownloadsReached(DownloadCancelled):
    """ --max-downloads limit has been reached. """
    msg = 'Maximum number of downloads reached, stopping due to --max-downloads'


class ReExtractInfo(YoutubeDLError):
    """ Video info needs to be re-extracted. """

    def __init__(self, msg, expected=False):
        super().__init__(msg)
        self.expected = expected


class ThrottledDownload(ReExtractInfo):
    """ Download speed below --throttled-rate. """
    msg = 'The download speed is below throttle limit'

    def __init__(self):
        super().__init__(self.msg, expected=False)


class UnavailableVideoError(YoutubeDLError):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    msg = 'Unable to download video'

    def __init__(self, err=None):
        if err is not None:
            self.msg += f': {err}'
        super().__init__(self.msg)


class ContentTooShortError(YoutubeDLError):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        super().__init__(f'Downloaded {downloaded} bytes, expected {expected} bytes')
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected


class XAttrMetadataError(YoutubeDLError):
    def __init__(self, code=None, msg='Unknown error'):
        super().__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        if (self.code in (errno.ENOSPC, errno.EDQUOT)
                or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'


class XAttrUnavailableError(YoutubeDLError):
    pass


def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    hc = http_class(*args, **kwargs)
    source_address = ydl_handler._params.get('source_address')

    if source_address is not None:
        # This works around socket.create_connection(), which would try all
        # address data from getaddrinfo(), including IPv6; here the getaddrinfo()
        # results are filtered by the address family of source_address.
        # Based on the CPython socket.create_connection() function.
        # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
        def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
            host, port = address
            err = None
            addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
            af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
            ip_addrs = [addr for addr in addrs if addr[0] == af]
            if addrs and not ip_addrs:
                ip_version = 'v4' if af == socket.AF_INET else 'v6'
                raise OSError(
                    "No remote IP%s addresses available for connect, can't use '%s' as source address"
                    % (ip_version, source_address[0]))
            for res in ip_addrs:
                af, socktype, proto, canonname, sa = res
                sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                        sock.settimeout(timeout)
                    sock.bind(source_address)
                    sock.connect(sa)
                    err = None  # Explicitly break reference cycle
                    return sock
                except OSError as _:
                    err = _
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            else:
                raise OSError('getaddrinfo returns an empty list')
        if hasattr(hc, '_create_connection'):
            hc._create_connection = _create_connection
        hc.source_address = (source_address, 0)

    return hc


def handle_youtubedl_headers(headers):
    filtered_headers = headers

    if 'Youtubedl-no-compression' in filtered_headers:
        filtered_headers = {k: v for k, v in filtered_headers.items() if k.lower() != 'accept-encoding'}
        del filtered_headers['Youtubedl-no-compression']

    return filtered_headers


class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        conn_class = compat_http_client.HTTPConnection

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        if not data:
            return data
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def brotli(data):
        if not data:
            return data
        return brotli.decompress(data)

    def http_request(self, req):
        # According to RFC 3986, URLs cannot contain non-ASCII characters; however,
        # this is not always respected by websites: some hand out URLs with
        # non-percent-encoded non-ASCII characters (see telemb.py, ard.py [#3412]),
        # and urllib chokes on such URLs (see http://bugs.python.org/issue3991).
        # To work around this, the request's original URL is replaced with a
        # percent-encoded one. Since redirects are also affected
        # (e.g. http://www.southpark.de/alle-episoden/s18e09), this workaround
        # has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in self._params.get('http_headers', std_headers).items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        if 'Accept-encoding' not in req.headers:
            req.add_header('Accept-encoding', ', '.join(SUPPORTED_ENCODINGS))

        req.headers = handle_youtubedl_headers(req.headers)

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except OSError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except OSError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # brotli
        if resp.headers.get('Content-encoding', '') == 'br':
            resp = compat_urllib_request.addinfourl(
                io.BytesIO(self.brotli(resp.read())), old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/ytdl-org/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # Per RFC 2616, the default charset is iso-8859-1, which Python 3 respects
                location = location.encode('iso-8859-1').decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response


def make_socks_conn_class(base_class, socks_proxy):
    assert issubclass(base_class, (
        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))

    url_components = compat_urlparse.urlparse(socks_proxy)
    if url_components.scheme.lower() == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif url_components.scheme.lower() in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif url_components.scheme.lower() == 'socks4a':
        socks_type = ProxyType.SOCKS4A

    def unquote_if_non_empty(s):
        if not s:
            return s
        return compat_urllib_parse_unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if isinstance(self.timeout, (int, float)):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            if isinstance(self, compat_http_client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection


class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        conn_class = self._https_conn_class

        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, True),
            req, **kwargs)


class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
    """
    See [1] for cookie file format.

    1. https://curl.haxx.se/docs/http-cookies.html
    """
    _HTTPONLY_PREFIX = '#HttpOnly_'
    _ENTRY_LEN = 7
    _HEADER = '''# Netscape HTTP Cookie File
# This file is generated by yt-dlp. Do not edit.

'''
    _CookieFileEntry = collections.namedtuple(
        'CookieFileEntry',
        ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))

    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
        """
        Save cookies to a file.

        Most of the code is taken from CPython 3.8 and slightly adapted
        to support cookie files with UTF-8 in both python 2 and 3.
        """
        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)

        # Store session cookies with `expires` set to 0 instead of an empty
        # string
        for cookie in self:
            if cookie.expires is None:
                cookie.expires = 0

        with open(filename, 'w', encoding='utf-8') as f:
            f.write(self._HEADER)
            now = time.time()
            for cookie in self:
                if not ignore_discard and cookie.discard:
                    continue
                if not ignore_expires and cookie.is_expired(now):
                    continue
                if cookie.secure:
                    secure = 'TRUE'
                else:
                    secure = 'FALSE'
                if cookie.domain.startswith('.'):
                    initial_dot = 'TRUE'
                else:
                    initial_dot = 'FALSE'
                if cookie.expires is not None:
                    expires = compat_str(cookie.expires)
                else:
                    expires = ''
                if cookie.value is None:
                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
                    # with no name, whereas http.cookiejar regards it as a
                    # cookie with no value.
                    name = ''
                    value = cookie.name
                else:
                    name = cookie.name
                    value = cookie.value
                f.write(
                    '\t'.join([cookie.domain, initial_dot, cookie.path,
                               secure, expires, name, value]) + '\n')

    def load(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Load cookies from a file."""
        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)

        def prepare_line(line):
            if line.startswith(self._HTTPONLY_PREFIX):
                line = line[len(self._HTTPONLY_PREFIX):]
            # comments and empty lines are fine
            if line.startswith('#') or not line.strip():
                return line
            cookie_list = line.split('\t')
            if len(cookie_list) != self._ENTRY_LEN:
                raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list))
            cookie = self._CookieFileEntry(*cookie_list)
            if cookie.expires_at and not cookie.expires_at.isdigit():
                raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
            return line

        cf = io.StringIO()
        with open(filename, encoding='utf-8') as f:
            for line in f:
                try:
                    cf.write(prepare_line(line))
                except compat_cookiejar.LoadError as e:
                    if f'{line.strip()} '[0] in '[{"':
                        raise compat_cookiejar.LoadError(
                            'Cookies file must be Netscape formatted, not JSON. See '
                            'https://github.com/ytdl-org/youtube-dl#how-do-i-pass-cookies-to-youtube-dl')
                    write_string(f'WARNING: skipping cookie file entry due to {e}: {line!r}\n')
                    continue
        cf.seek(0)
        self._really_load(cf, filename, ignore_discard, ignore_expires)
        # Session cookies are denoted by the `expires` field being set either to
        # an empty string or to 0. MozillaCookieJar only recognizes the former
        # (see [1]), so we need to force the latter to be recognized as session
        # cookies on our own.
        # Session cookies may be important for cookie-based authentication:
        # usually, when a user does not tick the 'Remember me' checkbox while
        # logging in on a site, some important cookies are stored as session
        # cookies, and failing to recognize them will result in a failed login.
        # 1. https://bugs.python.org/issue17164
        for cookie in self:
            # Treat `expires=0` cookies as session cookies
            if cookie.expires == 0:
                cookie.expires = None
                cookie.discard = True


class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response


class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
    """YoutubeDL redirect handler

    The code is based on HTTPRedirectHandler implementation from CPython [1].

    This redirect handler solves two issues:
     - ensures redirect URL is always unicode under python 2
     - introduces support for experimental HTTP response status code
       308 Permanent Redirect [2] used by some sites [3]

    1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
    2. https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308
    3. https://github.com/ytdl-org/youtube-dl/issues/28768
    """

    http_error_301 = http_error_303 = http_error_307 = http_error_308 = compat_urllib_request.HTTPRedirectHandler.http_error_302

    def redirect_request(self, req, fp, code, msg, headers, newurl):
        """Return a Request or None in response to a redirect.

        This is called by the http_error_30x methods when a
        redirection response is received. If a redirection should
        take place, return a new Request to allow http_error_30x to
        perform the redirect. Otherwise, raise HTTPError if no-one
        else should try to handle this url. Return None if you can't
        but another Handler might.
        """
        m = req.get_method()
        if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
                 or code in (301, 302, 303) and m == "POST")):
            raise compat_HTTPError(req.full_url, code, msg, headers, fp)
        # Strictly (according to RFC 2616), 301 or 302 in response to
        # a POST MUST NOT cause a redirection without confirmation
        # from the user (of urllib.request, in this case). In practice,
        # essentially all clients do redirect in this case, so we do
        # the same.

        # Be conciliant with URIs containing a space. This is mainly
        # redundant with the more complete encoding done in http_error_302(),
        # but it is kept for compatibility with other callers.
        newurl = newurl.replace(' ', '%20')

        CONTENT_HEADERS = ("content-length", "content-type")
        newheaders = {k: v for k, v in req.headers.items() if k.lower() not in CONTENT_HEADERS}

        # A 303 must either use GET or HEAD for subsequent request
        # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.4
        if code == 303 and m != 'HEAD':
            m = 'GET'
        # 301 and 302 redirects are commonly turned into a GET from a POST
        # for subsequent requests by browsers, so we'll do the same.
        # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.2
        # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.3
        if code in (301, 302) and m == 'POST':
            m = 'GET'

        return compat_urllib_request.Request(
            newurl, headers=newheaders, origin_req_host=req.origin_req_host,
            unverifiable=True, method=m)


def extract_timezone(date_str):
    m = re.search(
        r'''(?x)
            ^.{8,}?                                          # >=8 char non-TZ prefix, if present
            (?P<tz>Z|                                        # just the UTC Z, or
                (?:(?<=.\b\d{4}|\b\d{2}:\d\d)|               # preceded by 4 digits or hh:mm or
                   (?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d)) # not preceded by 3 alpha word or >= 4 alpha or 2 digits
                [ ]?                                         # optional space
                (?P<sign>\+|-)                               # +/-
                (?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})   # hh[:]mm
            $)
        ''', date_str)
    if not m:
        timezone = datetime.timedelta()
    else:
        date_str = date_str[:-len(m.group('tz'))]
        if not m.group('sign'):
            timezone = datetime.timedelta()
        else:
            sign = 1 if m.group('sign') == '+' else -1
            timezone = datetime.timedelta(
                hours=sign * int(m.group('hours')),
                minutes=sign * int(m.group('minutes')))
    return timezone, date_str
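
# Illustrative example:
#   extract_timezone('2014-03-23T23:04:26+0100')
#   ->  (datetime.timedelta(seconds=3600), '2014-03-23T23:04:26')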


def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        timezone, date_str = extract_timezone(date_str)

    with contextlib.suppress(ValueError):
        date_format = f'%Y-%m-%d{delimiter}%H:%M:%S'
        dt = datetime.datetime.strptime(date_str, date_format) - timezone
        return calendar.timegm(dt.timetuple())
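
# Illustrative example:
#   parse_iso8601('2014-12-04T09:56:00Z')  ->  1417686960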


def date_formats(day_first=True):
    return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST


def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None
    upload_date = None
    # Replace commas
    date_str = date_str.replace(',', ' ')
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
    _, date_str = extract_timezone(date_str)

    for expression in date_formats(day_first):
        with contextlib.suppress(ValueError):
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
    if upload_date is None:
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            with contextlib.suppress(ValueError):
                upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
    if upload_date is not None:
        return compat_str(upload_date)
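
# Illustrative example:
#   unified_strdate('December 21, 2010')  ->  '20101221'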


def unified_timestamp(date_str, day_first=True):
    if date_str is None:
        return None

    date_str = re.sub(r'[,|]', '', date_str)

    pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
    timezone, date_str = extract_timezone(date_str)

    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    # Remove unrecognized timezones from ISO 8601 alike timestamps
    m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
    if m:
        date_str = date_str[:-len(m.group('tz'))]

    # Python only supports microseconds, so remove nanoseconds
    m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
    if m:
        date_str = m.group(1)

    for expression in date_formats(day_first):
        with contextlib.suppress(ValueError):
            dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
            return calendar.timegm(dt.timetuple())
    timetuple = email.utils.parsedate_tz(date_str)
    if timetuple:
        return calendar.timegm(timetuple) + pm_delta * 3600
1715
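# Illustrative examples (hand-checked):
#   unified_timestamp('Dec 14, 2012')              ->  1355443200
#   unified_timestamp('2014-03-23T22:04:26+0000')  ->  1395612266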
1716
1717 def determine_ext(url, default_ext='unknown_video'):
1718 if url is None or '.' not in url:
1719 return default_ext
1720 guess = url.partition('?')[0].rpartition('.')[2]
1721 if re.match(r'^[A-Za-z0-9]+$', guess):
1722 return guess
1723 # Try to extract ext from URLs like http://example.com/foo/bar.mp4/?download
1724 elif guess.rstrip('/') in KNOWN_EXTENSIONS:
1725 return guess.rstrip('/')
1726 else:
1727 return default_ext
1728
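# Illustrative examples (hand-checked; example.com URLs are hypothetical):
#   determine_ext('http://example.com/video.mp4?dl=1')         ->  'mp4'
#   determine_ext('http://example.com/foo/bar.mp4/?download')  ->  'mp4'
#   determine_ext('http://example.com/play')                   ->  'unknown_video'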
1729
1730 def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
1731 return replace_extension(filename, sub_lang + '.' + sub_format, expected_real_ext)
1732
1733
1734 def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
1735 """
1736 Return a datetime object from a string in the format YYYYMMDD or
1737 (now|today|yesterday|date)[+-][0-9]+(microsecond|second|minute|hour|day|week|month|year)(s)?
1738
1739 format: string date format used to parse date_str
1740 precision: round the time portion of a datetime object.
1741 auto|microsecond|second|minute|hour|day.
1742 auto: round to the unit provided in date_str (if applicable).
1743 """
1744 auto_precision = False
1745 if precision == 'auto':
1746 auto_precision = True
1747 precision = 'microsecond'
1748 today = datetime_round(datetime.datetime.utcnow(), precision)
1749 if date_str in ('now', 'today'):
1750 return today
1751 if date_str == 'yesterday':
1752 return today - datetime.timedelta(days=1)
1753 match = re.match(
1754 r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)(s)?',
1755 date_str)
1756 if match is not None:
1757 start_time = datetime_from_str(match.group('start'), precision, format)
1758 time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1)
1759 unit = match.group('unit')
1760 if unit in ('month', 'year'):
1761 new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time)
1762 unit = 'day'
1763 else:
1764 if unit == 'week':
1765 unit = 'day'
1766 time *= 7
1767 delta = datetime.timedelta(**{unit + 's': time})
1768 new_date = start_time + delta
1769 if auto_precision:
1770 return datetime_round(new_date, unit)
1771 return new_date
1772
1773 return datetime_round(datetime.datetime.strptime(date_str, format), precision)
1774
1775
1776 def date_from_str(date_str, format='%Y%m%d', strict=False):
1777 """
1778 Return a date object from a string in the format YYYYMMDD or
1779 (now|today|yesterday|date)[+-][0-9]+(microsecond|second|minute|hour|day|week|month|year)(s)?
1780
1781 If "strict", only (now|today)[+-][0-9]+(day|week|month|year)(s)? is allowed
1782
1783 format: string date format used to parse date_str
1784 """
1785 if strict and not re.fullmatch(r'\d{8}|(now|today)[+-]\d+(day|week|month|year)(s)?', date_str):
1786 raise ValueError(f'Invalid date format {date_str}')
1787 return datetime_from_str(date_str, precision='microsecond', format=format).date()
1788
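# Illustrative examples (hand-checked):
#   date_from_str('20200229')  ->  datetime.date(2020, 2, 29)
#   date_from_str('now-1day')  ->  the same calendar day as date_from_str('yesterday')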
1789
1790 def datetime_add_months(dt, months):
1791 """Increment/Decrement a datetime object by months."""
1792 month = dt.month + months - 1
1793 year = dt.year + month // 12
1794 month = month % 12 + 1
1795 day = min(dt.day, calendar.monthrange(year, month)[1])
1796 return dt.replace(year, month, day)
1797
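# Illustrative example (hand-checked) - the day is clamped to the target month:
#   datetime_add_months(datetime.datetime(2020, 1, 31), 1)  ->  datetime.datetime(2020, 2, 29, 0, 0)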
1798
1799 def datetime_round(dt, precision='day'):
1800 """
1801 Round a datetime object's time to a specific precision
1802 """
1803 if precision == 'microsecond':
1804 return dt
1805
1806 unit_seconds = {
1807 'day': 86400,
1808 'hour': 3600,
1809 'minute': 60,
1810 'second': 1,
1811 }
1812 roundto = lambda x, n: ((x + n / 2) // n) * n
1813 timestamp = calendar.timegm(dt.timetuple())
1814 return datetime.datetime.utcfromtimestamp(roundto(timestamp, unit_seconds[precision]))
1815
1816
1817 def hyphenate_date(date_str):
1818 """
1819 Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
1820 match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
1821 if match is not None:
1822 return '-'.join(match.groups())
1823 else:
1824 return date_str
1825
1826
1827 class DateRange:
1828 """Represents a time interval between two dates"""
1829
1830 def __init__(self, start=None, end=None):
1831 """start and end must be strings in the format accepted by date"""
1832 if start is not None:
1833 self.start = date_from_str(start, strict=True)
1834 else:
1835 self.start = datetime.datetime.min.date()
1836 if end is not None:
1837 self.end = date_from_str(end, strict=True)
1838 else:
1839 self.end = datetime.datetime.max.date()
1840 if self.start > self.end:
1841 raise ValueError('Date range: "%s": the start date must be before the end date' % self)
1842
1843 @classmethod
1844 def day(cls, day):
1845 """Returns a range that only contains the given day"""
1846 return cls(day, day)
1847
1848 def __contains__(self, date):
1849 """Check if the date is in the range"""
1850 if not isinstance(date, datetime.date):
1851 date = date_from_str(date)
1852 return self.start <= date <= self.end
1853
1854 def __str__(self):
1855 return f'{self.start.isoformat()} - {self.end.isoformat()}'
1856
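# Illustrative usage (hand-checked; the dates are arbitrary):
#   rng = DateRange('20220101', '20220630')
#   '20220315' in rng  ->  True
#   '20220701' in rng  ->  False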
1857
1858 def platform_name():
1859 """ Returns the platform name as a compat_str """
1860 res = platform.platform()
1861 if isinstance(res, bytes):
1862 res = res.decode(preferredencoding())
1863
1864 assert isinstance(res, compat_str)
1865 return res
1866
1867
1868 def get_windows_version():
1869 ''' Get Windows version. None if it's not running on Windows '''
1870 if compat_os_name == 'nt':
1871 return version_tuple(platform.win32_ver()[1])
1872 else:
1873 return None
1874
1875
1876 def write_string(s, out=None, encoding=None):
1877 assert isinstance(s, str)
1878 out = out or sys.stderr
1879
1880 from .compat import WINDOWS_VT_MODE # Must be imported locally
1881 if WINDOWS_VT_MODE:
1882 s = re.sub(r'([\r\n]+)', r' \1', s)
1883
1884 if 'b' in getattr(out, 'mode', ''):
1885 byt = s.encode(encoding or preferredencoding(), 'ignore')
1886 out.write(byt)
1887 elif hasattr(out, 'buffer'):
1888 enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
1889 byt = s.encode(enc, 'ignore')
1890 out.buffer.write(byt)
1891 else:
1892 out.write(s)
1893 out.flush()
1894
1895
1896 def bytes_to_intlist(bs):
1897 if not bs:
1898 return []
1899 if isinstance(bs[0], int): # Python 3
1900 return list(bs)
1901 else:
1902 return [ord(c) for c in bs]
1903
1904
1905 def intlist_to_bytes(xs):
1906 if not xs:
1907 return b''
1908 return compat_struct_pack('%dB' % len(xs), *xs)
1909
1910
1911 class LockingUnsupportedError(IOError):
1912 msg = 'File locking is not supported on this platform'
1913
1914 def __init__(self):
1915 super().__init__(self.msg)
1916
1917
1918 # Cross-platform file locking
1919 if sys.platform == 'win32':
1920 import ctypes.wintypes
1921 import msvcrt
1922
1923 class OVERLAPPED(ctypes.Structure):
1924 _fields_ = [
1925 ('Internal', ctypes.wintypes.LPVOID),
1926 ('InternalHigh', ctypes.wintypes.LPVOID),
1927 ('Offset', ctypes.wintypes.DWORD),
1928 ('OffsetHigh', ctypes.wintypes.DWORD),
1929 ('hEvent', ctypes.wintypes.HANDLE),
1930 ]
1931
1932 kernel32 = ctypes.windll.kernel32
1933 LockFileEx = kernel32.LockFileEx
1934 LockFileEx.argtypes = [
1935 ctypes.wintypes.HANDLE, # hFile
1936 ctypes.wintypes.DWORD, # dwFlags
1937 ctypes.wintypes.DWORD, # dwReserved
1938 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
1939 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
1940 ctypes.POINTER(OVERLAPPED) # Overlapped
1941 ]
1942 LockFileEx.restype = ctypes.wintypes.BOOL
1943 UnlockFileEx = kernel32.UnlockFileEx
1944 UnlockFileEx.argtypes = [
1945 ctypes.wintypes.HANDLE, # hFile
1946 ctypes.wintypes.DWORD, # dwReserved
1947 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
1948 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
1949 ctypes.POINTER(OVERLAPPED) # Overlapped
1950 ]
1951 UnlockFileEx.restype = ctypes.wintypes.BOOL
1952 whole_low = 0xffffffff
1953 whole_high = 0x7fffffff
1954
1955 def _lock_file(f, exclusive, block):
1956 overlapped = OVERLAPPED()
1957 overlapped.Offset = 0
1958 overlapped.OffsetHigh = 0
1959 overlapped.hEvent = 0
1960 f._lock_file_overlapped_p = ctypes.pointer(overlapped)
1961
1962 if not LockFileEx(msvcrt.get_osfhandle(f.fileno()),
1963 (0x2 if exclusive else 0x0) | (0x0 if block else 0x1),
1964 0, whole_low, whole_high, f._lock_file_overlapped_p):
1965 raise BlockingIOError('Locking file failed: %r' % ctypes.FormatError())
1966
1967 def _unlock_file(f):
1968 assert f._lock_file_overlapped_p
1969 handle = msvcrt.get_osfhandle(f.fileno())
1970 if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p):
1971 raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
1972
1973 else:
1974 try:
1975 import fcntl
1976
1977 def _lock_file(f, exclusive, block):
1978 flags = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
1979 if not block:
1980 flags |= fcntl.LOCK_NB
1981 try:
1982 fcntl.flock(f, flags)
1983 except BlockingIOError:
1984 raise
1985 except OSError: # AOSP does not have flock()
1986 fcntl.lockf(f, flags)
1987
1988 def _unlock_file(f):
1989 try:
1990 fcntl.flock(f, fcntl.LOCK_UN)
1991 except OSError:
1992 fcntl.lockf(f, fcntl.LOCK_UN)
1993
1994 except ImportError:
1995
1996 def _lock_file(f, exclusive, block):
1997 raise LockingUnsupportedError()
1998
1999 def _unlock_file(f):
2000 raise LockingUnsupportedError()
2001
2002
2003 class locked_file:
2004 locked = False
2005
2006 def __init__(self, filename, mode, block=True, encoding=None):
2007 if mode not in {'r', 'rb', 'a', 'ab', 'w', 'wb'}:
2008 raise NotImplementedError(mode)
2009 self.mode, self.block = mode, block
2010
2011 writable = any(f in mode for f in 'wax+')
2012 readable = any(f in mode for f in 'r+')
2013 flags = functools.reduce(operator.ior, (
2014 getattr(os, 'O_CLOEXEC', 0), # UNIX only
2015 getattr(os, 'O_BINARY', 0), # Windows only
2016 getattr(os, 'O_NOINHERIT', 0), # Windows only
2017 os.O_CREAT if writable else 0, # O_TRUNC only after locking
2018 os.O_APPEND if 'a' in mode else 0,
2019 os.O_EXCL if 'x' in mode else 0,
2020 os.O_RDONLY if not writable else os.O_RDWR if readable else os.O_WRONLY,
2021 ))
2022
2023 self.f = os.fdopen(os.open(filename, flags, 0o666), mode, encoding=encoding)
2024
2025 def __enter__(self):
2026 exclusive = 'r' not in self.mode
2027 try:
2028 _lock_file(self.f, exclusive, self.block)
2029 self.locked = True
2030 except OSError:
2031 self.f.close()
2032 raise
2033 if 'w' in self.mode:
2034 try:
2035 self.f.truncate()
2036 except OSError as e:
2037 if e.errno != 29: # Illegal seek, expected when self.f is a FIFO
2038 raise e
2039 return self
2040
2041 def unlock(self):
2042 if not self.locked:
2043 return
2044 try:
2045 _unlock_file(self.f)
2046 finally:
2047 self.locked = False
2048
2049 def __exit__(self, *_):
2050 try:
2051 self.unlock()
2052 finally:
2053 self.f.close()
2054
2055 open = __enter__
2056 close = __exit__
2057
2058 def __getattr__(self, attr):
2059 return getattr(self.f, attr)
2060
2061 def __iter__(self):
2062 return iter(self.f)
2063
2064
2065 def get_filesystem_encoding():
2066 encoding = sys.getfilesystemencoding()
2067 return encoding if encoding is not None else 'utf-8'
2068
2069
2070 def shell_quote(args):
2071 quoted_args = []
2072 encoding = get_filesystem_encoding()
2073 for a in args:
2074 if isinstance(a, bytes):
2075 # We may get a filename encoded with 'encodeFilename'
2076 a = a.decode(encoding)
2077 quoted_args.append(compat_shlex_quote(a))
2078 return ' '.join(quoted_args)
2079
2080
2081 def smuggle_url(url, data):
2082 """ Pass additional data in a URL for internal use. """
2083
2084 url, idata = unsmuggle_url(url, {})
2085 data.update(idata)
2086 sdata = compat_urllib_parse_urlencode(
2087 {'__youtubedl_smuggle': json.dumps(data)})
2088 return url + '#' + sdata
2089
2090
2091 def unsmuggle_url(smug_url, default=None):
2092 if '#__youtubedl_smuggle' not in smug_url:
2093 return smug_url, default
2094 url, _, sdata = smug_url.rpartition('#')
2095 jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
2096 data = json.loads(jsond)
2097 return url, data
2098
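# Illustrative round-trip (hand-checked; the payload key is arbitrary):
#   url = smuggle_url('https://example.com/video', {'source': 'embed'})
#   unsmuggle_url(url)  ->  ('https://example.com/video', {'source': 'embed'})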
2099
2100 def format_decimal_suffix(num, fmt='%d%s', *, factor=1000):
2101 """ Formats numbers with decimal sufixes like K, M, etc """
2102 num, factor = float_or_none(num), float(factor)
2103 if num is None or num < 0:
2104 return None
2105 POSSIBLE_SUFFIXES = 'kMGTPEZY'
2106 exponent = 0 if num == 0 else min(int(math.log(num, factor)), len(POSSIBLE_SUFFIXES))
2107 suffix = ['', *POSSIBLE_SUFFIXES][exponent]
2108 if factor == 1024:
2109 suffix = {'k': 'Ki', '': ''}.get(suffix, f'{suffix}i')
2110 converted = num / (factor ** exponent)
2111 return fmt % (converted, suffix)
2112
2113
2114 def format_bytes(bytes):
2115 return format_decimal_suffix(bytes, '%.2f%sB', factor=1024) or 'N/A'
2116
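# Illustrative examples (hand-checked):
#   format_decimal_suffix(1234, '%.1f%s')  ->  '1.2k'
#   format_bytes(1024)                     ->  '1.00KiB'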
2117
2118 def lookup_unit_table(unit_table, s):
2119 units_re = '|'.join(re.escape(u) for u in unit_table)
2120 m = re.match(
2121 r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
2122 if not m:
2123 return None
2124 num_str = m.group('num').replace(',', '.')
2125 mult = unit_table[m.group('unit')]
2126 return int(float(num_str) * mult)
2127
2128
2129 def parse_filesize(s):
2130 if s is None:
2131 return None
2132
2133 # The lower-case forms are of course incorrect and unofficial,
2134 # but we support those too
2135 _UNIT_TABLE = {
2136 'B': 1,
2137 'b': 1,
2138 'bytes': 1,
2139 'KiB': 1024,
2140 'KB': 1000,
2141 'kB': 1024,
2142 'Kb': 1000,
2143 'kb': 1000,
2144 'kilobytes': 1000,
2145 'kibibytes': 1024,
2146 'MiB': 1024 ** 2,
2147 'MB': 1000 ** 2,
2148 'mB': 1024 ** 2,
2149 'Mb': 1000 ** 2,
2150 'mb': 1000 ** 2,
2151 'megabytes': 1000 ** 2,
2152 'mebibytes': 1024 ** 2,
2153 'GiB': 1024 ** 3,
2154 'GB': 1000 ** 3,
2155 'gB': 1024 ** 3,
2156 'Gb': 1000 ** 3,
2157 'gb': 1000 ** 3,
2158 'gigabytes': 1000 ** 3,
2159 'gibibytes': 1024 ** 3,
2160 'TiB': 1024 ** 4,
2161 'TB': 1000 ** 4,
2162 'tB': 1024 ** 4,
2163 'Tb': 1000 ** 4,
2164 'tb': 1000 ** 4,
2165 'terabytes': 1000 ** 4,
2166 'tebibytes': 1024 ** 4,
2167 'PiB': 1024 ** 5,
2168 'PB': 1000 ** 5,
2169 'pB': 1024 ** 5,
2170 'Pb': 1000 ** 5,
2171 'pb': 1000 ** 5,
2172 'petabytes': 1000 ** 5,
2173 'pebibytes': 1024 ** 5,
2174 'EiB': 1024 ** 6,
2175 'EB': 1000 ** 6,
2176 'eB': 1024 ** 6,
2177 'Eb': 1000 ** 6,
2178 'eb': 1000 ** 6,
2179 'exabytes': 1000 ** 6,
2180 'exbibytes': 1024 ** 6,
2181 'ZiB': 1024 ** 7,
2182 'ZB': 1000 ** 7,
2183 'zB': 1024 ** 7,
2184 'Zb': 1000 ** 7,
2185 'zb': 1000 ** 7,
2186 'zettabytes': 1000 ** 7,
2187 'zebibytes': 1024 ** 7,
2188 'YiB': 1024 ** 8,
2189 'YB': 1000 ** 8,
2190 'yB': 1024 ** 8,
2191 'Yb': 1000 ** 8,
2192 'yb': 1000 ** 8,
2193 'yottabytes': 1000 ** 8,
2194 'yobibytes': 1024 ** 8,
2195 }
2196
2197 return lookup_unit_table(_UNIT_TABLE, s)
2198
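# Illustrative examples (hand-checked):
#   parse_filesize('2 MiB')  ->  2097152
#   parse_filesize('5 GB')   ->  5000000000
#   parse_filesize('1.2Tb')  ->  1200000000000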
2199
2200 def parse_count(s):
2201 if s is None:
2202 return None
2203
2204 s = re.sub(r'^[^\d]+\s', '', s).strip()
2205
2206 if re.match(r'^[\d,.]+$', s):
2207 return str_to_int(s)
2208
2209 _UNIT_TABLE = {
2210 'k': 1000,
2211 'K': 1000,
2212 'm': 1000 ** 2,
2213 'M': 1000 ** 2,
2214 'kk': 1000 ** 2,
2215 'KK': 1000 ** 2,
2216 'b': 1000 ** 3,
2217 'B': 1000 ** 3,
2218 }
2219
2220 ret = lookup_unit_table(_UNIT_TABLE, s)
2221 if ret is not None:
2222 return ret
2223
2224 mobj = re.match(r'([\d,.]+)(?:$|\s)', s)
2225 if mobj:
2226 return str_to_int(mobj.group(1))
2227
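# Illustrative examples (hand-checked):
#   parse_count('1,384')  ->  1384
#   parse_count('1.1M')   ->  1100000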
2228
2229 def parse_resolution(s, *, lenient=False):
2230 if s is None:
2231 return {}
2232
2233 if lenient:
2234 mobj = re.search(r'(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)', s)
2235 else:
2236 mobj = re.search(r'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s)
2237 if mobj:
2238 return {
2239 'width': int(mobj.group('w')),
2240 'height': int(mobj.group('h')),
2241 }
2242
2243 mobj = re.search(r'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s)
2244 if mobj:
2245 return {'height': int(mobj.group(1))}
2246
2247 mobj = re.search(r'\b([48])[kK]\b', s)
2248 if mobj:
2249 return {'height': int(mobj.group(1)) * 540}
2250
2251 return {}
2252
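# Illustrative examples (hand-checked):
#   parse_resolution('1920x1080')  ->  {'width': 1920, 'height': 1080}
#   parse_resolution('720p')       ->  {'height': 720}
#   parse_resolution('4K')         ->  {'height': 2160}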
2253
2254 def parse_bitrate(s):
2255 if not isinstance(s, compat_str):
2256 return
2257 mobj = re.search(r'\b(\d+)\s*kbps', s)
2258 if mobj:
2259 return int(mobj.group(1))
2260
2261
2262 def month_by_name(name, lang='en'):
2263 """ Return the number of a month by (locale-independently) English name """
2264
2265 month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
2266
2267 try:
2268 return month_names.index(name) + 1
2269 except ValueError:
2270 return None
2271
2272
2273 def month_by_abbreviation(abbrev):
2274 """ Return the number of a month by (locale-independently) English
2275 abbreviations """
2276
2277 try:
2278 return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
2279 except ValueError:
2280 return None
2281
2282
2283 def fix_xml_ampersands(xml_str):
2284 """Replace all the '&' by '&amp;' in XML"""
2285 return re.sub(
2286 r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
2287 '&amp;',
2288 xml_str)
2289
2290
2291 def setproctitle(title):
2292 assert isinstance(title, compat_str)
2293
2294 # ctypes in Jython is not complete
2295 # http://bugs.jython.org/issue2148
2296 if sys.platform.startswith('java'):
2297 return
2298
2299 try:
2300 libc = ctypes.cdll.LoadLibrary('libc.so.6')
2301 except OSError:
2302 return
2303 except TypeError:
2304 # LoadLibrary in Windows Python 2.7.13 only expects
2305 # a bytestring, but since unicode_literals turns
2306 # every string into a unicode string, it fails.
2307 return
2308 title_bytes = title.encode('utf-8')
2309 buf = ctypes.create_string_buffer(len(title_bytes))
2310 buf.value = title_bytes
2311 try:
2312 libc.prctl(15, buf, 0, 0, 0)
2313 except AttributeError:
2314 return # Strange libc, just skip this
2315
2316
2317 def remove_start(s, start):
2318 return s[len(start):] if s is not None and s.startswith(start) else s
2319
2320
2321 def remove_end(s, end):
2322 return s[:-len(end)] if s is not None and s.endswith(end) else s
2323
2324
2325 def remove_quotes(s):
2326 if s is None or len(s) < 2:
2327 return s
2328 for quote in ('"', "'", ):
2329 if s[0] == quote and s[-1] == quote:
2330 return s[1:-1]
2331 return s
2332
2333
2334 def get_domain(url):
2335 domain = re.match(r'(?:https?:\/\/)?(?:www\.)?(?P<domain>[^\n\/]+\.[^\n\/]+)(?:\/(.*))?', url)
2336 return domain.group('domain') if domain else None
2337
2338
2339 def url_basename(url):
2340 path = compat_urlparse.urlparse(url).path
2341 return path.strip('/').split('/')[-1]
2342
2343
2344 def base_url(url):
2345 return re.match(r'https?://[^?#&]+/', url).group()
2346
2347
2348 def urljoin(base, path):
2349 if isinstance(path, bytes):
2350 path = path.decode('utf-8')
2351 if not isinstance(path, compat_str) or not path:
2352 return None
2353 if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
2354 return path
2355 if isinstance(base, bytes):
2356 base = base.decode('utf-8')
2357 if not isinstance(base, compat_str) or not re.match(
2358 r'^(?:https?:)?//', base):
2359 return None
2360 return compat_urlparse.urljoin(base, path)
2361
2362
2363 class HEADRequest(compat_urllib_request.Request):
2364 def get_method(self):
2365 return 'HEAD'
2366
2367
2368 class PUTRequest(compat_urllib_request.Request):
2369 def get_method(self):
2370 return 'PUT'
2371
2372
2373 def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
2374 if get_attr and v is not None:
2375 v = getattr(v, get_attr, None)
2376 try:
2377 return int(v) * invscale // scale
2378 except (ValueError, TypeError, OverflowError):
2379 return default
2380
2381
2382 def str_or_none(v, default=None):
2383 return default if v is None else compat_str(v)
2384
2385
2386 def str_to_int(int_str):
2387 """ A more relaxed version of int_or_none """
2388 if isinstance(int_str, int):
2389 return int_str
2390 elif isinstance(int_str, compat_str):
2391 int_str = re.sub(r'[,\.\+]', '', int_str)
2392 return int_or_none(int_str)
2393
2394
2395 def float_or_none(v, scale=1, invscale=1, default=None):
2396 if v is None:
2397 return default
2398 try:
2399 return float(v) * invscale / scale
2400 except (ValueError, TypeError):
2401 return default
2402
2403
2404 def bool_or_none(v, default=None):
2405 return v if isinstance(v, bool) else default
2406
2407
2408 def strip_or_none(v, default=None):
2409 return v.strip() if isinstance(v, compat_str) else default
2410
2411
2412 def url_or_none(url):
2413 if not url or not isinstance(url, compat_str):
2414 return None
2415 url = url.strip()
2416 return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None
2417
2418
2419 def request_to_url(req):
2420 if isinstance(req, compat_urllib_request.Request):
2421 return req.get_full_url()
2422 else:
2423 return req
2424
2425
2426 def strftime_or_none(timestamp, date_format, default=None):
2427 datetime_object = None
2428 try:
2429 if isinstance(timestamp, (int, float)): # unix timestamp
2430 datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
2431 elif isinstance(timestamp, compat_str): # assume YYYYMMDD
2432 datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
2433 return datetime_object.strftime(date_format)
2434 except (ValueError, TypeError, AttributeError):
2435 return default
2436
2437
2438 def parse_duration(s):
2439 if not isinstance(s, str):
2440 return None
2441 s = s.strip()
2442 if not s:
2443 return None
2444
2445 days, hours, mins, secs, ms = [None] * 5
2446 m = re.match(r'''(?x)
2447 (?P<before_secs>
2448 (?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?
2449 (?P<secs>(?(before_secs)[0-9]{1,2}|[0-9]+))
2450 (?P<ms>[.:][0-9]+)?Z?$
2451 ''', s)
2452 if m:
2453 days, hours, mins, secs, ms = m.group('days', 'hours', 'mins', 'secs', 'ms')
2454 else:
2455 m = re.match(
2456 r'''(?ix)(?:P?
2457 (?:
2458 [0-9]+\s*y(?:ears?)?,?\s*
2459 )?
2460 (?:
2461 [0-9]+\s*m(?:onths?)?,?\s*
2462 )?
2463 (?:
2464 [0-9]+\s*w(?:eeks?)?,?\s*
2465 )?
2466 (?:
2467 (?P<days>[0-9]+)\s*d(?:ays?)?,?\s*
2468 )?
2469 T)?
2470 (?:
2471 (?P<hours>[0-9]+)\s*h(?:ours?)?,?\s*
2472 )?
2473 (?:
2474 (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?,?\s*
2475 )?
2476 (?:
2477 (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
2478 )?Z?$''', s)
2479 if m:
2480 days, hours, mins, secs, ms = m.groups()
2481 else:
2482 m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
2483 if m:
2484 hours, mins = m.groups()
2485 else:
2486 return None
2487
2488 if ms:
2489 ms = ms.replace(':', '.')
2490 return sum(float(part or 0) * mult for part, mult in (
2491 (days, 86400), (hours, 3600), (mins, 60), (secs, 1), (ms, 1)))
2492
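# Illustrative examples (hand-checked):
#   parse_duration('9:12:43')  ->  33163
#   parse_duration('3 min')    ->  180
#   parse_duration('PT1H30M')  ->  5400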
2493
2494 def prepend_extension(filename, ext, expected_real_ext=None):
2495 name, real_ext = os.path.splitext(filename)
2496 return (
2497 f'{name}.{ext}{real_ext}'
2498 if not expected_real_ext or real_ext[1:] == expected_real_ext
2499 else f'{filename}.{ext}')
2500
2501
2502 def replace_extension(filename, ext, expected_real_ext=None):
2503 name, real_ext = os.path.splitext(filename)
2504 return '{}.{}'.format(
2505 name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
2506 ext)
2507
2508
2509 def check_executable(exe, args=[]):
2510 """ Checks if the given binary is installed somewhere in PATH, and returns its name.
2511 args can be a list of arguments for a short output (like -version) """
2512 try:
2513 Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate_or_kill()
2514 except OSError:
2515 return False
2516 return exe
2517
2518
2519 def _get_exe_version_output(exe, args, *, to_screen=None):
2520 if to_screen:
2521 to_screen(f'Checking exe version: {shell_quote([exe] + args)}')
2522 try:
2523 # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
2524 # SIGTTOU if yt-dlp is run in the background.
2525 # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
2526 out, _ = Popen(
2527 [encodeArgument(exe)] + args, stdin=subprocess.PIPE,
2528 stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate_or_kill()
2529 except OSError:
2530 return False
2531 if isinstance(out, bytes): # Python 2.x
2532 out = out.decode('ascii', 'ignore')
2533 return out
2534
2535
2536 def detect_exe_version(output, version_re=None, unrecognized='present'):
2537 assert isinstance(output, compat_str)
2538 if version_re is None:
2539 version_re = r'version\s+([-0-9._a-zA-Z]+)'
2540 m = re.search(version_re, output)
2541 if m:
2542 return m.group(1)
2543 else:
2544 return unrecognized
2545
2546
2547 def get_exe_version(exe, args=['--version'],
2548 version_re=None, unrecognized='present'):
2549 """ Returns the version of the specified executable,
2550 or False if the executable is not present """
2551 out = _get_exe_version_output(exe, args)
2552 return detect_exe_version(out, version_re, unrecognized) if out else False
2553
2554
2555 class LazyList(collections.abc.Sequence):
2556 ''' Lazy immutable list from an iterable.
2557 Note that slices of a LazyList are lists, not LazyLists'''
2558
2559 class IndexError(IndexError):
2560 pass
2561
2562 def __init__(self, iterable, *, reverse=False, _cache=None):
2563 self.__iterable = iter(iterable)
2564 self.__cache = [] if _cache is None else _cache
2565 self.__reversed = reverse
2566
2567 def __iter__(self):
2568 if self.__reversed:
2569 # We need to consume the entire iterable to iterate in reverse
2570 yield from self.exhaust()
2571 return
2572 yield from self.__cache
2573 for item in self.__iterable:
2574 self.__cache.append(item)
2575 yield item
2576
2577 def __exhaust(self):
2578 self.__cache.extend(self.__iterable)
2579 # Discard the emptied iterable to make it pickle-able
2580 self.__iterable = []
2581 return self.__cache
2582
2583 def exhaust(self):
2584 ''' Evaluate the entire iterable '''
2585 return self.__exhaust()[::-1 if self.__reversed else 1]
2586
2587 @staticmethod
2588 def __reverse_index(x):
2589 return None if x is None else -(x + 1)
2590
2591 def __getitem__(self, idx):
2592 if isinstance(idx, slice):
2593 if self.__reversed:
2594 idx = slice(self.__reverse_index(idx.start), self.__reverse_index(idx.stop), -(idx.step or 1))
2595 start, stop, step = idx.start, idx.stop, idx.step or 1
2596 elif isinstance(idx, int):
2597 if self.__reversed:
2598 idx = self.__reverse_index(idx)
2599 start, stop, step = idx, idx, 0
2600 else:
2601 raise TypeError('indices must be integers or slices')
2602 if ((start or 0) < 0 or (stop or 0) < 0
2603 or (start is None and step < 0)
2604 or (stop is None and step > 0)):
2605 # We need to consume the entire iterable to be able to slice from the end
2606 # Obviously, never use this with infinite iterables
2607 self.__exhaust()
2608 try:
2609 return self.__cache[idx]
2610 except IndexError as e:
2611 raise self.IndexError(e) from e
2612 n = max(start or 0, stop or 0) - len(self.__cache) + 1
2613 if n > 0:
2614 self.__cache.extend(itertools.islice(self.__iterable, n))
2615 try:
2616 return self.__cache[idx]
2617 except IndexError as e:
2618 raise self.IndexError(e) from e
2619
2620 def __bool__(self):
2621 try:
2622 self[-1] if self.__reversed else self[0]
2623 except self.IndexError:
2624 return False
2625 return True
2626
2627 def __len__(self):
2628 self.__exhaust()
2629 return len(self.__cache)
2630
2631 def __reversed__(self):
2632 return type(self)(self.__iterable, reverse=not self.__reversed, _cache=self.__cache)
2633
2634 def __copy__(self):
2635 return type(self)(self.__iterable, reverse=self.__reversed, _cache=self.__cache)
2636
2637 def __repr__(self):
2638 # repr and str should mimic a list. So we exhaust the iterable
2639 return repr(self.exhaust())
2640
2641 def __str__(self):
2642 return repr(self.exhaust())
2643
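# Illustrative usage (hand-checked) - safe even on infinite iterators, as long
# as no negative or unbounded access forces exhaustion:
#   ll = LazyList(itertools.count())
#   ll[3]   ->  3       # consumes only the first four items
#   ll[:2]  ->  [0, 1]  # slices are plain lists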
2644
2645 class PagedList:
2646
2647 class IndexError(IndexError):
2648 pass
2649
2650 def __len__(self):
2651 # This is only useful for tests
2652 return len(self.getslice())
2653
2654 def __init__(self, pagefunc, pagesize, use_cache=True):
2655 self._pagefunc = pagefunc
2656 self._pagesize = pagesize
2657 self._pagecount = float('inf')
2658 self._use_cache = use_cache
2659 self._cache = {}
2660
2661 def getpage(self, pagenum):
2662 page_results = self._cache.get(pagenum)
2663 if page_results is None:
2664 page_results = [] if pagenum > self._pagecount else list(self._pagefunc(pagenum))
2665 if self._use_cache:
2666 self._cache[pagenum] = page_results
2667 return page_results
2668
2669 def getslice(self, start=0, end=None):
2670 return list(self._getslice(start, end))
2671
2672 def _getslice(self, start, end):
2673 raise NotImplementedError('This method must be implemented by subclasses')
2674
2675 def __getitem__(self, idx):
2676 assert self._use_cache, 'Indexing PagedList requires cache'
2677 if not isinstance(idx, int) or idx < 0:
2678 raise TypeError('indices must be non-negative integers')
2679 entries = self.getslice(idx, idx + 1)
2680 if not entries:
2681 raise self.IndexError()
2682 return entries[0]
2683
2684
2685 class OnDemandPagedList(PagedList):
2686 """Download pages until a page with less than maximum results"""
2687
2688 def _getslice(self, start, end):
2689 for pagenum in itertools.count(start // self._pagesize):
2690 firstid = pagenum * self._pagesize
2691 nextfirstid = pagenum * self._pagesize + self._pagesize
2692 if start >= nextfirstid:
2693 continue
2694
2695 startv = (
2696 start % self._pagesize
2697 if firstid <= start < nextfirstid
2698 else 0)
2699 endv = (
2700 ((end - 1) % self._pagesize) + 1
2701 if (end is not None and firstid <= end <= nextfirstid)
2702 else None)
2703
2704 try:
2705 page_results = self.getpage(pagenum)
2706 except Exception:
2707 self._pagecount = pagenum - 1
2708 raise
2709 if startv != 0 or endv is not None:
2710 page_results = page_results[startv:endv]
2711 yield from page_results
2712
2713 # A little optimization - if the current page is not "full", i.e. does
2714 # not contain page_size videos, then we can assume that this page
2715 # is the last one - there are no more ids on further pages -
2716 # so there is no need to query again.
2717 if len(page_results) + startv < self._pagesize:
2718 break
2719
2720 # If we got the whole page, but the next page is not interesting,
2721 # break out early as well
2722 if end == nextfirstid:
2723 break
2724
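# Illustrative usage (hand-checked; `pages` is a hypothetical 3-per-page source):
#   pages = [[0, 1, 2], [3, 4, 5], [6]]
#   pl = OnDemandPagedList(lambda n: pages[n] if n < len(pages) else [], 3)
#   pl.getslice(2, 5)  ->  [2, 3, 4]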
2725
2726 class InAdvancePagedList(PagedList):
2727 """PagedList with total number of pages known in advance"""
2728
2729 def __init__(self, pagefunc, pagecount, pagesize):
2730 PagedList.__init__(self, pagefunc, pagesize, True)
2731 self._pagecount = pagecount
2732
2733 def _getslice(self, start, end):
2734 start_page = start // self._pagesize
2735 end_page = self._pagecount if end is None else min(self._pagecount, end // self._pagesize + 1)
2736 skip_elems = start - start_page * self._pagesize
2737 only_more = None if end is None else end - start
2738 for pagenum in range(start_page, end_page):
2739 page_results = self.getpage(pagenum)
2740 if skip_elems:
2741 page_results = page_results[skip_elems:]
2742 skip_elems = None
2743 if only_more is not None:
2744 if len(page_results) < only_more:
2745 only_more -= len(page_results)
2746 else:
2747 yield from page_results[:only_more]
2748 break
2749 yield from page_results
2750
2751
2752 def uppercase_escape(s):
2753 unicode_escape = codecs.getdecoder('unicode_escape')
2754 return re.sub(
2755 r'\\U[0-9a-fA-F]{8}',
2756 lambda m: unicode_escape(m.group(0))[0],
2757 s)
2758
2759
2760 def lowercase_escape(s):
2761 unicode_escape = codecs.getdecoder('unicode_escape')
2762 return re.sub(
2763 r'\\u[0-9a-fA-F]{4}',
2764 lambda m: unicode_escape(m.group(0))[0],
2765 s)
2766
2767
2768 def escape_rfc3986(s):
2769 """Escape non-ASCII characters as suggested by RFC 3986"""
2770 return urllib.parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
2771
2772
2773 def escape_url(url):
2774 """Escape URL as suggested by RFC 3986"""
2775 url_parsed = compat_urllib_parse_urlparse(url)
2776 return url_parsed._replace(
2777 netloc=url_parsed.netloc.encode('idna').decode('ascii'),
2778 path=escape_rfc3986(url_parsed.path),
2779 params=escape_rfc3986(url_parsed.params),
2780 query=escape_rfc3986(url_parsed.query),
2781 fragment=escape_rfc3986(url_parsed.fragment)
2782 ).geturl()
2783
2784
2785 def parse_qs(url):
2786 return compat_parse_qs(compat_urllib_parse_urlparse(url).query)
2787
2788
2789 def read_batch_urls(batch_fd):
2790 def fixup(url):
2791 if not isinstance(url, compat_str):
2792 url = url.decode('utf-8', 'replace')
2793 BOM_UTF8 = ('\xef\xbb\xbf', '\ufeff')
2794 for bom in BOM_UTF8:
2795 if url.startswith(bom):
2796 url = url[len(bom):]
2797 url = url.lstrip()
2798 if not url or url.startswith(('#', ';', ']')):
2799 return False
2800 # "#" cannot be stripped out since it is part of the URI
2801 # However, it can be safely stripped out if following a whitespace
2802 return re.split(r'\s#', url, 1)[0].rstrip()
2803
2804 with contextlib.closing(batch_fd) as fd:
2805 return [url for url in map(fixup, fd) if url]
2806
2807
2808 def urlencode_postdata(*args, **kargs):
2809 return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
2810
2811
2812 def update_url_query(url, query):
2813 if not query:
2814 return url
2815 parsed_url = compat_urlparse.urlparse(url)
2816 qs = compat_parse_qs(parsed_url.query)
2817 qs.update(query)
2818 return compat_urlparse.urlunparse(parsed_url._replace(
2819 query=compat_urllib_parse_urlencode(qs, True)))
2820
2821
2822 def update_Request(req, url=None, data=None, headers={}, query={}):
2823 req_headers = req.headers.copy()
2824 req_headers.update(headers)
2825 req_data = data or req.data
2826 req_url = update_url_query(url or req.get_full_url(), query)
2827 req_get_method = req.get_method()
2828 if req_get_method == 'HEAD':
2829 req_type = HEADRequest
2830 elif req_get_method == 'PUT':
2831 req_type = PUTRequest
2832 else:
2833 req_type = compat_urllib_request.Request
2834 new_req = req_type(
2835 req_url, data=req_data, headers=req_headers,
2836 origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
2837 if hasattr(req, 'timeout'):
2838 new_req.timeout = req.timeout
2839 return new_req
2840
2841
2842 def _multipart_encode_impl(data, boundary):
2843 content_type = 'multipart/form-data; boundary=%s' % boundary
2844
2845 out = b''
2846 for k, v in data.items():
2847 out += b'--' + boundary.encode('ascii') + b'\r\n'
2848 if isinstance(k, compat_str):
2849 k = k.encode('utf-8')
2850 if isinstance(v, compat_str):
2851 v = v.encode('utf-8')
2852 # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
2853 # suggests sending UTF-8 directly. Firefox sends UTF-8, too
2854 content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
2855 if boundary.encode('ascii') in content:
2856 raise ValueError('Boundary overlaps with data')
2857 out += content
2858
2859 out += b'--' + boundary.encode('ascii') + b'--\r\n'
2860
2861 return out, content_type
2862
2863
2864 def multipart_encode(data, boundary=None):
2865 '''
2866 Encode a dict to RFC 7578-compliant form-data
2867
2868 data:
2869 A dict where keys and values can be either Unicode or bytes-like
2870 objects.
2871 boundary:
2872 If specified, a Unicode object used as the boundary. Otherwise
2873 a random boundary is generated.
2874
2875 Reference: https://tools.ietf.org/html/rfc7578
2876 '''
2877 has_specified_boundary = boundary is not None
2878
2879 while True:
2880 if boundary is None:
2881 boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
2882
2883 try:
2884 out, content_type = _multipart_encode_impl(data, boundary)
2885 break
2886 except ValueError:
2887 if has_specified_boundary:
2888 raise
2889 boundary = None
2890
2891 return out, content_type
2892
2893
2894 def dict_get(d, key_or_keys, default=None, skip_false_values=True):
2895 for val in map(d.get, variadic(key_or_keys)):
2896 if val is not None and (val or not skip_false_values):
2897 return val
2898 return default
2899
2900
2901 def try_call(*funcs, expected_type=None, args=[], kwargs={}):
2902 for f in funcs:
2903 try:
2904 val = f(*args, **kwargs)
2905 except (AttributeError, KeyError, TypeError, IndexError, ZeroDivisionError):
2906 pass
2907 else:
2908 if expected_type is None or isinstance(val, expected_type):
2909 return val
2910
2911
2912 def try_get(src, getter, expected_type=None):
2913 return try_call(*variadic(getter), args=(src,), expected_type=expected_type)
2914
2915
2916 def filter_dict(dct, cndn=lambda _, v: v is not None):
2917 return {k: v for k, v in dct.items() if cndn(k, v)}
2918
2919
2920 def merge_dicts(*dicts):
2921 merged = {}
2922 for a_dict in dicts:
2923 for k, v in a_dict.items():
2924 if (v is not None and k not in merged
2925 or isinstance(v, str) and merged[k] == ''):
2926 merged[k] = v
2927 return merged
2928
2929
2930 def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
2931 return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
2932
2933
2934 US_RATINGS = {
2935 'G': 0,
2936 'PG': 10,
2937 'PG-13': 13,
2938 'R': 16,
2939 'NC': 18,
2940 }
2941
2942
2943 TV_PARENTAL_GUIDELINES = {
2944 'TV-Y': 0,
2945 'TV-Y7': 7,
2946 'TV-G': 0,
2947 'TV-PG': 0,
2948 'TV-14': 14,
2949 'TV-MA': 17,
2950 }
2951
2952
2953 def parse_age_limit(s):
2954 # isinstance(False, int) is True. So type() must be used instead
2955 if type(s) is int:
2956 return s if 0 <= s <= 21 else None
2957 elif not isinstance(s, str):
2958 return None
2959 m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
2960 if m:
2961 return int(m.group('age'))
2962 s = s.upper()
2963 if s in US_RATINGS:
2964 return US_RATINGS[s]
2965 m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
2966 if m:
2967 return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
2968 return None
2969
2970
2971 def strip_jsonp(code):
2972 return re.sub(
2973 r'''(?sx)^
2974 (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
2975 (?:\s*&&\s*(?P=func_name))?
2976 \s*\(\s*(?P<callback_data>.*)\);?
2977 \s*?(?://[^\n]*)*$''',
2978 r'\g<callback_data>', code)
2979
2980
2981 def js_to_json(code, vars={}):
2982 # vars is a dict of var, val pairs to substitute
2983 COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
2984 SKIP_RE = fr'\s*(?:{COMMENT_RE})?\s*'
2985 INTEGER_TABLE = (
2986 (fr'(?s)^(0[xX][0-9a-fA-F]+){SKIP_RE}:?$', 16),
2987 (fr'(?s)^(0+[0-7]+){SKIP_RE}:?$', 8),
2988 )
2989
2990 def fix_kv(m):
2991 v = m.group(0)
2992 if v in ('true', 'false', 'null'):
2993 return v
2994 elif v in ('undefined', 'void 0'):
2995 return 'null'
2996 elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
2997 return ""
2998
2999 if v[0] in ("'", '"'):
3000 v = re.sub(r'(?s)\\.|"', lambda m: {
3001 '"': '\\"',
3002 "\\'": "'",
3003 '\\\n': '',
3004 '\\x': '\\u00',
3005 }.get(m.group(0), m.group(0)), v[1:-1])
3006 else:
3007 for regex, base in INTEGER_TABLE:
3008 im = re.match(regex, v)
3009 if im:
3010 i = int(im.group(1), base)
3011 return '"%d":' % i if v.endswith(':') else '%d' % i
3012
3013 if v in vars:
3014 return vars[v]
3015
3016 return '"%s"' % v
3017
3018 code = re.sub(r'new Date\((".+")\)', r'\g<1>', code)
3019
3020 return re.sub(r'''(?sx)
3021 "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
3022 '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
3023 {comment}|,(?={skip}[\]}}])|
3024 void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
3025 \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
3026 [0-9]+(?={skip}:)|
3027 !+
3028 '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
3029
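# Illustrative examples (hand-checked):
#   js_to_json("{x: 1, y: 'two'}")                 ->  '{"x": 1, "y": "two"}'
#   js_to_json('{duration: v}', vars={'v': '42'})  ->  '{"duration": 42}'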
3030
3031 def qualities(quality_ids):
3032 """ Get a numeric quality value out of a list of possible values """
3033 def q(qid):
3034 try:
3035 return quality_ids.index(qid)
3036 except ValueError:
3037 return -1
3038 return q
3039
3040
3041 POSTPROCESS_WHEN = ('pre_process', 'after_filter', 'before_dl', 'after_move', 'post_process', 'after_video', 'playlist')
3042
3043
3044 DEFAULT_OUTTMPL = {
3045 'default': '%(title)s [%(id)s].%(ext)s',
3046 'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
3047 }
3048 OUTTMPL_TYPES = {
3049 'chapter': None,
3050 'subtitle': None,
3051 'thumbnail': None,
3052 'description': 'description',
3053 'annotation': 'annotations.xml',
3054 'infojson': 'info.json',
3055 'link': None,
3056 'pl_video': None,
3057 'pl_thumbnail': None,
3058 'pl_description': 'description',
3059 'pl_infojson': 'info.json',
3060 }
3061
3062 # As of [1], the format syntax is:
3063 # %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
3064 # 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
3065 STR_FORMAT_RE_TMPL = r'''(?x)
3066 (?<!%)(?P<prefix>(?:%%)*)
3067 %
3068 (?P<has_key>\((?P<key>{0})\))?
3069 (?P<format>
3070 (?P<conversion>[#0\-+ ]+)?
3071 (?P<min_width>\d+)?
3072 (?P<precision>\.\d+)?
3073 (?P<len_mod>[hlL])? # unused in python
3074 {1} # conversion type
3075 )
3076 '''
3077
3078
3079 STR_FORMAT_TYPES = 'diouxXeEfFgGcrs'
3080
3081
3082 def limit_length(s, length):
3083 """ Add ellipses to overly long strings """
3084 if s is None:
3085 return None
3086 ELLIPSES = '...'
3087 if len(s) > length:
3088 return s[:length - len(ELLIPSES)] + ELLIPSES
3089 return s
3090
3091
3092 def version_tuple(v):
3093 return tuple(int(e) for e in re.split(r'[-.]', v))
3094
3095
3096 def is_outdated_version(version, limit, assume_new=True):
3097 if not version:
3098 return not assume_new
3099 try:
3100 return version_tuple(version) < version_tuple(limit)
3101 except ValueError:
3102 return not assume_new
3103
3104
3105 def ytdl_is_updateable():
3106 """ Returns if yt-dlp can be updated with -U """
3107
3108 from .update import is_non_updateable
3109
3110 return not is_non_updateable()
3111
3112
3113 def args_to_str(args):
3114 # Get a short string representation for a subprocess command
3115 return ' '.join(compat_shlex_quote(a) for a in args)
3116
3117
3118 def error_to_compat_str(err):
3119 return str(err)
3120
3121
3122 def error_to_str(err):
3123 return f'{type(err).__name__}: {err}'
3124
3125
3126 def mimetype2ext(mt):
3127 if mt is None:
3128 return None
3129
3130 mt, _, params = mt.partition(';')
3131 mt = mt.strip()
3132
3133 FULL_MAP = {
3134 'audio/mp4': 'm4a',
3135 # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. We use .mp3 here
3136 # since it's the most popular one
3137 'audio/mpeg': 'mp3',
3138 'audio/x-wav': 'wav',
3139 'audio/wav': 'wav',
3140 'audio/wave': 'wav',
3141 }
3142
3143 ext = FULL_MAP.get(mt)
3144 if ext is not None:
3145 return ext
3146
3147 SUBTYPE_MAP = {
3148 '3gpp': '3gp',
3149 'smptett+xml': 'tt',
3150 'ttaf+xml': 'dfxp',
3151 'ttml+xml': 'ttml',
3152 'x-flv': 'flv',
3153 'x-mp4-fragmented': 'mp4',
3154 'x-ms-sami': 'sami',
3155 'x-ms-wmv': 'wmv',
3156 'mpegurl': 'm3u8',
3157 'x-mpegurl': 'm3u8',
3158 'vnd.apple.mpegurl': 'm3u8',
3159 'dash+xml': 'mpd',
3160 'f4m+xml': 'f4m',
3161 'hds+xml': 'f4m',
3162 'vnd.ms-sstr+xml': 'ism',
3163 'quicktime': 'mov',
3164 'mp2t': 'ts',
3165 'x-wav': 'wav',
3166 'filmstrip+json': 'fs',
3167 'svg+xml': 'svg',
3168 }
3169
3170 _, _, subtype = mt.rpartition('/')
3171 ext = SUBTYPE_MAP.get(subtype.lower())
3172 if ext is not None:
3173 return ext
3174
3175 SUFFIX_MAP = {
3176 'json': 'json',
3177 'xml': 'xml',
3178 'zip': 'zip',
3179 'gzip': 'gz',
3180 }
3181
3182 _, _, suffix = subtype.partition('+')
3183 ext = SUFFIX_MAP.get(suffix)
3184 if ext is not None:
3185 return ext
3186
3187 return subtype.replace('+', '.')
3188
3189
3190 def ext2mimetype(ext_or_url):
3191 if not ext_or_url:
3192 return None
3193 if '.' not in ext_or_url:
3194 ext_or_url = f'file.{ext_or_url}'
3195 return mimetypes.guess_type(ext_or_url)[0]
3196
3197
3198 def parse_codecs(codecs_str):
3199 # http://tools.ietf.org/html/rfc6381
3200 if not codecs_str:
3201 return {}
3202 split_codecs = list(filter(None, map(
3203 str.strip, codecs_str.strip().strip(',').split(','))))
3204 vcodec, acodec, scodec, hdr = None, None, None, None
3205 for full_codec in split_codecs:
3206 parts = full_codec.split('.')
3207 codec = parts[0].replace('0', '')  # normalize zero-padded names, e.g. av01 -> av1, vp09 -> vp9
3208 if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
3209 'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
3210 if not vcodec:
3211 vcodec = '.'.join(parts[:4]) if codec in ('vp9', 'av1', 'hvc1') else full_codec
3212 if codec in ('dvh1', 'dvhe'):
3213 hdr = 'DV'
3214 elif codec == 'av1' and len(parts) > 3 and parts[3] == '10':
3215 hdr = 'HDR10'
3216 elif full_codec.replace('0', '').startswith('vp9.2'):
3217 hdr = 'HDR10'
3218 elif codec in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
3219 if not acodec:
3220 acodec = full_codec
3221 elif codec in ('stpp', 'wvtt',):
3222 if not scodec:
3223 scodec = full_codec
3224 else:
3225 write_string(f'WARNING: Unknown codec {full_codec}\n')
3226 if vcodec or acodec or scodec:
3227 return {
3228 'vcodec': vcodec or 'none',
3229 'acodec': acodec or 'none',
3230 'dynamic_range': hdr,
3231 **({'scodec': scodec} if scodec is not None else {}),
3232 }
3233 elif len(split_codecs) == 2:
3234 return {
3235 'vcodec': split_codecs[0],
3236 'acodec': split_codecs[1],
3237 }
3238 return {}
3239
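# Illustrative examples (hand-checked):
#   parse_codecs('avc1.64001f, mp4a.40.2')
#       ->  {'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2', 'dynamic_range': None}
#   parse_codecs('dvh1.05.01')
#       ->  {'vcodec': 'dvh1.05.01', 'acodec': 'none', 'dynamic_range': 'DV'}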
3240
3241 def urlhandle_detect_ext(url_handle):
3242 getheader = url_handle.headers.get
3243
3244 cd = getheader('Content-Disposition')
3245 if cd:
3246 m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
3247 if m:
3248 e = determine_ext(m.group('filename'), default_ext=None)
3249 if e:
3250 return e
3251
3252 return mimetype2ext(getheader('Content-Type'))
3253
3254
3255 def encode_data_uri(data, mime_type):
3256 return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
3257
3258
3259 def age_restricted(content_limit, age_limit):
3260 """ Returns True iff the content should be blocked """
3261
3262 if age_limit is None: # No limit set
3263 return False
3264 if content_limit is None:
3265 return False # Content available for everyone
3266 return age_limit < content_limit
3267
3268
3269 def is_html(first_bytes):
3270 """ Detect whether a file contains HTML by examining its first bytes. """
3271
3272 BOMS = [
3273 (b'\xef\xbb\xbf', 'utf-8'),
3274 (b'\x00\x00\xfe\xff', 'utf-32-be'),
3275 (b'\xff\xfe\x00\x00', 'utf-32-le'),
3276 (b'\xff\xfe', 'utf-16-le'),
3277 (b'\xfe\xff', 'utf-16-be'),
3278 ]
3279 for bom, enc in BOMS:
3280 if first_bytes.startswith(bom):
3281 s = first_bytes[len(bom):].decode(enc, 'replace')
3282 break
3283 else:
3284 s = first_bytes.decode('utf-8', 'replace')
3285
3286 return re.match(r'^\s*<', s)
3287
3288
3289 def determine_protocol(info_dict):
3290 protocol = info_dict.get('protocol')
3291 if protocol is not None:
3292 return protocol
3293
3294 url = sanitize_url(info_dict['url'])
3295 if url.startswith('rtmp'):
3296 return 'rtmp'
3297 elif url.startswith('mms'):
3298 return 'mms'
3299 elif url.startswith('rtsp'):
3300 return 'rtsp'
3301
3302 ext = determine_ext(url)
3303 if ext == 'm3u8':
3304 return 'm3u8'
3305 elif ext == 'f4m':
3306 return 'f4m'
3307
3308 return compat_urllib_parse_urlparse(url).scheme
3309
3310
3311 def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False):
3312 """ Render a list of rows, each as a list of values.
3313 Text after a \t will be right aligned """
3314 def width(string):
3315 return len(remove_terminal_sequences(string).replace('\t', ''))
3316
3317 def get_max_lens(table):
3318 return [max(width(str(v)) for v in col) for col in zip(*table)]
3319
3320 def filter_using_list(row, filterArray):
3321 return [col for take, col in itertools.zip_longest(filterArray, row, fillvalue=True) if take]
3322
3323 max_lens = get_max_lens(data) if hide_empty else []
3324 header_row = filter_using_list(header_row, max_lens)
3325 data = [filter_using_list(row, max_lens) for row in data]
3326
3327 table = [header_row] + data
3328 max_lens = get_max_lens(table)
3329 extra_gap += 1
3330 if delim:
3331 table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data
3332 table[1][-1] = table[1][-1][:-extra_gap * len(delim)] # Remove extra_gap from end of delimiter
3333 for row in table:
3334 for pos, text in enumerate(map(str, row)):
3335 if '\t' in text:
3336 row[pos] = text.replace('\t', ' ' * (max_lens[pos] - width(text))) + ' ' * extra_gap
3337 else:
3338 row[pos] = text + ' ' * (max_lens[pos] - width(text) + extra_gap)
3339 ret = '\n'.join(''.join(row).rstrip() for row in table)
3340 return ret
3341
3342
3343 def _match_one(filter_part, dct, incomplete):
3344 # TODO: Generalize code with YoutubeDL._build_format_filter
3345 STRING_OPERATORS = {
3346 '*=': operator.contains,
3347 '^=': lambda attr, value: attr.startswith(value),
3348 '$=': lambda attr, value: attr.endswith(value),
3349 '~=': lambda attr, value: re.search(value, attr),
3350 }
3351 COMPARISON_OPERATORS = {
3352 **STRING_OPERATORS,
3353 '<=': operator.le, # "<=" must be defined above "<"
3354 '<': operator.lt,
3355 '>=': operator.ge,
3356 '>': operator.gt,
3357 '=': operator.eq,
3358 }
3359
3360 if isinstance(incomplete, bool):
3361 is_incomplete = lambda _: incomplete
3362 else:
3363 is_incomplete = lambda k: k in incomplete
3364
3365 operator_rex = re.compile(r'''(?x)\s*
3366 (?P<key>[a-z_]+)
3367 \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
3368 (?:
3369 (?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
3370 (?P<strval>.+?)
3371 )
3372 \s*$
3373 ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
3374 m = operator_rex.search(filter_part)
3375 if m:
3376 m = m.groupdict()
3377 unnegated_op = COMPARISON_OPERATORS[m['op']]
3378 if m['negation']:
3379 op = lambda attr, value: not unnegated_op(attr, value)
3380 else:
3381 op = unnegated_op
3382 comparison_value = m['quotedstrval'] or m['strval']  # the regex guarantees one of these is non-empty
3383 if m['quote']:
3384 comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote'])
3385 actual_value = dct.get(m['key'])
3386 numeric_comparison = None
3387 if isinstance(actual_value, (int, float)):
3388 # If the original field is a string and the matching comparison value is
3389 # a number, we should respect the origin of the original field
3390 # and process comparison value as a string (see
3391 # https://github.com/ytdl-org/youtube-dl/issues/11082)
3392 try:
3393 numeric_comparison = int(comparison_value)
3394 except ValueError:
3395 numeric_comparison = parse_filesize(comparison_value)
3396 if numeric_comparison is None:
3397 numeric_comparison = parse_filesize(f'{comparison_value}B')
3398 if numeric_comparison is None:
3399 numeric_comparison = parse_duration(comparison_value)
3400 if numeric_comparison is not None and m['op'] in STRING_OPERATORS:
3401 raise ValueError('Operator %s only supports string values!' % m['op'])
3402 if actual_value is None:
3403 return is_incomplete(m['key']) or m['none_inclusive']
3404 return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)
3405
3406 UNARY_OPERATORS = {
3407 '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
3408 '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
3409 }
3410 operator_rex = re.compile(r'''(?x)\s*
3411 (?P<op>%s)\s*(?P<key>[a-z_]+)
3412 \s*$
3413 ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
3414 m = operator_rex.search(filter_part)
3415 if m:
3416 op = UNARY_OPERATORS[m.group('op')]
3417 actual_value = dct.get(m.group('key'))
3418 if is_incomplete(m.group('key')) and actual_value is None:
3419 return True
3420 return op(actual_value)
3421
3422 raise ValueError('Invalid filter part %r' % filter_part)
3423
3424
3425 def match_str(filter_str, dct, incomplete=False):
3426 """ Filter a dictionary with a simple string syntax.
3427 @returns Whether the filter passes
3428 @param incomplete Set of keys that are expected to be missing from dct.
3429 Can be True/False to indicate all/none of the keys may be missing.
3430 All conditions on incomplete keys pass if the key is missing.
3431 """
3432 return all(
3433 _match_one(filter_part.replace(r'\&', '&'), dct, incomplete)
3434 for filter_part in re.split(r'(?<!\\)&', filter_str))
3435
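# Illustrative examples (hand-checked; the dicts mimic info_dict fields):
#   match_str('duration < 30', {'duration': 30})  ->  False
#   match_str('like_count > 100 & description',
#             {'like_count': 190, 'description': 'ok'})  ->  True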
3436
3437 def match_filter_func(filters):
3438 if not filters:
3439 return None
3440 filters = set(variadic(filters))
3441
3442 interactive = '-' in filters
3443 if interactive:
3444 filters.remove('-')
3445
3446 def _match_func(info_dict, incomplete=False):
3447 if not filters or any(match_str(f, info_dict, incomplete) for f in filters):
3448 return NO_DEFAULT if interactive and not incomplete else None
3449 else:
3450 video_title = info_dict.get('title') or info_dict.get('id') or 'video'
3451 filter_str = ') | ('.join(map(str.strip, filters))
3452 return f'{video_title} does not pass filter ({filter_str}), skipping ..'
3453 return _match_func
3454
3455
3456 def parse_dfxp_time_expr(time_expr):
3457 if not time_expr:
3458 return
3459
3460 mobj = re.match(rf'^(?P<time_offset>{NUMBER_RE})s?$', time_expr)
3461 if mobj:
3462 return float(mobj.group('time_offset'))
3463
3464 mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
3465 if mobj:
3466 return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
3467
3468
3469 def srt_subtitles_timecode(seconds):
3470 return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)
3471
3472
3473 def ass_subtitles_timecode(seconds):
3474 time = timetuple_from_msec(seconds * 1000)
3475 return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)
3476
3477
3478 def dfxp2srt(dfxp_data):
3479 '''
3480 @param dfxp_data A bytes-like object containing DFXP data
3481 @returns A unicode object containing converted SRT data
3482 '''
3483 LEGACY_NAMESPACES = (
3484 (b'http://www.w3.org/ns/ttml', [
3485 b'http://www.w3.org/2004/11/ttaf1',
3486 b'http://www.w3.org/2006/04/ttaf1',
3487 b'http://www.w3.org/2006/10/ttaf1',
3488 ]),
3489 (b'http://www.w3.org/ns/ttml#styling', [
3490 b'http://www.w3.org/ns/ttml#style',
3491 ]),
3492 )
3493
3494 SUPPORTED_STYLING = [
3495 'color',
3496 'fontFamily',
3497 'fontSize',
3498 'fontStyle',
3499 'fontWeight',
3500 'textDecoration'
3501 ]
3502
3503 _x = functools.partial(xpath_with_ns, ns_map={
3504 'xml': 'http://www.w3.org/XML/1998/namespace',
3505 'ttml': 'http://www.w3.org/ns/ttml',
3506 'tts': 'http://www.w3.org/ns/ttml#styling',
3507 })
3508
3509 styles = {}
3510 default_style = {}
3511
3512 class TTMLPElementParser:
3513 _out = ''
3514 _unclosed_elements = []
3515 _applied_styles = []
3516
3517 def start(self, tag, attrib):
3518 if tag in (_x('ttml:br'), 'br'):
3519 self._out += '\n'
3520 else:
3521 unclosed_elements = []
3522 style = {}
3523 element_style_id = attrib.get('style')
3524 if default_style:
3525 style.update(default_style)
3526 if element_style_id:
3527 style.update(styles.get(element_style_id, {}))
3528 for prop in SUPPORTED_STYLING:
3529 prop_val = attrib.get(_x('tts:' + prop))
3530 if prop_val:
3531 style[prop] = prop_val
3532 if style:
3533 font = ''
3534 for k, v in sorted(style.items()):
3535 if self._applied_styles and self._applied_styles[-1].get(k) == v:
3536 continue
3537 if k == 'color':
3538 font += ' color="%s"' % v
3539 elif k == 'fontSize':
3540 font += ' size="%s"' % v
3541 elif k == 'fontFamily':
3542 font += ' face="%s"' % v
3543 elif k == 'fontWeight' and v == 'bold':
3544 self._out += '<b>'
3545 unclosed_elements.append('b')
3546 elif k == 'fontStyle' and v == 'italic':
3547 self._out += '<i>'
3548 unclosed_elements.append('i')
3549 elif k == 'textDecoration' and v == 'underline':
3550 self._out += '<u>'
3551 unclosed_elements.append('u')
3552 if font:
3553 self._out += '<font' + font + '>'
3554 unclosed_elements.append('font')
3555 applied_style = {}
3556 if self._applied_styles:
3557 applied_style.update(self._applied_styles[-1])
3558 applied_style.update(style)
3559 self._applied_styles.append(applied_style)
3560 self._unclosed_elements.append(unclosed_elements)
3561
3562 def end(self, tag):
3563 if tag not in (_x('ttml:br'), 'br'):
3564 unclosed_elements = self._unclosed_elements.pop()
3565 for element in reversed(unclosed_elements):
3566 self._out += '</%s>' % element
3567 if unclosed_elements and self._applied_styles:
3568 self._applied_styles.pop()
3569
3570 def data(self, data):
3571 self._out += data
3572
3573 def close(self):
3574 return self._out.strip()
3575
3576 def parse_node(node):
3577 target = TTMLPElementParser()
3578 parser = xml.etree.ElementTree.XMLParser(target=target)
3579 parser.feed(xml.etree.ElementTree.tostring(node))
3580 return parser.close()
3581
3582 for k, v in LEGACY_NAMESPACES:
3583 for ns in v:
3584 dfxp_data = dfxp_data.replace(ns, k)
3585
3586 dfxp = compat_etree_fromstring(dfxp_data)
3587 out = []
3588 paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
3589
3590 if not paras:
3591 raise ValueError('Invalid dfxp/TTML subtitle')
3592
3593 repeat = False
3594 while True:
3595 for style in dfxp.findall(_x('.//ttml:style')):
3596 style_id = style.get('id') or style.get(_x('xml:id'))
3597 if not style_id:
3598 continue
3599 parent_style_id = style.get('style')
3600 if parent_style_id:
3601 if parent_style_id not in styles:
3602 repeat = True
3603 continue
3604 styles[style_id] = styles[parent_style_id].copy()
3605 for prop in SUPPORTED_STYLING:
3606 prop_val = style.get(_x('tts:' + prop))
3607 if prop_val:
3608 styles.setdefault(style_id, {})[prop] = prop_val
3609 if repeat:
3610 repeat = False
3611 else:
3612 break
3613
3614 for p in ('body', 'div'):
3615 ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
3616 if ele is None:
3617 continue
3618 style = styles.get(ele.get('style'))
3619 if not style:
3620 continue
3621 default_style.update(style)
3622
3623 for para, index in zip(paras, itertools.count(1)):
3624 begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
3625 end_time = parse_dfxp_time_expr(para.attrib.get('end'))
3626 dur = parse_dfxp_time_expr(para.attrib.get('dur'))
3627 if begin_time is None:
3628 continue
3629 if not end_time:
3630 if not dur:
3631 continue
3632 end_time = begin_time + dur
3633 out.append('%d\n%s --> %s\n%s\n\n' % (
3634 index,
3635 srt_subtitles_timecode(begin_time),
3636 srt_subtitles_timecode(end_time),
3637 parse_node(para)))
3638
3639 return ''.join(out)
3640
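# Illustrative sketch (hypothetical minimal TTML document):
#   dfxp2srt(b'<tt xmlns="http://www.w3.org/ns/ttml"><body><div>'
#            b'<p begin="0s" end="1.5s">Hello</p></div></body></tt>')
# should return '1\n00:00:00,000 --> 00:00:01,500\nHello\n\n'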
3641
3642 def cli_option(params, command_option, param):
3643     param = params.get(param)
3644     if param is not None:
3645         param = compat_str(param)
3646     return [command_option, param] if param is not None else []
3647
3648
3649 def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
3650 param = params.get(param)
3651 if param is None:
3652 return []
3653 assert isinstance(param, bool)
3654 if separator:
3655 return [command_option + separator + (true_value if param else false_value)]
3656 return [command_option, true_value if param else false_value]
3657
3658
3659 def cli_valueless_option(params, command_option, param, expected_value=True):
3660 param = params.get(param)
3661 return [command_option] if param == expected_value else []
3662
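# Illustrative usage of the cli_* helpers above (hypothetical params dict):
#   cli_option({'proxy': 'socks5://127.0.0.1:1080'}, '--proxy', 'proxy')
#       -> ['--proxy', 'socks5://127.0.0.1:1080']
#   cli_bool_option({'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate')
#       -> ['--no-check-certificate', 'true']
#   cli_valueless_option({'quiet': True}, '--quiet', 'quiet') -> ['--quiet']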
3663
3664 def cli_configuration_args(argdict, keys, default=[], use_compat=True):
3665 if isinstance(argdict, (list, tuple)): # for backward compatibility
3666 if use_compat:
3667 return argdict
3668 else:
3669 argdict = None
3670 if argdict is None:
3671 return default
3672 assert isinstance(argdict, dict)
3673
3674 assert isinstance(keys, (list, tuple))
3675 for key_list in keys:
3676 arg_list = list(filter(
3677 lambda x: x is not None,
3678 [argdict.get(key.lower()) for key in variadic(key_list)]))
3679 if arg_list:
3680 return [arg for args in arg_list for arg in args]
3681 return default
3682
3683
3684 def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compat=True):
3685 main_key, exe = main_key.lower(), exe.lower()
3686 root_key = exe if main_key == exe else f'{main_key}+{exe}'
3687 keys = [f'{root_key}{k}' for k in (keys or [''])]
3688 if root_key in keys:
3689 if main_key != exe:
3690 keys.append((main_key, exe))
3691 keys.append('default')
3692 else:
3693 use_compat = False
3694 return cli_configuration_args(argdict, keys, default, use_compat)
3695
3696
3697 class ISO639Utils:
3698 # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
3699 _lang_map = {
3700 'aa': 'aar',
3701 'ab': 'abk',
3702 'ae': 'ave',
3703 'af': 'afr',
3704 'ak': 'aka',
3705 'am': 'amh',
3706 'an': 'arg',
3707 'ar': 'ara',
3708 'as': 'asm',
3709 'av': 'ava',
3710 'ay': 'aym',
3711 'az': 'aze',
3712 'ba': 'bak',
3713 'be': 'bel',
3714 'bg': 'bul',
3715 'bh': 'bih',
3716 'bi': 'bis',
3717 'bm': 'bam',
3718 'bn': 'ben',
3719 'bo': 'bod',
3720 'br': 'bre',
3721 'bs': 'bos',
3722 'ca': 'cat',
3723 'ce': 'che',
3724 'ch': 'cha',
3725 'co': 'cos',
3726 'cr': 'cre',
3727 'cs': 'ces',
3728 'cu': 'chu',
3729 'cv': 'chv',
3730 'cy': 'cym',
3731 'da': 'dan',
3732 'de': 'deu',
3733 'dv': 'div',
3734 'dz': 'dzo',
3735 'ee': 'ewe',
3736 'el': 'ell',
3737 'en': 'eng',
3738 'eo': 'epo',
3739 'es': 'spa',
3740 'et': 'est',
3741 'eu': 'eus',
3742 'fa': 'fas',
3743 'ff': 'ful',
3744 'fi': 'fin',
3745 'fj': 'fij',
3746 'fo': 'fao',
3747 'fr': 'fra',
3748 'fy': 'fry',
3749 'ga': 'gle',
3750 'gd': 'gla',
3751 'gl': 'glg',
3752 'gn': 'grn',
3753 'gu': 'guj',
3754 'gv': 'glv',
3755 'ha': 'hau',
3756 'he': 'heb',
3757 'iw': 'heb', # Replaced by he in 1989 revision
3758 'hi': 'hin',
3759 'ho': 'hmo',
3760 'hr': 'hrv',
3761 'ht': 'hat',
3762 'hu': 'hun',
3763 'hy': 'hye',
3764 'hz': 'her',
3765 'ia': 'ina',
3766 'id': 'ind',
3767 'in': 'ind', # Replaced by id in 1989 revision
3768 'ie': 'ile',
3769 'ig': 'ibo',
3770 'ii': 'iii',
3771 'ik': 'ipk',
3772 'io': 'ido',
3773 'is': 'isl',
3774 'it': 'ita',
3775 'iu': 'iku',
3776 'ja': 'jpn',
3777 'jv': 'jav',
3778 'ka': 'kat',
3779 'kg': 'kon',
3780 'ki': 'kik',
3781 'kj': 'kua',
3782 'kk': 'kaz',
3783 'kl': 'kal',
3784 'km': 'khm',
3785 'kn': 'kan',
3786 'ko': 'kor',
3787 'kr': 'kau',
3788 'ks': 'kas',
3789 'ku': 'kur',
3790 'kv': 'kom',
3791 'kw': 'cor',
3792 'ky': 'kir',
3793 'la': 'lat',
3794 'lb': 'ltz',
3795 'lg': 'lug',
3796 'li': 'lim',
3797 'ln': 'lin',
3798 'lo': 'lao',
3799 'lt': 'lit',
3800 'lu': 'lub',
3801 'lv': 'lav',
3802 'mg': 'mlg',
3803 'mh': 'mah',
3804 'mi': 'mri',
3805 'mk': 'mkd',
3806 'ml': 'mal',
3807 'mn': 'mon',
3808 'mr': 'mar',
3809 'ms': 'msa',
3810 'mt': 'mlt',
3811 'my': 'mya',
3812 'na': 'nau',
3813 'nb': 'nob',
3814 'nd': 'nde',
3815 'ne': 'nep',
3816 'ng': 'ndo',
3817 'nl': 'nld',
3818 'nn': 'nno',
3819 'no': 'nor',
3820 'nr': 'nbl',
3821 'nv': 'nav',
3822 'ny': 'nya',
3823 'oc': 'oci',
3824 'oj': 'oji',
3825 'om': 'orm',
3826 'or': 'ori',
3827 'os': 'oss',
3828 'pa': 'pan',
3829 'pi': 'pli',
3830 'pl': 'pol',
3831 'ps': 'pus',
3832 'pt': 'por',
3833 'qu': 'que',
3834 'rm': 'roh',
3835 'rn': 'run',
3836 'ro': 'ron',
3837 'ru': 'rus',
3838 'rw': 'kin',
3839 'sa': 'san',
3840 'sc': 'srd',
3841 'sd': 'snd',
3842 'se': 'sme',
3843 'sg': 'sag',
3844 'si': 'sin',
3845 'sk': 'slk',
3846 'sl': 'slv',
3847 'sm': 'smo',
3848 'sn': 'sna',
3849 'so': 'som',
3850 'sq': 'sqi',
3851 'sr': 'srp',
3852 'ss': 'ssw',
3853 'st': 'sot',
3854 'su': 'sun',
3855 'sv': 'swe',
3856 'sw': 'swa',
3857 'ta': 'tam',
3858 'te': 'tel',
3859 'tg': 'tgk',
3860 'th': 'tha',
3861 'ti': 'tir',
3862 'tk': 'tuk',
3863 'tl': 'tgl',
3864 'tn': 'tsn',
3865 'to': 'ton',
3866 'tr': 'tur',
3867 'ts': 'tso',
3868 'tt': 'tat',
3869 'tw': 'twi',
3870 'ty': 'tah',
3871 'ug': 'uig',
3872 'uk': 'ukr',
3873 'ur': 'urd',
3874 'uz': 'uzb',
3875 've': 'ven',
3876 'vi': 'vie',
3877 'vo': 'vol',
3878 'wa': 'wln',
3879 'wo': 'wol',
3880 'xh': 'xho',
3881 'yi': 'yid',
3882 'ji': 'yid', # Replaced by yi in 1989 revision
3883 'yo': 'yor',
3884 'za': 'zha',
3885 'zh': 'zho',
3886 'zu': 'zul',
3887 }
3888
3889 @classmethod
3890 def short2long(cls, code):
3891 """Convert language code from ISO 639-1 to ISO 639-2/T"""
3892 return cls._lang_map.get(code[:2])
3893
3894 @classmethod
3895 def long2short(cls, code):
3896 """Convert language code from ISO 639-2/T to ISO 639-1"""
3897 for short_name, long_name in cls._lang_map.items():
3898 if long_name == code:
3899 return short_name
3900
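# Illustrative usage: ISO639Utils.short2long('en') -> 'eng' and
# ISO639Utils.long2short('eng') -> 'en'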
3901
3902 class ISO3166Utils:
3903 # From http://data.okfn.org/data/core/country-list
3904 _country_map = {
3905 'AF': 'Afghanistan',
3906 'AX': 'Åland Islands',
3907 'AL': 'Albania',
3908 'DZ': 'Algeria',
3909 'AS': 'American Samoa',
3910 'AD': 'Andorra',
3911 'AO': 'Angola',
3912 'AI': 'Anguilla',
3913 'AQ': 'Antarctica',
3914 'AG': 'Antigua and Barbuda',
3915 'AR': 'Argentina',
3916 'AM': 'Armenia',
3917 'AW': 'Aruba',
3918 'AU': 'Australia',
3919 'AT': 'Austria',
3920 'AZ': 'Azerbaijan',
3921 'BS': 'Bahamas',
3922 'BH': 'Bahrain',
3923 'BD': 'Bangladesh',
3924 'BB': 'Barbados',
3925 'BY': 'Belarus',
3926 'BE': 'Belgium',
3927 'BZ': 'Belize',
3928 'BJ': 'Benin',
3929 'BM': 'Bermuda',
3930 'BT': 'Bhutan',
3931 'BO': 'Bolivia, Plurinational State of',
3932 'BQ': 'Bonaire, Sint Eustatius and Saba',
3933 'BA': 'Bosnia and Herzegovina',
3934 'BW': 'Botswana',
3935 'BV': 'Bouvet Island',
3936 'BR': 'Brazil',
3937 'IO': 'British Indian Ocean Territory',
3938 'BN': 'Brunei Darussalam',
3939 'BG': 'Bulgaria',
3940 'BF': 'Burkina Faso',
3941 'BI': 'Burundi',
3942 'KH': 'Cambodia',
3943 'CM': 'Cameroon',
3944 'CA': 'Canada',
3945 'CV': 'Cape Verde',
3946 'KY': 'Cayman Islands',
3947 'CF': 'Central African Republic',
3948 'TD': 'Chad',
3949 'CL': 'Chile',
3950 'CN': 'China',
3951 'CX': 'Christmas Island',
3952 'CC': 'Cocos (Keeling) Islands',
3953 'CO': 'Colombia',
3954 'KM': 'Comoros',
3955 'CG': 'Congo',
3956 'CD': 'Congo, the Democratic Republic of the',
3957 'CK': 'Cook Islands',
3958 'CR': 'Costa Rica',
3959 'CI': 'Côte d\'Ivoire',
3960 'HR': 'Croatia',
3961 'CU': 'Cuba',
3962 'CW': 'Curaçao',
3963 'CY': 'Cyprus',
3964 'CZ': 'Czech Republic',
3965 'DK': 'Denmark',
3966 'DJ': 'Djibouti',
3967 'DM': 'Dominica',
3968 'DO': 'Dominican Republic',
3969 'EC': 'Ecuador',
3970 'EG': 'Egypt',
3971 'SV': 'El Salvador',
3972 'GQ': 'Equatorial Guinea',
3973 'ER': 'Eritrea',
3974 'EE': 'Estonia',
3975 'ET': 'Ethiopia',
3976 'FK': 'Falkland Islands (Malvinas)',
3977 'FO': 'Faroe Islands',
3978 'FJ': 'Fiji',
3979 'FI': 'Finland',
3980 'FR': 'France',
3981 'GF': 'French Guiana',
3982 'PF': 'French Polynesia',
3983 'TF': 'French Southern Territories',
3984 'GA': 'Gabon',
3985 'GM': 'Gambia',
3986 'GE': 'Georgia',
3987 'DE': 'Germany',
3988 'GH': 'Ghana',
3989 'GI': 'Gibraltar',
3990 'GR': 'Greece',
3991 'GL': 'Greenland',
3992 'GD': 'Grenada',
3993 'GP': 'Guadeloupe',
3994 'GU': 'Guam',
3995 'GT': 'Guatemala',
3996 'GG': 'Guernsey',
3997 'GN': 'Guinea',
3998 'GW': 'Guinea-Bissau',
3999 'GY': 'Guyana',
4000 'HT': 'Haiti',
4001 'HM': 'Heard Island and McDonald Islands',
4002 'VA': 'Holy See (Vatican City State)',
4003 'HN': 'Honduras',
4004 'HK': 'Hong Kong',
4005 'HU': 'Hungary',
4006 'IS': 'Iceland',
4007 'IN': 'India',
4008 'ID': 'Indonesia',
4009 'IR': 'Iran, Islamic Republic of',
4010 'IQ': 'Iraq',
4011 'IE': 'Ireland',
4012 'IM': 'Isle of Man',
4013 'IL': 'Israel',
4014 'IT': 'Italy',
4015 'JM': 'Jamaica',
4016 'JP': 'Japan',
4017 'JE': 'Jersey',
4018 'JO': 'Jordan',
4019 'KZ': 'Kazakhstan',
4020 'KE': 'Kenya',
4021 'KI': 'Kiribati',
4022 'KP': 'Korea, Democratic People\'s Republic of',
4023 'KR': 'Korea, Republic of',
4024 'KW': 'Kuwait',
4025 'KG': 'Kyrgyzstan',
4026 'LA': 'Lao People\'s Democratic Republic',
4027 'LV': 'Latvia',
4028 'LB': 'Lebanon',
4029 'LS': 'Lesotho',
4030 'LR': 'Liberia',
4031 'LY': 'Libya',
4032 'LI': 'Liechtenstein',
4033 'LT': 'Lithuania',
4034 'LU': 'Luxembourg',
4035 'MO': 'Macao',
4036 'MK': 'Macedonia, the Former Yugoslav Republic of',
4037 'MG': 'Madagascar',
4038 'MW': 'Malawi',
4039 'MY': 'Malaysia',
4040 'MV': 'Maldives',
4041 'ML': 'Mali',
4042 'MT': 'Malta',
4043 'MH': 'Marshall Islands',
4044 'MQ': 'Martinique',
4045 'MR': 'Mauritania',
4046 'MU': 'Mauritius',
4047 'YT': 'Mayotte',
4048 'MX': 'Mexico',
4049 'FM': 'Micronesia, Federated States of',
4050 'MD': 'Moldova, Republic of',
4051 'MC': 'Monaco',
4052 'MN': 'Mongolia',
4053 'ME': 'Montenegro',
4054 'MS': 'Montserrat',
4055 'MA': 'Morocco',
4056 'MZ': 'Mozambique',
4057 'MM': 'Myanmar',
4058 'NA': 'Namibia',
4059 'NR': 'Nauru',
4060 'NP': 'Nepal',
4061 'NL': 'Netherlands',
4062 'NC': 'New Caledonia',
4063 'NZ': 'New Zealand',
4064 'NI': 'Nicaragua',
4065 'NE': 'Niger',
4066 'NG': 'Nigeria',
4067 'NU': 'Niue',
4068 'NF': 'Norfolk Island',
4069 'MP': 'Northern Mariana Islands',
4070 'NO': 'Norway',
4071 'OM': 'Oman',
4072 'PK': 'Pakistan',
4073 'PW': 'Palau',
4074 'PS': 'Palestine, State of',
4075 'PA': 'Panama',
4076 'PG': 'Papua New Guinea',
4077 'PY': 'Paraguay',
4078 'PE': 'Peru',
4079 'PH': 'Philippines',
4080 'PN': 'Pitcairn',
4081 'PL': 'Poland',
4082 'PT': 'Portugal',
4083 'PR': 'Puerto Rico',
4084 'QA': 'Qatar',
4085 'RE': 'Réunion',
4086 'RO': 'Romania',
4087 'RU': 'Russian Federation',
4088 'RW': 'Rwanda',
4089 'BL': 'Saint Barthélemy',
4090 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
4091 'KN': 'Saint Kitts and Nevis',
4092 'LC': 'Saint Lucia',
4093 'MF': 'Saint Martin (French part)',
4094 'PM': 'Saint Pierre and Miquelon',
4095 'VC': 'Saint Vincent and the Grenadines',
4096 'WS': 'Samoa',
4097 'SM': 'San Marino',
4098 'ST': 'Sao Tome and Principe',
4099 'SA': 'Saudi Arabia',
4100 'SN': 'Senegal',
4101 'RS': 'Serbia',
4102 'SC': 'Seychelles',
4103 'SL': 'Sierra Leone',
4104 'SG': 'Singapore',
4105 'SX': 'Sint Maarten (Dutch part)',
4106 'SK': 'Slovakia',
4107 'SI': 'Slovenia',
4108 'SB': 'Solomon Islands',
4109 'SO': 'Somalia',
4110 'ZA': 'South Africa',
4111 'GS': 'South Georgia and the South Sandwich Islands',
4112 'SS': 'South Sudan',
4113 'ES': 'Spain',
4114 'LK': 'Sri Lanka',
4115 'SD': 'Sudan',
4116 'SR': 'Suriname',
4117 'SJ': 'Svalbard and Jan Mayen',
4118 'SZ': 'Swaziland',
4119 'SE': 'Sweden',
4120 'CH': 'Switzerland',
4121 'SY': 'Syrian Arab Republic',
4122 'TW': 'Taiwan, Province of China',
4123 'TJ': 'Tajikistan',
4124 'TZ': 'Tanzania, United Republic of',
4125 'TH': 'Thailand',
4126 'TL': 'Timor-Leste',
4127 'TG': 'Togo',
4128 'TK': 'Tokelau',
4129 'TO': 'Tonga',
4130 'TT': 'Trinidad and Tobago',
4131 'TN': 'Tunisia',
4132 'TR': 'Turkey',
4133 'TM': 'Turkmenistan',
4134 'TC': 'Turks and Caicos Islands',
4135 'TV': 'Tuvalu',
4136 'UG': 'Uganda',
4137 'UA': 'Ukraine',
4138 'AE': 'United Arab Emirates',
4139 'GB': 'United Kingdom',
4140 'US': 'United States',
4141 'UM': 'United States Minor Outlying Islands',
4142 'UY': 'Uruguay',
4143 'UZ': 'Uzbekistan',
4144 'VU': 'Vanuatu',
4145 'VE': 'Venezuela, Bolivarian Republic of',
4146 'VN': 'Viet Nam',
4147 'VG': 'Virgin Islands, British',
4148 'VI': 'Virgin Islands, U.S.',
4149 'WF': 'Wallis and Futuna',
4150 'EH': 'Western Sahara',
4151 'YE': 'Yemen',
4152 'ZM': 'Zambia',
4153 'ZW': 'Zimbabwe',
4154 }
4155
4156 @classmethod
4157 def short2full(cls, code):
4158         """Convert an ISO 3166-1 alpha-2 country code to the corresponding full name"""
4159 return cls._country_map.get(code.upper())
4160
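# Illustrative usage: ISO3166Utils.short2full('de') -> 'Germany'
# (the code is upper-cased before the lookup)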
4161
4162 class GeoUtils:
4163 # Major IPv4 address blocks per country
4164 _country_ip_map = {
4165 'AD': '46.172.224.0/19',
4166 'AE': '94.200.0.0/13',
4167 'AF': '149.54.0.0/17',
4168 'AG': '209.59.64.0/18',
4169 'AI': '204.14.248.0/21',
4170 'AL': '46.99.0.0/16',
4171 'AM': '46.70.0.0/15',
4172 'AO': '105.168.0.0/13',
4173 'AP': '182.50.184.0/21',
4174 'AQ': '23.154.160.0/24',
4175 'AR': '181.0.0.0/12',
4176 'AS': '202.70.112.0/20',
4177 'AT': '77.116.0.0/14',
4178 'AU': '1.128.0.0/11',
4179 'AW': '181.41.0.0/18',
4180 'AX': '185.217.4.0/22',
4181 'AZ': '5.197.0.0/16',
4182 'BA': '31.176.128.0/17',
4183 'BB': '65.48.128.0/17',
4184 'BD': '114.130.0.0/16',
4185 'BE': '57.0.0.0/8',
4186 'BF': '102.178.0.0/15',
4187 'BG': '95.42.0.0/15',
4188 'BH': '37.131.0.0/17',
4189 'BI': '154.117.192.0/18',
4190 'BJ': '137.255.0.0/16',
4191 'BL': '185.212.72.0/23',
4192 'BM': '196.12.64.0/18',
4193 'BN': '156.31.0.0/16',
4194 'BO': '161.56.0.0/16',
4195 'BQ': '161.0.80.0/20',
4196 'BR': '191.128.0.0/12',
4197 'BS': '24.51.64.0/18',
4198 'BT': '119.2.96.0/19',
4199 'BW': '168.167.0.0/16',
4200 'BY': '178.120.0.0/13',
4201 'BZ': '179.42.192.0/18',
4202 'CA': '99.224.0.0/11',
4203 'CD': '41.243.0.0/16',
4204 'CF': '197.242.176.0/21',
4205 'CG': '160.113.0.0/16',
4206 'CH': '85.0.0.0/13',
4207 'CI': '102.136.0.0/14',
4208 'CK': '202.65.32.0/19',
4209 'CL': '152.172.0.0/14',
4210 'CM': '102.244.0.0/14',
4211 'CN': '36.128.0.0/10',
4212 'CO': '181.240.0.0/12',
4213 'CR': '201.192.0.0/12',
4214 'CU': '152.206.0.0/15',
4215 'CV': '165.90.96.0/19',
4216 'CW': '190.88.128.0/17',
4217 'CY': '31.153.0.0/16',
4218 'CZ': '88.100.0.0/14',
4219 'DE': '53.0.0.0/8',
4220 'DJ': '197.241.0.0/17',
4221 'DK': '87.48.0.0/12',
4222 'DM': '192.243.48.0/20',
4223 'DO': '152.166.0.0/15',
4224 'DZ': '41.96.0.0/12',
4225 'EC': '186.68.0.0/15',
4226 'EE': '90.190.0.0/15',
4227 'EG': '156.160.0.0/11',
4228 'ER': '196.200.96.0/20',
4229 'ES': '88.0.0.0/11',
4230 'ET': '196.188.0.0/14',
4231 'EU': '2.16.0.0/13',
4232 'FI': '91.152.0.0/13',
4233 'FJ': '144.120.0.0/16',
4234 'FK': '80.73.208.0/21',
4235 'FM': '119.252.112.0/20',
4236 'FO': '88.85.32.0/19',
4237 'FR': '90.0.0.0/9',
4238 'GA': '41.158.0.0/15',
4239 'GB': '25.0.0.0/8',
4240 'GD': '74.122.88.0/21',
4241 'GE': '31.146.0.0/16',
4242 'GF': '161.22.64.0/18',
4243 'GG': '62.68.160.0/19',
4244 'GH': '154.160.0.0/12',
4245 'GI': '95.164.0.0/16',
4246 'GL': '88.83.0.0/19',
4247 'GM': '160.182.0.0/15',
4248 'GN': '197.149.192.0/18',
4249 'GP': '104.250.0.0/19',
4250 'GQ': '105.235.224.0/20',
4251 'GR': '94.64.0.0/13',
4252 'GT': '168.234.0.0/16',
4253 'GU': '168.123.0.0/16',
4254 'GW': '197.214.80.0/20',
4255 'GY': '181.41.64.0/18',
4256 'HK': '113.252.0.0/14',
4257 'HN': '181.210.0.0/16',
4258 'HR': '93.136.0.0/13',
4259 'HT': '148.102.128.0/17',
4260 'HU': '84.0.0.0/14',
4261 'ID': '39.192.0.0/10',
4262 'IE': '87.32.0.0/12',
4263 'IL': '79.176.0.0/13',
4264 'IM': '5.62.80.0/20',
4265 'IN': '117.192.0.0/10',
4266 'IO': '203.83.48.0/21',
4267 'IQ': '37.236.0.0/14',
4268 'IR': '2.176.0.0/12',
4269 'IS': '82.221.0.0/16',
4270 'IT': '79.0.0.0/10',
4271 'JE': '87.244.64.0/18',
4272 'JM': '72.27.0.0/17',
4273 'JO': '176.29.0.0/16',
4274 'JP': '133.0.0.0/8',
4275 'KE': '105.48.0.0/12',
4276 'KG': '158.181.128.0/17',
4277 'KH': '36.37.128.0/17',
4278 'KI': '103.25.140.0/22',
4279 'KM': '197.255.224.0/20',
4280 'KN': '198.167.192.0/19',
4281 'KP': '175.45.176.0/22',
4282 'KR': '175.192.0.0/10',
4283 'KW': '37.36.0.0/14',
4284 'KY': '64.96.0.0/15',
4285 'KZ': '2.72.0.0/13',
4286 'LA': '115.84.64.0/18',
4287 'LB': '178.135.0.0/16',
4288 'LC': '24.92.144.0/20',
4289 'LI': '82.117.0.0/19',
4290 'LK': '112.134.0.0/15',
4291 'LR': '102.183.0.0/16',
4292 'LS': '129.232.0.0/17',
4293 'LT': '78.56.0.0/13',
4294 'LU': '188.42.0.0/16',
4295 'LV': '46.109.0.0/16',
4296 'LY': '41.252.0.0/14',
4297 'MA': '105.128.0.0/11',
4298 'MC': '88.209.64.0/18',
4299 'MD': '37.246.0.0/16',
4300 'ME': '178.175.0.0/17',
4301 'MF': '74.112.232.0/21',
4302 'MG': '154.126.0.0/17',
4303 'MH': '117.103.88.0/21',
4304 'MK': '77.28.0.0/15',
4305 'ML': '154.118.128.0/18',
4306 'MM': '37.111.0.0/17',
4307 'MN': '49.0.128.0/17',
4308 'MO': '60.246.0.0/16',
4309 'MP': '202.88.64.0/20',
4310 'MQ': '109.203.224.0/19',
4311 'MR': '41.188.64.0/18',
4312 'MS': '208.90.112.0/22',
4313 'MT': '46.11.0.0/16',
4314 'MU': '105.16.0.0/12',
4315 'MV': '27.114.128.0/18',
4316 'MW': '102.70.0.0/15',
4317 'MX': '187.192.0.0/11',
4318 'MY': '175.136.0.0/13',
4319 'MZ': '197.218.0.0/15',
4320 'NA': '41.182.0.0/16',
4321 'NC': '101.101.0.0/18',
4322 'NE': '197.214.0.0/18',
4323 'NF': '203.17.240.0/22',
4324 'NG': '105.112.0.0/12',
4325 'NI': '186.76.0.0/15',
4326 'NL': '145.96.0.0/11',
4327 'NO': '84.208.0.0/13',
4328 'NP': '36.252.0.0/15',
4329 'NR': '203.98.224.0/19',
4330 'NU': '49.156.48.0/22',
4331 'NZ': '49.224.0.0/14',
4332 'OM': '5.36.0.0/15',
4333 'PA': '186.72.0.0/15',
4334 'PE': '186.160.0.0/14',
4335 'PF': '123.50.64.0/18',
4336 'PG': '124.240.192.0/19',
4337 'PH': '49.144.0.0/13',
4338 'PK': '39.32.0.0/11',
4339 'PL': '83.0.0.0/11',
4340 'PM': '70.36.0.0/20',
4341 'PR': '66.50.0.0/16',
4342 'PS': '188.161.0.0/16',
4343 'PT': '85.240.0.0/13',
4344 'PW': '202.124.224.0/20',
4345 'PY': '181.120.0.0/14',
4346 'QA': '37.210.0.0/15',
4347 'RE': '102.35.0.0/16',
4348 'RO': '79.112.0.0/13',
4349 'RS': '93.86.0.0/15',
4350 'RU': '5.136.0.0/13',
4351 'RW': '41.186.0.0/16',
4352 'SA': '188.48.0.0/13',
4353 'SB': '202.1.160.0/19',
4354 'SC': '154.192.0.0/11',
4355 'SD': '102.120.0.0/13',
4356 'SE': '78.64.0.0/12',
4357 'SG': '8.128.0.0/10',
4358 'SI': '188.196.0.0/14',
4359 'SK': '78.98.0.0/15',
4360 'SL': '102.143.0.0/17',
4361 'SM': '89.186.32.0/19',
4362 'SN': '41.82.0.0/15',
4363 'SO': '154.115.192.0/18',
4364 'SR': '186.179.128.0/17',
4365 'SS': '105.235.208.0/21',
4366 'ST': '197.159.160.0/19',
4367 'SV': '168.243.0.0/16',
4368 'SX': '190.102.0.0/20',
4369 'SY': '5.0.0.0/16',
4370 'SZ': '41.84.224.0/19',
4371 'TC': '65.255.48.0/20',
4372 'TD': '154.68.128.0/19',
4373 'TG': '196.168.0.0/14',
4374 'TH': '171.96.0.0/13',
4375 'TJ': '85.9.128.0/18',
4376 'TK': '27.96.24.0/21',
4377 'TL': '180.189.160.0/20',
4378 'TM': '95.85.96.0/19',
4379 'TN': '197.0.0.0/11',
4380 'TO': '175.176.144.0/21',
4381 'TR': '78.160.0.0/11',
4382 'TT': '186.44.0.0/15',
4383 'TV': '202.2.96.0/19',
4384 'TW': '120.96.0.0/11',
4385 'TZ': '156.156.0.0/14',
4386 'UA': '37.52.0.0/14',
4387 'UG': '102.80.0.0/13',
4388 'US': '6.0.0.0/8',
4389 'UY': '167.56.0.0/13',
4390 'UZ': '84.54.64.0/18',
4391 'VA': '212.77.0.0/19',
4392 'VC': '207.191.240.0/21',
4393 'VE': '186.88.0.0/13',
4394 'VG': '66.81.192.0/20',
4395 'VI': '146.226.0.0/16',
4396 'VN': '14.160.0.0/11',
4397 'VU': '202.80.32.0/20',
4398 'WF': '117.20.32.0/21',
4399 'WS': '202.4.32.0/19',
4400 'YE': '134.35.0.0/16',
4401 'YT': '41.242.116.0/22',
4402 'ZA': '41.0.0.0/11',
4403 'ZM': '102.144.0.0/13',
4404 'ZW': '102.177.192.0/18',
4405 }
4406
4407 @classmethod
4408 def random_ipv4(cls, code_or_block):
4409 if len(code_or_block) == 2:
4410 block = cls._country_ip_map.get(code_or_block.upper())
4411 if not block:
4412 return None
4413 else:
4414 block = code_or_block
4415 addr, preflen = block.split('/')
4416 addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
4417 addr_max = addr_min | (0xffffffff >> int(preflen))
4418 return compat_str(socket.inet_ntoa(
4419 compat_struct_pack('!L', random.randint(addr_min, addr_max))))
4420
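# Illustrative usage: GeoUtils.random_ipv4('DE') draws a random address from the
# 53.0.0.0/8 block mapped above; a CIDR block can also be passed directly,
# e.g. GeoUtils.random_ipv4('192.0.2.0/24')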
4421
4422 class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
4423 def __init__(self, proxies=None):
4424 # Set default handlers
4425 for type in ('http', 'https'):
4426 setattr(self, '%s_open' % type,
4427 lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
4428 meth(r, proxy, type))
4429 compat_urllib_request.ProxyHandler.__init__(self, proxies)
4430
4431 def proxy_open(self, req, proxy, type):
4432 req_proxy = req.headers.get('Ytdl-request-proxy')
4433 if req_proxy is not None:
4434 proxy = req_proxy
4435 del req.headers['Ytdl-request-proxy']
4436
4437 if proxy == '__noproxy__':
4438 return None # No Proxy
4439 if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
4440 req.add_header('Ytdl-socks-proxy', proxy)
4441             # yt-dlp's http/https handlers do the SOCKS wrapping of the socket themselves
4442 return None
4443 return compat_urllib_request.ProxyHandler.proxy_open(
4444 self, req, proxy, type)
4445
4446
4447 # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
4448 # released into Public Domain
4449 # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
4450
4451 def long_to_bytes(n, blocksize=0):
4452 """long_to_bytes(n:long, blocksize:int) : string
4453 Convert a long integer to a byte string.
4454
4455 If optional blocksize is given and greater than zero, pad the front of the
4456 byte string with binary zeros so that the length is a multiple of
4457 blocksize.
4458 """
4459 # after much testing, this algorithm was deemed to be the fastest
4460 s = b''
4461 n = int(n)
4462 while n > 0:
4463 s = compat_struct_pack('>I', n & 0xffffffff) + s
4464 n = n >> 32
4465 # strip off leading zeros
4466 for i in range(len(s)):
4467 if s[i] != b'\000'[0]:
4468 break
4469 else:
4470 # only happens when n == 0
4471 s = b'\000'
4472 i = 0
4473 s = s[i:]
4474 # add back some pad bytes. this could be done more efficiently w.r.t. the
4475 # de-padding being done above, but sigh...
4476 if blocksize > 0 and len(s) % blocksize:
4477 s = (blocksize - len(s) % blocksize) * b'\000' + s
4478 return s
4479
4480
4481 def bytes_to_long(s):
4482 """bytes_to_long(string) : long
4483 Convert a byte string to a long integer.
4484
4485 This is (essentially) the inverse of long_to_bytes().
4486 """
4487 acc = 0
4488 length = len(s)
4489 if length % 4:
4490 extra = (4 - length % 4)
4491 s = b'\000' * extra + s
4492 length = length + extra
4493 for i in range(0, length, 4):
4494 acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
4495 return acc
4496
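# Illustrative round trip: long_to_bytes(258) -> b'\x01\x02' and
# bytes_to_long(b'\x01\x02') -> 258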
4497
4498 def ohdave_rsa_encrypt(data, exponent, modulus):
4499 '''
4500 Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
4501
4502 Input:
4503 data: data to encrypt, bytes-like object
4504 exponent, modulus: parameter e and N of RSA algorithm, both integer
4505 Output: hex string of encrypted data
4506
4507 Limitation: supports one block encryption only
4508 '''
4509
4510 payload = int(binascii.hexlify(data[::-1]), 16)
4511 encrypted = pow(payload, exponent, modulus)
4512 return '%x' % encrypted
4513
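# Illustrative identity (e and N stand for arbitrary RSA parameters): the payload
# is the little-endian integer of `data`, so
#   ohdave_rsa_encrypt(b'\x01\x02', e, N) == '%x' % pow(0x0201, e, N)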
4514
4515 def pkcs1pad(data, length):
4516 """
4517 Padding input data with PKCS#1 scheme
4518
4519 @param {int[]} data input data
4520 @param {int} length target length
4521 @returns {int[]} padded data
4522 """
4523 if len(data) > length - 11:
4524 raise ValueError('Input data too long for PKCS#1 padding')
4525
4526     pseudo_random = [random.randint(1, 255) for _ in range(length - len(data) - 3)]
4527 return [0, 2] + pseudo_random + [0] + data
4528
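# Illustrative shape of the result: pkcs1pad([1, 2, 3], 16) returns a 16-element
# list of the form [0, 2, <10 random padding bytes>, 0, 1, 2, 3]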
4529
4530 def encode_base_n(num, n, table=None):
4531 FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
4532 if not table:
4533 table = FULL_TABLE[:n]
4534
4535 if n > len(table):
4536 raise ValueError('base %d exceeds table length %d' % (n, len(table)))
4537
4538 if num == 0:
4539 return table[0]
4540
4541 ret = ''
4542 while num:
4543 ret = table[num % n] + ret
4544 num = num // n
4545 return ret
4546
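# Illustrative usage: encode_base_n(255, 16) -> 'ff'; a custom alphabet can be
# supplied via `table`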
4547
4548 def decode_packed_codes(code):
4549 mobj = re.search(PACKED_CODES_RE, code)
4550 obfuscated_code, base, count, symbols = mobj.groups()
4551 base = int(base)
4552 count = int(count)
4553 symbols = symbols.split('|')
4554 symbol_table = {}
4555
4556 while count:
4557 count -= 1
4558 base_n_count = encode_base_n(count, base)
4559 symbol_table[base_n_count] = symbols[count] or base_n_count
4560
4561 return re.sub(
4562 r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
4563 obfuscated_code)
4564
4565
4566 def caesar(s, alphabet, shift):
4567 if shift == 0:
4568 return s
4569 l = len(alphabet)
4570 return ''.join(
4571 alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
4572 for c in s)
4573
4574
4575 def rot47(s):
4576 return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)
4577
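# Illustrative usage: rot47 shifts within the printable ASCII range and is its
# own inverse, e.g. rot47('AB') -> 'pq' and rot47('pq') -> 'AB'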
4578
4579 def parse_m3u8_attributes(attrib):
4580 info = {}
4581 for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
4582 if val.startswith('"'):
4583 val = val[1:-1]
4584 info[key] = val
4585 return info
4586
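# Illustrative usage (hypothetical attribute list):
#   parse_m3u8_attributes('BANDWIDTH=1280000,CODECS="mp4a.40.2,avc1.4d401f"')
#       -> {'BANDWIDTH': '1280000', 'CODECS': 'mp4a.40.2,avc1.4d401f'}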
4587
4588 def urshift(val, n):
4589 return val >> n if val >= 0 else (val + 0x100000000) >> n
4590
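# Illustrative usage: urshift emulates JavaScript's unsigned `>>>` on 32-bit
# values, e.g. urshift(-1, 28) -> 15, whereas -1 >> 28 == -1 in Python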
4591
4592 # Based on png2str() written by @gdkchan and improved by @yokrysty
4593 # Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
4594 def decode_png(png_data):
4595 # Reference: https://www.w3.org/TR/PNG/
4596 header = png_data[8:]
4597
4598 if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
4599 raise OSError('Not a valid PNG file.')
4600
4601 int_map = {1: '>B', 2: '>H', 4: '>I'}
4602 unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]
4603
4604 chunks = []
4605
4606 while header:
4607 length = unpack_integer(header[:4])
4608 header = header[4:]
4609
4610 chunk_type = header[:4]
4611 header = header[4:]
4612
4613 chunk_data = header[:length]
4614 header = header[length:]
4615
4616 header = header[4:] # Skip CRC
4617
4618 chunks.append({
4619 'type': chunk_type,
4620 'length': length,
4621 'data': chunk_data
4622 })
4623
4624 ihdr = chunks[0]['data']
4625
4626 width = unpack_integer(ihdr[:4])
4627 height = unpack_integer(ihdr[4:8])
4628
4629 idat = b''
4630
4631 for chunk in chunks:
4632 if chunk['type'] == b'IDAT':
4633 idat += chunk['data']
4634
4635 if not idat:
4636 raise OSError('Unable to read PNG data.')
4637
4638 decompressed_data = bytearray(zlib.decompress(idat))
4639
4640 stride = width * 3
4641 pixels = []
4642
4643 def _get_pixel(idx):
4644 x = idx % stride
4645 y = idx // stride
4646 return pixels[y][x]
4647
4648 for y in range(height):
4649 basePos = y * (1 + stride)
4650 filter_type = decompressed_data[basePos]
4651
4652 current_row = []
4653
4654 pixels.append(current_row)
4655
4656 for x in range(stride):
4657 color = decompressed_data[1 + basePos + x]
4658 basex = y * stride + x
4659 left = 0
4660 up = 0
4661
4662 if x > 2:
4663 left = _get_pixel(basex - 3)
4664 if y > 0:
4665 up = _get_pixel(basex - stride)
4666
4667 if filter_type == 1: # Sub
4668 color = (color + left) & 0xff
4669 elif filter_type == 2: # Up
4670 color = (color + up) & 0xff
4671 elif filter_type == 3: # Average
4672 color = (color + ((left + up) >> 1)) & 0xff
4673 elif filter_type == 4: # Paeth
4674 a = left
4675 b = up
4676 c = 0
4677
4678 if x > 2 and y > 0:
4679 c = _get_pixel(basex - stride - 3)
4680
4681 p = a + b - c
4682
4683 pa = abs(p - a)
4684 pb = abs(p - b)
4685 pc = abs(p - c)
4686
4687 if pa <= pb and pa <= pc:
4688 color = (color + a) & 0xff
4689 elif pb <= pc:
4690 color = (color + b) & 0xff
4691 else:
4692 color = (color + c) & 0xff
4693
4694 current_row.append(color)
4695
4696 return width, height, pixels
4697
4698
4699 def write_xattr(path, key, value):
4700 # Windows: Write xattrs to NTFS Alternate Data Streams:
4701 # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
4702 if compat_os_name == 'nt':
4703 assert ':' not in key
4704 assert os.path.exists(path)
4705
4706 try:
4707 with open(f'{path}:{key}', 'wb') as f:
4708 f.write(value)
4709 except OSError as e:
4710 raise XAttrMetadataError(e.errno, e.strerror)
4711 return
4712
4713 # UNIX Method 1. Use xattrs/pyxattrs modules
4714 from .dependencies import xattr
4715
4716 setxattr = None
4717 if getattr(xattr, '_yt_dlp__identifier', None) == 'pyxattr':
4718 # Unicode arguments are not supported in pyxattr until version 0.5.0
4719 # See https://github.com/ytdl-org/youtube-dl/issues/5498
4720 if version_tuple(xattr.__version__) >= (0, 5, 0):
4721 setxattr = xattr.set
4722 elif xattr:
4723 setxattr = xattr.setxattr
4724
4725 if setxattr:
4726 try:
4727 setxattr(path, key, value)
4728 except OSError as e:
4729 raise XAttrMetadataError(e.errno, e.strerror)
4730 return
4731
4732 # UNIX Method 2. Use setfattr/xattr executables
4733 exe = ('setfattr' if check_executable('setfattr', ['--version'])
4734 else 'xattr' if check_executable('xattr', ['-h']) else None)
4735 if not exe:
4736 raise XAttrUnavailableError(
4737 'Couldn\'t find a tool to set the xattrs. Install either the python "xattr" or "pyxattr" modules or the '
4738 + ('"xattr" binary' if sys.platform != 'linux' else 'GNU "attr" package (which contains the "setfattr" tool)'))
4739
4740 value = value.decode('utf-8')
4741 try:
4742 p = Popen(
4743 [exe, '-w', key, value, path] if exe == 'xattr' else [exe, '-n', key, '-v', value, path],
4744 stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
4745 except OSError as e:
4746 raise XAttrMetadataError(e.errno, e.strerror)
4747 stderr = p.communicate_or_kill()[1].decode('utf-8', 'replace')
4748 if p.returncode:
4749 raise XAttrMetadataError(p.returncode, stderr)
4750
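# Illustrative usage (hypothetical path and key); note that `value` must be bytes:
#   write_xattr('video.mp4', 'user.xdg.referrer.url', b'https://example.com/')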
4751
4752 def random_birthday(year_field, month_field, day_field):
4753 start_date = datetime.date(1950, 1, 1)
4754 end_date = datetime.date(1995, 12, 31)
4755 offset = random.randint(0, (end_date - start_date).days)
4756 random_date = start_date + datetime.timedelta(offset)
4757 return {
4758 year_field: str(random_date.year),
4759 month_field: str(random_date.month),
4760 day_field: str(random_date.day),
4761 }
4762
4763
4764 # Templates for internet shortcut files, which are plain text files.
4765 DOT_URL_LINK_TEMPLATE = '''\
4766 [InternetShortcut]
4767 URL=%(url)s
4768 '''
4769
4770 DOT_WEBLOC_LINK_TEMPLATE = '''\
4771 <?xml version="1.0" encoding="UTF-8"?>
4772 <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
4773 <plist version="1.0">
4774 <dict>
4775 \t<key>URL</key>
4776 \t<string>%(url)s</string>
4777 </dict>
4778 </plist>
4779 '''
4780
4781 DOT_DESKTOP_LINK_TEMPLATE = '''\
4782 [Desktop Entry]
4783 Encoding=UTF-8
4784 Name=%(filename)s
4785 Type=Link
4786 URL=%(url)s
4787 Icon=text-html
4788 '''
4789
4790 LINK_TEMPLATES = {
4791 'url': DOT_URL_LINK_TEMPLATE,
4792 'desktop': DOT_DESKTOP_LINK_TEMPLATE,
4793 'webloc': DOT_WEBLOC_LINK_TEMPLATE,
4794 }
4795
4796
4797 def iri_to_uri(iri):
4798 """
4799 Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).
4800
4801     The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-encodes (using an underlying UTF-8 encoding) only those characters that are not already escaped, leaving the rest of the URI intact.
4802 """
4803
4804 iri_parts = compat_urllib_parse_urlparse(iri)
4805
4806 if '[' in iri_parts.netloc:
4807         raise ValueError('IPv6 URIs are not yet supported.')
4808 # Querying `.netloc`, when there's only one bracket, also raises a ValueError.
4809
4810 # The `safe` argument values, that the following code uses, contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.
4811
4812 net_location = ''
4813 if iri_parts.username:
4814 net_location += urllib.parse.quote(iri_parts.username, safe=r"!$%&'()*+,~")
4815 if iri_parts.password is not None:
4816 net_location += ':' + urllib.parse.quote(iri_parts.password, safe=r"!$%&'()*+,~")
4817 net_location += '@'
4818
4819 net_location += iri_parts.hostname.encode('idna').decode('utf-8') # Punycode for Unicode hostnames.
4820 # The 'idna' encoding produces ASCII text.
4821 if iri_parts.port is not None and iri_parts.port != 80:
4822 net_location += ':' + str(iri_parts.port)
4823
4824 return urllib.parse.urlunparse(
4825 (iri_parts.scheme,
4826 net_location,
4827
4828 urllib.parse.quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),
4829
4830 # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
4831 urllib.parse.quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),
4832
4833 # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
4834 urllib.parse.quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),
4835
4836 urllib.parse.quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))
4837
4838 # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
4839
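# Illustrative usage: iri_to_uri('http://example.com/ä?ö=ü')
#     -> 'http://example.com/%C3%A4?%C3%B6=%C3%BC'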
4840
4841 def to_high_limit_path(path):
4842 if sys.platform in ['win32', 'cygwin']:
4843 # Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited.
4844 return '\\\\?\\' + os.path.abspath(path)
4845
4846 return path
4847
4848
4849 def format_field(obj, field=None, template='%s', ignore=(None, ''), default='', func=None):
4850 val = traverse_obj(obj, *variadic(field))
4851 if val in ignore:
4852 return default
4853 return template % (func(val) if func else val)
4854
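# Illustrative usage: format_field({'height': 1080}, 'height', '%sp') -> '1080p';
# format_field({}, 'height', '%sp', default='unknown') -> 'unknown'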
4855
4856 def clean_podcast_url(url):
4857 return re.sub(r'''(?x)
4858 (?:
4859 (?:
4860 chtbl\.com/track|
4861 media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
4862 play\.podtrac\.com
4863 )/[^/]+|
4864 (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
4865 flex\.acast\.com|
4866 pd(?:
4867 cn\.co| # https://podcorn.com/analytics-prefix/
4868 st\.fm # https://podsights.com/docs/
4869 )/e
4870 )/''', '', url)
4871
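# Illustrative usage (hypothetical tracking-prefixed URL):
#   clean_podcast_url('https://chtbl.com/track/12345/traffic.example.com/episode.mp3')
#       -> 'https://traffic.example.com/episode.mp3'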
4872
4873 _HEX_TABLE = '0123456789abcdef'
4874
4875
4876 def random_uuidv4():
4877 return re.sub(r'[xy]', lambda x: _HEX_TABLE[random.randint(0, 15)], 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')
4878
4879
4880 def make_dir(path, to_screen=None):
4881 try:
4882 dn = os.path.dirname(path)
4883 if dn and not os.path.exists(dn):
4884 os.makedirs(dn)
4885 return True
4886 except OSError as err:
4887         if callable(to_screen):
4888 to_screen('unable to create directory ' + error_to_compat_str(err))
4889 return False
4890
4891
4892 def get_executable_path():
4893 from zipimport import zipimporter
4894 if hasattr(sys, 'frozen'): # Running from PyInstaller
4895 path = os.path.dirname(sys.executable)
4896 elif isinstance(__loader__, zipimporter): # Running from ZIP
4897 path = os.path.join(os.path.dirname(__file__), '../..')
4898 else:
4899 path = os.path.join(os.path.dirname(__file__), '..')
4900 return os.path.abspath(path)
4901
4902
4903 def load_plugins(name, suffix, namespace):
4904 classes = {}
4905 with contextlib.suppress(FileNotFoundError):
4906 plugins_spec = importlib.util.spec_from_file_location(
4907 name, os.path.join(get_executable_path(), 'ytdlp_plugins', name, '__init__.py'))
4908 plugins = importlib.util.module_from_spec(plugins_spec)
4909 sys.modules[plugins_spec.name] = plugins
4910 plugins_spec.loader.exec_module(plugins)
4911 for name in dir(plugins):
4912 if name in namespace:
4913 continue
4914 if not name.endswith(suffix):
4915 continue
4916 klass = getattr(plugins, name)
4917 classes[name] = namespace[name] = klass
4918 return classes
4919
4920
4921 def traverse_obj(
4922 obj, *path_list, default=None, expected_type=None, get_all=True,
4923 casesense=True, is_user_input=False, traverse_string=False):
4924 ''' Traverse nested list/dict/tuple
4925 @param path_list A list of paths which are checked one by one.
4926 Each path is a list of keys where each key is a:
4927 - None: Do nothing
4928 - string: A dictionary key
4929 - int: An index into a list
4930 - tuple: A list of keys all of which will be traversed
4931 - Ellipsis: Fetch all values in the object
4932 - Function: Takes the key and value as arguments
4933 and returns whether the key matches or not
4934 @param default Default value to return
4935 @param expected_type Only accept final value of this type (Can also be any callable)
4936 @param get_all Return all the values obtained from a path or only the first one
4937 @param casesense Whether to consider dictionary keys as case sensitive
4938 @param is_user_input Whether the keys are generated from user input. If True,
4939 strings are converted to int/slice if necessary
4940 @param traverse_string Whether to traverse inside strings. If True, any
4941 non-compatible object will also be converted into a string
4942 # TODO: Write tests
4943 '''
4944 if not casesense:
4945 _lower = lambda k: (k.lower() if isinstance(k, str) else k)
4946 path_list = (map(_lower, variadic(path)) for path in path_list)
4947
4948 def _traverse_obj(obj, path, _current_depth=0):
4949 nonlocal depth
4950 path = tuple(variadic(path))
4951 for i, key in enumerate(path):
4952 if None in (key, obj):
4953 return obj
4954 if isinstance(key, (list, tuple)):
4955 obj = [_traverse_obj(obj, sub_key, _current_depth) for sub_key in key]
4956 key = ...
4957 if key is ...:
4958 obj = (obj.values() if isinstance(obj, dict)
4959 else obj if isinstance(obj, (list, tuple, LazyList))
4960 else str(obj) if traverse_string else [])
4961 _current_depth += 1
4962 depth = max(depth, _current_depth)
4963 return [_traverse_obj(inner_obj, path[i + 1:], _current_depth) for inner_obj in obj]
4964 elif callable(key):
4965 if isinstance(obj, (list, tuple, LazyList)):
4966 obj = enumerate(obj)
4967 elif isinstance(obj, dict):
4968 obj = obj.items()
4969 else:
4970 if not traverse_string:
4971 return None
4972 obj = str(obj)
4973 _current_depth += 1
4974 depth = max(depth, _current_depth)
4975 return [_traverse_obj(v, path[i + 1:], _current_depth) for k, v in obj if try_call(key, args=(k, v))]
4976 elif isinstance(obj, dict) and not (is_user_input and key == ':'):
4977 obj = (obj.get(key) if casesense or (key in obj)
4978 else next((v for k, v in obj.items() if _lower(k) == key), None))
4979 else:
4980 if is_user_input:
4981 key = (int_or_none(key) if ':' not in key
4982 else slice(*map(int_or_none, key.split(':'))))
4983 if key == slice(None):
4984 return _traverse_obj(obj, (..., *path[i + 1:]), _current_depth)
4985 if not isinstance(key, (int, slice)):
4986 return None
4987 if not isinstance(obj, (list, tuple, LazyList)):
4988 if not traverse_string:
4989 return None
4990 obj = str(obj)
4991 try:
4992 obj = obj[key]
4993 except IndexError:
4994 return None
4995 return obj
4996
4997 if isinstance(expected_type, type):
4998 type_test = lambda val: val if isinstance(val, expected_type) else None
4999 elif expected_type is not None:
5000 type_test = expected_type
5001 else:
5002 type_test = lambda val: val
5003
5004 for path in path_list:
5005 depth = 0
5006 val = _traverse_obj(obj, path)
5007 if val is not None:
5008 if depth:
5009 for _ in range(depth - 1):
5010 val = itertools.chain.from_iterable(v for v in val if v is not None)
5011 val = [v for v in map(type_test, val) if v is not None]
5012 if val:
5013 return val if get_all else val[0]
5014 else:
5015 val = type_test(val)
5016 if val is not None:
5017 return val
5018 return default
5019
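# Illustrative usage (hypothetical nested data):
#   traverse_obj({'a': [{'b': 1}, {'b': 2}]}, ('a', ..., 'b'))  -> [1, 2]
#   traverse_obj({'a': {'b': None}}, ('a', 'b'), default=0)     -> 0
#   traverse_obj({'A': 1}, 'a', casesense=False)                -> 1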
5020
5021 def traverse_dict(dictn, keys, casesense=True):
5022 write_string('DeprecationWarning: yt_dlp.utils.traverse_dict is deprecated '
5023 'and may be removed in a future version. Use yt_dlp.utils.traverse_obj instead')
5024 return traverse_obj(dictn, keys, casesense=casesense, is_user_input=True, traverse_string=True)
5025
5026
5027 def get_first(obj, keys, **kwargs):
5028 return traverse_obj(obj, (..., *variadic(keys)), **kwargs, get_all=False)
5029
5030
5031 def variadic(x, allowed_types=(str, bytes, dict)):
5032 return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,)
5033
5034
5035 def decode_base(value, digits):
5036     # Convert the given base-n string (over the `digits` alphabet) to an integer
5037     table = {char: index for index, char in enumerate(digits)}
5038     result = 0
5039     base = len(digits)
5040     for char in value:
5041         result *= base
5042         result += table[char]
5043     return result
5044
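# Illustrative usage: decode_base('ff', '0123456789abcdef') -> 255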
5045
5046 def time_seconds(**kwargs):
5047 t = datetime.datetime.now(datetime.timezone(datetime.timedelta(**kwargs)))
5048 return t.timestamp()
5049
5050
5051 # create a JSON Web Signature (jws) with HS256 algorithm
5052 # the resulting format is in JWS Compact Serialization
5053 # implemented following JWT https://www.rfc-editor.org/rfc/rfc7519.html
5054 # implemented following JWS https://www.rfc-editor.org/rfc/rfc7515.html
5055 def jwt_encode_hs256(payload_data, key, headers={}):
5056 header_data = {
5057 'alg': 'HS256',
5058 'typ': 'JWT',
5059 }
5060 if headers:
5061 header_data.update(headers)
5062 header_b64 = base64.b64encode(json.dumps(header_data).encode('utf-8'))
5063 payload_b64 = base64.b64encode(json.dumps(payload_data).encode('utf-8'))
5064 h = hmac.new(key.encode('utf-8'), header_b64 + b'.' + payload_b64, hashlib.sha256)
5065 signature_b64 = base64.b64encode(h.digest())
5066 token = header_b64 + b'.' + payload_b64 + b'.' + signature_b64
5067 return token
5068
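# Note: jwt_encode_hs256 above uses standard Base64 with padding, whereas strict
# JWS (RFC 7515) mandates unpadded base64url, so strict validators may reject
# the resulting tokens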
5069
5070 # can be extended in future to verify the signature and parse header and return the algorithm used if it's not HS256
5071 def jwt_decode_hs256(jwt):
5072 header_b64, payload_b64, signature_b64 = jwt.split('.')
5073 payload_data = json.loads(base64.urlsafe_b64decode(payload_b64))
5074 return payload_data
5075
5076
5077 def supports_terminal_sequences(stream):
5078 if compat_os_name == 'nt':
5079 from .compat import WINDOWS_VT_MODE # Must be imported locally
5080 if not WINDOWS_VT_MODE or get_windows_version() < (10, 0, 10586):
5081 return False
5082 elif not os.getenv('TERM'):
5083 return False
5084 try:
5085 return stream.isatty()
5086 except BaseException:
5087 return False
5088
5089
5090 _terminal_sequences_re = re.compile('\033\\[[^m]+m')
5091
5092
5093 def remove_terminal_sequences(string):
5094 return _terminal_sequences_re.sub('', string)
5095
5096
5097 def number_of_digits(number):
5098 return len('%d' % number)
5099
5100
5101 def join_nonempty(*values, delim='-', from_dict=None):
5102 if from_dict is not None:
5103 values = map(from_dict.get, values)
5104 return delim.join(map(str, filter(None, values)))
5105
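# Illustrative usage: join_nonempty('1080p', None, '', 'mp4') -> '1080p-mp4';
# falsy values are dropped before joining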
5106
5107 def scale_thumbnails_to_max_format_width(formats, thumbnails, url_width_re):
5108 """
5109 Find the largest format dimensions in terms of video width and, for each thumbnail:
5110     * Modify the URL: Match the width with the provided regex and replace it with the largest format width
5111 * Update dimensions
5112
5113 This function is useful with video services that scale the provided thumbnails on demand
5114 """
5115 _keys = ('width', 'height')
5116 max_dimensions = max(
5117 (tuple(format.get(k) or 0 for k in _keys) for format in formats),
5118 default=(0, 0))
5119 if not max_dimensions[0]:
5120 return thumbnails
5121 return [
5122 merge_dicts(
5123 {'url': re.sub(url_width_re, str(max_dimensions[0]), thumbnail['url'])},
5124 dict(zip(_keys, max_dimensions)), thumbnail)
5125 for thumbnail in thumbnails
5126 ]
5127
5128
5129 def parse_http_range(range):
5130 """ Parse value of "Range" or "Content-Range" HTTP header into tuple. """
5131 if not range:
5132 return None, None, None
5133 crg = re.search(r'bytes[ =](\d+)-(\d+)?(?:/(\d+))?', range)
5134 if not crg:
5135 return None, None, None
5136 return int(crg.group(1)), int_or_none(crg.group(2)), int_or_none(crg.group(3))
5137
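# Illustrative usage: parse_http_range('bytes 0-499/1234') -> (0, 499, 1234);
# absent parts come back as None, e.g. parse_http_range('bytes=500-') -> (500, None, None)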
5138
5139 class Config:
5140 own_args = None
5141 filename = None
5142 __initialized = False
5143
5144 def __init__(self, parser, label=None):
5145 self._parser, self.label = parser, label
5146 self._loaded_paths, self.configs = set(), []
5147
5148 def init(self, args=None, filename=None):
5149 assert not self.__initialized
5150 directory = ''
5151 if filename:
5152 location = os.path.realpath(filename)
5153 directory = os.path.dirname(location)
5154 if location in self._loaded_paths:
5155 return False
5156 self._loaded_paths.add(location)
5157
5158 self.__initialized = True
5159 self.own_args, self.filename = args, filename
5160 for location in self._parser.parse_args(args)[0].config_locations or []:
5161 location = os.path.join(directory, expand_path(location))
5162 if os.path.isdir(location):
5163 location = os.path.join(location, 'yt-dlp.conf')
5164 if not os.path.exists(location):
5165 self._parser.error(f'config location {location} does not exist')
5166 self.append_config(self.read_file(location), location)
5167 return True
5168
5169 def __str__(self):
5170 label = join_nonempty(
5171 self.label, 'config', f'"{self.filename}"' if self.filename else '',
5172 delim=' ')
5173 return join_nonempty(
5174 self.own_args is not None and f'{label[0].upper()}{label[1:]}: {self.hide_login_info(self.own_args)}',
5175 *(f'\n{c}'.replace('\n', '\n| ')[1:] for c in self.configs),
5176 delim='\n')
5177
5178 @staticmethod
5179 def read_file(filename, default=[]):
5180 try:
5181 optionf = open(filename)
5182 except OSError:
5183 return default # silently skip if file is not present
5184 try:
5185 # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
5186 contents = optionf.read()
5187 res = shlex.split(contents, comments=True)
5188 finally:
5189 optionf.close()
5190 return res
5191
5192 @staticmethod
5193 def hide_login_info(opts):
5194 PRIVATE_OPTS = {'-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'}
5195 eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')
5196
5197 def _scrub_eq(o):
5198 m = eqre.match(o)
5199 if m:
5200 return m.group('key') + '=PRIVATE'
5201 else:
5202 return o
5203
5204 opts = list(map(_scrub_eq, opts))
5205 for idx, opt in enumerate(opts):
5206 if opt in PRIVATE_OPTS and idx + 1 < len(opts):
5207 opts[idx + 1] = 'PRIVATE'
5208 return opts
5209
5210 def append_config(self, *args, label=None):
5211 config = type(self)(self._parser, label)
5212 config._loaded_paths = self._loaded_paths
5213 if config.init(*args):
5214 self.configs.append(config)
5215
5216 @property
5217 def all_args(self):
5218 for config in reversed(self.configs):
5219 yield from config.all_args
5220 yield from self.own_args or []
5221
5222 def parse_args(self):
5223 return self._parser.parse_args(self.all_args)
5224
5225
5226 class WebSocketsWrapper:
5227 """Wraps websockets module to use in non-async scopes"""
5228 pool = None
5229
5230 def __init__(self, url, headers=None, connect=True):
5231 self.loop = asyncio.new_event_loop()
5232 # XXX: "loop" is deprecated
5233 self.conn = websockets.connect(
5234 url, extra_headers=headers, ping_interval=None,
5235 close_timeout=float('inf'), loop=self.loop, ping_timeout=float('inf'))
5236 if connect:
5237 self.__enter__()
5238 atexit.register(self.__exit__, None, None, None)
5239
5240 def __enter__(self):
5241 if not self.pool:
5242 self.pool = self.run_with_loop(self.conn.__aenter__(), self.loop)
5243 return self
5244
5245 def send(self, *args):
5246 self.run_with_loop(self.pool.send(*args), self.loop)
5247
5248 def recv(self, *args):
5249 return self.run_with_loop(self.pool.recv(*args), self.loop)
5250
5251 def __exit__(self, type, value, traceback):
5252 try:
5253 return self.run_with_loop(self.conn.__aexit__(type, value, traceback), self.loop)
5254 finally:
5255             self._cancel_all_tasks(self.loop)
5256             self.loop.close()
5257
5258 # taken from https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py with modifications
5259     # For contributors: if any new library that uses asyncio needs to be run in a non-async scope, move these functions out of this class
5260 @staticmethod
5261 def run_with_loop(main, loop):
5262 if not asyncio.iscoroutine(main):
5263 raise ValueError(f'a coroutine was expected, got {main!r}')
5264
5265 try:
5266 return loop.run_until_complete(main)
5267 finally:
5268 loop.run_until_complete(loop.shutdown_asyncgens())
5269 if hasattr(loop, 'shutdown_default_executor'):
5270 loop.run_until_complete(loop.shutdown_default_executor())
5271
5272 @staticmethod
5273 def _cancel_all_tasks(loop):
5274 to_cancel = asyncio.all_tasks(loop)
5275
5276 if not to_cancel:
5277 return
5278
5279 for task in to_cancel:
5280 task.cancel()
5281
5282 # XXX: "loop" is removed in python 3.10+
5283 loop.run_until_complete(
5284 asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))
5285
5286 for task in to_cancel:
5287 if task.cancelled():
5288 continue
5289 if task.exception() is not None:
5290 loop.call_exception_handler({
5291 'message': 'unhandled exception during asyncio.run() shutdown',
5292 'exception': task.exception(),
5293 'task': task,
5294 })
5295
5296
5297 def merge_headers(*dicts):
5298 """Merge dicts of http headers case insensitively, prioritizing the latter ones"""
5299 return {k.title(): v for k, v in itertools.chain.from_iterable(map(dict.items, dicts))}
5300
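# Illustrative usage: merge_headers({'user-agent': 'A'}, {'User-Agent': 'B'})
#     -> {'User-Agent': 'B'}; keys are title-cased and later dicts win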
5301
5302 class classproperty:
5303 def __init__(self, f):
5304 self.f = f
5305
5306 def __get__(self, _, cls):
5307 return self.f(cls)
5308
5309
5310 def Namespace(**kwargs):
5311 return collections.namedtuple('Namespace', kwargs)(**kwargs)
5312
5313
5314 # Deprecated
5315 has_certifi = bool(certifi)
5316 has_websockets = bool(websockets)