#!/usr/bin/env python3
import atexit
import base64
import binascii
import calendar
import codecs
import collections
import contextlib
import ctypes
import datetime
import email.header
import email.utils
import errno
import gzip
import hashlib
import hmac
import importlib.util
import io
import itertools
import json
import locale
import math
import mimetypes
import operator
import os
import platform
import random
import re
import shlex
import socket
import ssl
import subprocess
import sys
import tempfile
import time
import traceback
import types
import urllib.parse
import xml.etree.ElementTree
import zlib

from .compat import asyncio, functools  # isort: split
from .compat import (
    compat_chr,
    compat_cookiejar,
    compat_etree_fromstring,
    compat_expanduser,
    compat_html_entities,
    compat_html_entities_html5,
    compat_HTMLParseError,
    compat_HTMLParser,
    compat_http_client,
    compat_HTTPError,
    compat_os_name,
    compat_parse_qs,
    compat_shlex_quote,
    compat_str,
    compat_struct_pack,
    compat_struct_unpack,
    compat_urllib_error,
    compat_urllib_parse_unquote_plus,
    compat_urllib_parse_urlencode,
    compat_urllib_parse_urlparse,
    compat_urllib_request,
    compat_urlparse,
)
from .dependencies import brotli, certifi, websockets
from .socks import ProxyType, sockssocket


def register_socks_protocols():
    # "Register" SOCKS protocols
    # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
    # URLs with protocols not in urlparse.uses_netloc are not handled correctly
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in compat_urlparse.uses_netloc:
            compat_urlparse.uses_netloc.append(scheme)


# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))


def random_user_agent():
    _USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
    _CHROME_VERSIONS = (
        '90.0.4430.212',
        '90.0.4430.24',
        '90.0.4430.70',
        '90.0.4430.72',
        '90.0.4430.85',
        '90.0.4430.93',
        '91.0.4472.101',
        '91.0.4472.106',
        '91.0.4472.114',
        '91.0.4472.124',
        '91.0.4472.164',
        '91.0.4472.19',
        '91.0.4472.77',
        '92.0.4515.107',
        '92.0.4515.115',
        '92.0.4515.131',
        '92.0.4515.159',
        '92.0.4515.43',
        '93.0.4556.0',
        '93.0.4577.15',
        '93.0.4577.63',
        '93.0.4577.82',
        '94.0.4606.41',
        '94.0.4606.54',
        '94.0.4606.61',
        '94.0.4606.71',
        '94.0.4606.81',
        '94.0.4606.85',
        '95.0.4638.17',
        '95.0.4638.50',
        '95.0.4638.54',
        '95.0.4638.69',
        '95.0.4638.74',
        '96.0.4664.18',
        '96.0.4664.45',
        '96.0.4664.55',
        '96.0.4664.93',
        '97.0.4692.20',
    )
    return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)


SUPPORTED_ENCODINGS = [
    'gzip', 'deflate'
]
if brotli:
    SUPPORTED_ENCODINGS.append('br')

std_headers = {
    'User-Agent': random_user_agent(),
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-us,en;q=0.5',
    'Sec-Fetch-Mode': 'navigate',
}


USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}


NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}

KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'avi', 'divx',
    'mov',
    'asf', 'wmv', 'wma',
    '3gp', '3g2',
    'mp3',
    'flac',
    'ape',
    'wav',
    'f4f', 'f4m', 'm3u8', 'smil')

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))

DATE_FORMATS = (
    '%d %B %Y',
    '%d %b %Y',
    '%B %d %Y',
    '%B %dst %Y',
    '%B %dnd %Y',
    '%B %drd %Y',
    '%B %dth %Y',
    '%b %d %Y',
    '%b %dst %Y',
    '%b %dnd %Y',
    '%b %drd %Y',
    '%b %dth %Y',
    '%b %dst %Y %I:%M',
    '%b %dnd %Y %I:%M',
    '%b %drd %Y %I:%M',
    '%b %dth %Y %I:%M',
    '%Y %m %d',
    '%Y-%m-%d',
    '%Y.%m.%d.',
    '%Y/%m/%d',
    '%Y/%m/%d %H:%M',
    '%Y/%m/%d %H:%M:%S',
    '%Y%m%d%H%M',
    '%Y%m%d%H%M%S',
    '%Y%m%d',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S.%f',
    '%Y-%m-%d %H:%M:%S:%f',
    '%d.%m.%Y %H:%M',
    '%d.%m.%Y %H.%M',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%Y-%m-%dT%H:%M',
    '%b %d %Y at %H:%M',
    '%b %d %Y at %H:%M:%S',
    '%B %d %Y at %H:%M',
    '%B %d %Y at %H:%M:%S',
    '%H:%M %d-%b-%Y',
)

DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
])

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])

PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'

NUMBER_RE = r'\d+(?:\.\d+)?'


@functools.cache
def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref


def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    tf = tempfile.NamedTemporaryFile(
        prefix=f'{os.path.basename(fn)}.', dir=os.path.dirname(fn),
        suffix='.tmp', delete=False, mode='w', encoding='utf-8')

    try:
        with tf:
            json.dump(obj, tf, ensure_ascii=False)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            with contextlib.suppress(OSError):
                os.unlink(fn)
        with contextlib.suppress(OSError):
            mask = os.umask(0)
            os.umask(mask)
            os.chmod(tf.name, 0o666 & ~mask)
        os.rename(tf.name, fn)
    except Exception:
        with contextlib.suppress(OSError):
            os.remove(tf.name)
        raise


def find_xpath_attr(node, xpath, key, val=None):
    """ Find the xpath xpath[@key=val] """
    assert re.match(r'^[a-zA-Z_-]+$', key)
    expr = xpath + ('[@%s]' % key if val is None else f"[@{key}='{val}']")
    return node.find(expr)

# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter


def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)
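
# Illustrative doctest-style example (output derived from the mapping logic above;
# the namespace URL is a made-up placeholder):
#   >>> xpath_with_ns('ns0:tag/ns0:subtag', {'ns0': 'http://example.com/ns'})
#   '{http://example.com/ns}tag/{http://example.com/ns}subtag'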


def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(xpath)

    if isinstance(xpath, (str, compat_str)):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n


def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text


def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = f'{xpath}[@{key}]' if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]


def get_element_by_id(id, html, **kwargs):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html, **kwargs)


def get_element_html_by_id(id, html, **kwargs):
    """Return the html of the tag with the specified ID in the passed HTML document"""
    return get_element_html_by_attribute('id', id, html, **kwargs)


def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None
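
# Illustrative doctest-style example (output derived from the class-matching regex
# used by get_elements_by_class below):
#   >>> get_element_by_class('foo', '<div class="foo bar">some text</div>')
#   'some text'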


def get_element_html_by_class(class_name, html):
    """Return the html of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_html_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_by_attribute(attribute, value, html, **kwargs):
    retval = get_elements_by_attribute(attribute, value, html, **kwargs)
    return retval[0] if retval else None


def get_element_html_by_attribute(attribute, value, html, **kwargs):
    retval = get_elements_html_by_attribute(attribute, value, html, **kwargs)
    return retval[0] if retval else None


def get_elements_by_class(class_name, html, **kwargs):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_by_attribute(
        'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_html_by_class(class_name, html):
    """Return the html of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_html_by_attribute(
        'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_by_attribute(*args, **kwargs):
    """Return the content of all tags with the specified attribute in the passed HTML document as a list"""
    return [content for content, _ in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_html_by_attribute(*args, **kwargs):
    """Return the html of all tags with the specified attribute in the passed HTML document as a list"""
    return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_text_and_html_by_attribute(attribute, value, html, escape_value=True):
    """
    Return the text (content) and the html (whole) of each tag with the specified
    attribute in the passed HTML document
    """

    quote = '' if re.match(r'''[\s"'`=<>]''', value) else '?'

    value = re.escape(value) if escape_value else value

    partial_element_re = rf'''(?x)
        <(?P<tag>[a-zA-Z0-9:._-]+)
         (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
         \s{re.escape(attribute)}\s*=\s*(?P<_q>['"]{quote})(?-x:{value})(?P=_q)
        '''

    for m in re.finditer(partial_element_re, html):
        content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():])

        yield (
            unescapeHTML(re.sub(r'^(?P<q>["\'])(?P<content>.*)(?P=q)$', r'\g<content>', content, flags=re.DOTALL)),
            whole
        )


class HTMLBreakOnClosingTagParser(compat_HTMLParser):
    """
    HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
    closing tag for the first opening tag it has encountered, and can be used
    as a context manager
    """

    class HTMLBreakOnClosingTagException(Exception):
        pass

    def __init__(self):
        self.tagstack = collections.deque()
        compat_HTMLParser.__init__(self)

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def close(self):
        # handle_endtag does not return upon raising HTMLBreakOnClosingTagException,
        # so data remains buffered; we no longer have any interest in it, thus
        # override this method to discard it
        pass

    def handle_starttag(self, tag, _):
        self.tagstack.append(tag)

    def handle_endtag(self, tag):
        if not self.tagstack:
            raise compat_HTMLParseError('no tags in the stack')
        while self.tagstack:
            inner_tag = self.tagstack.pop()
            if inner_tag == tag:
                break
        else:
            raise compat_HTMLParseError(f'matching opening tag for closing {tag} tag not found')
        if not self.tagstack:
            raise self.HTMLBreakOnClosingTagException()


def get_element_text_and_html_by_tag(tag, html):
    """
    For the first element with the specified tag in the passed HTML document
    return its content (text) and the whole element (html)
    """
    def find_or_raise(haystack, needle, exc):
        try:
            return haystack.index(needle)
        except ValueError:
            raise exc
    closing_tag = f'</{tag}>'
    whole_start = find_or_raise(
        html, f'<{tag}', compat_HTMLParseError(f'opening {tag} tag not found'))
    content_start = find_or_raise(
        html[whole_start:], '>', compat_HTMLParseError(f'malformed opening {tag} tag'))
    content_start += whole_start + 1
    with HTMLBreakOnClosingTagParser() as parser:
        parser.feed(html[whole_start:content_start])
        if not parser.tagstack or parser.tagstack[0] != tag:
            raise compat_HTMLParseError(f'parser did not match opening {tag} tag')
        offset = content_start
        while offset < len(html):
            next_closing_tag_start = find_or_raise(
                html[offset:], closing_tag,
                compat_HTMLParseError(f'closing {tag} tag not found'))
            next_closing_tag_end = next_closing_tag_start + len(closing_tag)
            try:
                parser.feed(html[offset:offset + next_closing_tag_end])
                offset += next_closing_tag_end
            except HTMLBreakOnClosingTagParser.HTMLBreakOnClosingTagException:
                return html[content_start:offset + next_closing_tag_start], \
                    html[whole_start:offset + next_closing_tag_end]
        raise compat_HTMLParseError('unexpected end of html')


class HTMLAttributeParser(compat_HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""

    def __init__(self):
        self.attrs = {}
        compat_HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)


class HTMLListAttrsParser(compat_HTMLParser):
    """HTML parser to gather the attributes for the elements of a list"""

    def __init__(self):
        compat_HTMLParser.__init__(self)
        self.items = []
        self._level = 0

    def handle_starttag(self, tag, attrs):
        if tag == 'li' and self._level == 0:
            self.items.append(dict(attrs))
        self._level += 1

    def handle_endtag(self, tag):
        self._level -= 1


def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&#98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', 'c': 'baz', 'd': 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    """
    parser = HTMLAttributeParser()
    with contextlib.suppress(compat_HTMLParseError):
        parser.feed(html_element)
        parser.close()
    return parser.attrs


def parse_list(webpage):
    """Given a string for a series of HTML <li> elements,
    return a list of dictionaries with their attributes"""
    parser = HTMLListAttrsParser()
    parser.feed(webpage)
    parser.close()
    return parser.items


def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    html = re.sub(r'\s+', ' ', html)
    html = re.sub(r'(?u)\s?<\s?br\s?/?\s?>\s?', '\n', html)
    html = re.sub(r'(?u)<\s?/\s?p\s?>\s?<\s?p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()
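
# Illustrative doctest-style example (output derived from the regexes above:
# paragraph breaks become newlines, tags are stripped, entities decoded):
#   >>> clean_html('<p>First&amp;second</p><p>third</p>')
#   'First&second\nthird'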


class LenientJSONDecoder(json.JSONDecoder):
    def __init__(self, *args, transform_source=None, ignore_extra=False, **kwargs):
        self.transform_source, self.ignore_extra = transform_source, ignore_extra
        super().__init__(*args, **kwargs)

    def decode(self, s):
        if self.transform_source:
            s = self.transform_source(s)
        if self.ignore_extra:
            return self.raw_decode(s.lstrip())[0]
        return super().decode(s)


def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    if filename == '-':
        if sys.platform == 'win32':
            import msvcrt
            msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)

    for attempt in range(2):
        try:
            try:
                if sys.platform == 'win32':
                    # FIXME: An exclusive lock also locks the file from being read.
                    # Since windows locks are mandatory, don't lock the file on windows (for now).
                    # Ref: https://github.com/yt-dlp/yt-dlp/issues/3124
                    raise LockingUnsupportedError()
                stream = locked_file(filename, open_mode, block=False).__enter__()
            except OSError:
                stream = open(filename, open_mode)
            return stream, filename
        except OSError as err:
            if attempt or err.errno in (errno.EACCES,):
                raise
            old_filename, filename = filename, sanitize_path(filename)
            if old_filename == filename:
                raise


def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp
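
# Illustrative doctest-style example (an RFC 2822 date at the Unix epoch):
#   >>> timeconvert('Thu, 01 Jan 1970 00:00:00 +0000')
#   0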


def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
    """Sanitizes a string so it could be used as part of a filename.
    @param restricted   Use a stricter subset of allowed characters
    @param is_id        Whether this is an ID that should be kept unchanged if possible.
                        If unset, yt-dlp's new sanitization rules are in effect
    """
    if s == '':
        return ''

    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        elif not restricted and char == '\n':
            return '\0 '
        elif char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '\0_\0-' if restricted else '\0 \0-'
        elif char in '\\/|*<>':
            return '\0_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace() or ord(char) > 127):
            return '\0_'
        return char

    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)  # Handle timestamps
    result = ''.join(map(replace_insane, s))
    if is_id is NO_DEFAULT:
        result = re.sub('(\0.)(?:(?=\\1)..)+', r'\1', result)  # Remove repeated substitute chars
        STRIP_RE = '(?:\0.|[ _-])*'
        result = re.sub(f'^\0.{STRIP_RE}|{STRIP_RE}\0.$', '', result)  # Remove substitute chars from start/end
    result = result.replace('\0', '') or '_'

    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
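
# Illustrative doctest-style example (default mode; the timestamp regex above
# replaces ':' within time-like runs of digits):
#   >>> sanitize_filename('live stream 12:34:56')
#   'live stream 12_34_56'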


def sanitize_path(s, force=False):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform == 'win32':
        force = False
        drive_or_unc, _ = os.path.splitdrive(s)
    elif force:
        drive_or_unc = ''
    else:
        return s

    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    elif force and s and s[0] == os.path.sep:
        sanitized_path.insert(0, os.path.sep)
    return os.path.join(*sanitized_path)


def sanitize_url(url):
    # Prepend protocol-less URLs with `http:` scheme in order to reduce
    # the number of unwanted failures due to a missing protocol
    if url is None:
        return
    elif url.startswith('//'):
        return 'http:%s' % url
    # Fix some common typos seen so far
    COMMON_TYPOS = (
        # https://github.com/ytdl-org/youtube-dl/issues/15649
        (r'^httpss://', r'https://'),
        # https://bx1.be/lives/direct-tv/
        (r'^rmtp([es]?)://', r'rtmp\1://'),
    )
    for mistake, fixup in COMMON_TYPOS:
        if re.match(mistake, url):
            return re.sub(mistake, fixup, url)
    return url


def extract_basic_auth(url):
    parts = compat_urlparse.urlsplit(url)
    if parts.username is None:
        return url, None
    url = compat_urlparse.urlunsplit(parts._replace(netloc=(
        parts.hostname if parts.port is None
        else '%s:%d' % (parts.hostname, parts.port))))
    auth_payload = base64.b64encode(
        ('%s:%s' % (parts.username, parts.password or '')).encode())
    return url, f'Basic {auth_payload.decode()}'
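
# Illustrative doctest-style example (credentials are stripped from the netloc
# and returned as a Basic auth header; 'dXNlcjpwYXNz' is base64 of 'user:pass'):
#   >>> extract_basic_auth('http://user:pass@example.com/path')
#   ('http://example.com/path', 'Basic dXNlcjpwYXNz')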


def sanitized_Request(url, *args, **kwargs):
    url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
    if auth_header is not None:
        headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
        headers['Authorization'] = auth_header
    return compat_urllib_request.Request(url, *args, **kwargs)


def expand_path(s):
    """Expand shell variables and ~"""
    return os.path.expandvars(compat_expanduser(s))


def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    res = []
    for el in iterable:
        if el not in res:
            res.append(el)
    return res
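
# Illustrative doctest-style example (order-preserving de-duplication):
#   >>> orderedSet([1, 2, 1, 3, 2])
#   [1, 2, 3]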


def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in compat_html_entities_html5:
        return compat_html_entities_html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/ytdl-org/youtube-dl/issues/7518
        with contextlib.suppress(ValueError):
            return compat_chr(int(numstr, base))

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert isinstance(s, str)

    return re.sub(
        r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
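
# Illustrative doctest-style examples (named and hexadecimal numeric entities,
# handled by _htmlentity_transform above):
#   >>> unescapeHTML('&amp;')
#   '&'
#   >>> unescapeHTML('&#x27;')
#   "'"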


def escapeHTML(text):
    return (
        text
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;')
        .replace('"', '&quot;')
        .replace("'", '&#39;')
    )


def process_communicate_or_kill(p, *args, **kwargs):
    write_string('DeprecationWarning: yt_dlp.utils.process_communicate_or_kill is deprecated '
                 'and may be removed in a future version. Use yt_dlp.utils.Popen.communicate_or_kill instead')
    return Popen.communicate_or_kill(p, *args, **kwargs)


class Popen(subprocess.Popen):
    if sys.platform == 'win32':
        _startupinfo = subprocess.STARTUPINFO()
        _startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    else:
        _startupinfo = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs, startupinfo=self._startupinfo)

    def communicate_or_kill(self, *args, **kwargs):
        try:
            return self.communicate(*args, **kwargs)
        except BaseException:  # Including KeyboardInterrupt
            self.kill()
            self.wait()
            raise


def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding


def encodeFilename(s, for_subprocess=False):
    assert isinstance(s, str)
    return s


def decodeFilename(b, for_subprocess=False):
    return b


def encodeArgument(s):
    # Legacy code that uses byte strings
    # Uncomment the following line after fixing all post processors
    # assert isinstance(s, str), 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
    return s if isinstance(s, str) else s.decode('ascii')


def decodeArgument(b):
    return b


def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval


_timetuple = collections.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))


def timetuple_from_msec(msec):
    secs, msec = divmod(msec, 1000)
    mins, secs = divmod(secs, 60)
    hrs, mins = divmod(mins, 60)
    return _timetuple(hrs, mins, secs, msec)


def formatSeconds(secs, delim=':', msec=False):
    time = timetuple_from_msec(secs * 1000)
    if time.hours:
        ret = '%d%s%02d%s%02d' % (time.hours, delim, time.minutes, delim, time.seconds)
    elif time.minutes:
        ret = '%d%s%02d' % (time.minutes, delim, time.seconds)
    else:
        ret = '%d' % time.seconds
    return '%s.%03d' % (ret, time.milliseconds) if msec else ret
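
# Illustrative doctest-style examples (built on timetuple_from_msec above):
#   >>> formatSeconds(3661)
#   '1:01:01'
#   >>> formatSeconds(45.5, msec=True)
#   '45.500'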


def _ssl_load_windows_store_certs(ssl_context, storename):
    # Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
    try:
        certs = [cert for cert, encoding, trust in ssl.enum_certificates(storename)
                 if encoding == 'x509_asn' and (
                     trust is True or ssl.Purpose.SERVER_AUTH.oid in trust)]
    except PermissionError:
        return
    for cert in certs:
        with contextlib.suppress(ssl.SSLError):
            ssl_context.load_verify_locations(cadata=cert)


def make_HTTPS_handler(params, **kwargs):
    opts_check_certificate = not params.get('nocheckcertificate')
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = opts_check_certificate
    if params.get('legacyserverconnect'):
        context.options |= 4  # SSL_OP_LEGACY_SERVER_CONNECT
        # Allow use of weaker ciphers in Python 3.10+. See https://bugs.python.org/issue43998
        context.set_ciphers('DEFAULT')

    context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
    if opts_check_certificate:
        if has_certifi and 'no-certifi' not in params.get('compat_opts', []):
            context.load_verify_locations(cafile=certifi.where())
        try:
            context.load_default_certs()
            # Work around the issue in load_default_certs when there are bad certificates. See:
            # https://github.com/yt-dlp/yt-dlp/issues/1060,
            # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
        except ssl.SSLError:
            # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
            if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
                for storename in ('CA', 'ROOT'):
                    _ssl_load_windows_store_certs(context, storename)
            context.set_default_verify_paths()

    client_certfile = params.get('client_certificate')
    if client_certfile:
        try:
            context.load_cert_chain(
                client_certfile, keyfile=params.get('client_certificate_key'),
                password=params.get('client_certificate_password'))
        except ssl.SSLError:
            raise YoutubeDLError('Unable to load client certificate')

    # Some servers may reject requests if ALPN extension is not sent. See:
    # https://github.com/python/cpython/issues/85140
    # https://github.com/yt-dlp/yt-dlp/issues/3878
    with contextlib.suppress(NotImplementedError):
        context.set_alpn_protocols(['http/1.1'])

    return YoutubeDLHTTPSHandler(params, context=context, **kwargs)


def bug_reports_message(before=';'):
    msg = ('please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , '
           'filling out the appropriate issue template. '
           'Confirm you are on the latest version using yt-dlp -U')

    before = before.rstrip()
    if not before or before.endswith(('.', '!', '?')):
        msg = msg[0].title() + msg[1:]

    return (before + ' ' if before else '') + msg


class YoutubeDLError(Exception):
    """Base exception for YoutubeDL errors."""
    msg = None

    def __init__(self, msg=None):
        if msg is not None:
            self.msg = msg
        elif self.msg is None:
            self.msg = type(self).__name__
        super().__init__(self.msg)


network_exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
    network_exceptions.append(ssl.CertificateError)
network_exceptions = tuple(network_exceptions)


class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
        """
        if sys.exc_info()[0] in network_exceptions:
            expected = True

        self.orig_msg = str(msg)
        self.traceback = tb
        self.expected = expected
        self.cause = cause
        self.video_id = video_id
        self.ie = ie
        self.exc_info = sys.exc_info()  # preserve original exception

        super().__init__(''.join((
            format_field(ie, template='[%s] '),
            format_field(video_id, template='%s: '),
            msg,
            format_field(cause, template=' (caused by %r)'),
            '' if expected else bug_reports_message())))

    def format_traceback(self):
        return join_nonempty(
            self.traceback and ''.join(traceback.format_tb(self.traceback)),
            self.cause and ''.join(traceback.format_exception(None, self.cause, self.cause.__traceback__)[1:]),
            delim='\n') or None


class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super().__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class GeoRestrictedError(ExtractorError):
    """Geographic restriction Error exception.

    This exception may be thrown when a video is not available from your
    geographic location due to geographic restrictions imposed by a website.
    """

    def __init__(self, msg, countries=None, **kwargs):
        kwargs['expected'] = True
        super().__init__(msg, **kwargs)
        self.countries = countries


class DownloadError(YoutubeDLError):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super().__init__(msg)
        self.exc_info = exc_info


class EntryNotInPlaylist(YoutubeDLError):
    """Entry not in playlist exception.

    This exception will be thrown by YoutubeDL when a requested entry
    is not found in the playlist info_dict
    """
    msg = 'Entry not found in info'


class SameFileError(YoutubeDLError):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    msg = 'Fixed output name but more than one file to download'

    def __init__(self, filename=None):
        if filename is not None:
            self.msg += f': {filename}'
        super().__init__(self.msg)


class PostProcessingError(YoutubeDLError):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """


class DownloadCancelled(YoutubeDLError):
    """ Exception raised when the download queue should be interrupted """
    msg = 'The download was cancelled'


class ExistingVideoReached(DownloadCancelled):
    """ --break-on-existing triggered """
    msg = 'Encountered a video that is already in the archive, stopping due to --break-on-existing'


class RejectedVideoReached(DownloadCancelled):
    """ --break-on-reject triggered """
    msg = 'Encountered a video that did not match filter, stopping due to --break-on-reject'


class MaxDownloadsReached(DownloadCancelled):
    """ --max-downloads limit has been reached. """
    msg = 'Maximum number of downloads reached, stopping due to --max-downloads'


class ReExtractInfo(YoutubeDLError):
    """ Video info needs to be re-extracted. """

    def __init__(self, msg, expected=False):
        super().__init__(msg)
        self.expected = expected


class ThrottledDownload(ReExtractInfo):
    """ Download speed below --throttled-rate. """
    msg = 'The download speed is below throttle limit'

    def __init__(self):
        super().__init__(self.msg, expected=False)


class UnavailableVideoError(YoutubeDLError):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    msg = 'Unable to download video'

    def __init__(self, err=None):
        if err is not None:
            self.msg += f': {err}'
        super().__init__(self.msg)


class ContentTooShortError(YoutubeDLError):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        super().__init__(f'Downloaded {downloaded} bytes, expected {expected} bytes')
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected


class XAttrMetadataError(YoutubeDLError):
    def __init__(self, code=None, msg='Unknown error'):
        super().__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        if (self.code in (errno.ENOSPC, errno.EDQUOT)
                or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'


class XAttrUnavailableError(YoutubeDLError):
    pass


def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    hc = http_class(*args, **kwargs)
    source_address = ydl_handler._params.get('source_address')

    if source_address is not None:
        # This is to work around _create_connection() from socket where it will try all
        # address data from getaddrinfo() including IPv6. This filters the result from
        # getaddrinfo() based on the source_address value.
        # This is based on the cpython socket.create_connection() function.
        # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
        def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
            host, port = address
            err = None
            addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
            af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
            ip_addrs = [addr for addr in addrs if addr[0] == af]
            if addrs and not ip_addrs:
                ip_version = 'v4' if af == socket.AF_INET else 'v6'
                raise OSError(
                    "No remote IP%s addresses available for connect, can't use '%s' as source address"
                    % (ip_version, source_address[0]))
            for res in ip_addrs:
                af, socktype, proto, canonname, sa = res
                sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                        sock.settimeout(timeout)
                    sock.bind(source_address)
                    sock.connect(sa)
                    err = None  # Explicitly break reference cycle
                    return sock
                except OSError as _:
                    err = _
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            else:
                raise OSError('getaddrinfo returns an empty list')
        if hasattr(hc, '_create_connection'):
            hc._create_connection = _create_connection
        hc.source_address = (source_address, 0)

    return hc


def handle_youtubedl_headers(headers):
    filtered_headers = headers

    if 'Youtubedl-no-compression' in filtered_headers:
        filtered_headers = {k: v for k, v in filtered_headers.items() if k.lower() != 'accept-encoding'}
        del filtered_headers['Youtubedl-no-compression']

    return filtered_headers


class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        conn_class = compat_http_client.HTTPConnection

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        if not data:
            return data
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def brotli(data):
        if not data:
            return data
        return brotli.decompress(data)

    def http_request(self, req):
        # According to RFC 3986, URLs cannot contain non-ASCII characters; however, this is
        # not always respected by websites: some tend to give out URLs with non-percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around the aforementioned issue we replace the request's original URL with
        # a percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in self._params.get('http_headers', std_headers).items():
            # Capitalize is needed because of Python bug 2275 (http://bugs.python.org/issue2275):
            # urllib capitalizes the dict keys, so the membership check must do the same
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        if 'Accept-encoding' not in req.headers:
            req.add_header('Accept-encoding', ', '.join(SUPPORTED_ENCODINGS))

        req.headers = handle_youtubedl_headers(req.headers)

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except OSError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except OSError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # brotli
        if resp.headers.get('Content-encoding', '') == 'br':
            resp = compat_urllib_request.addinfourl(
                io.BytesIO(self.brotli(resp.read())), old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/ytdl-org/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As per RFC 2616 the default charset is iso-8859-1, which is respected by Python 3
                location = location.encode('iso-8859-1').decode()
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response


def make_socks_conn_class(base_class, socks_proxy):
    assert issubclass(base_class, (
        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))

    url_components = compat_urlparse.urlparse(socks_proxy)
    if url_components.scheme.lower() == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif url_components.scheme.lower() in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif url_components.scheme.lower() == 'socks4a':
        socks_type = ProxyType.SOCKS4A

    def unquote_if_non_empty(s):
        if not s:
            return s
        return compat_urllib_parse_unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if isinstance(self.timeout, (int, float)):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            if isinstance(self, compat_http_client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection


class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        conn_class = self._https_conn_class

        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        try:
            return self.do_open(
                functools.partial(_create_http_connection, self, conn_class, True), req, **kwargs)
        except urllib.error.URLError as e:
            if (isinstance(e.reason, ssl.SSLError)
                    and getattr(e.reason, 'reason', None) == 'SSLV3_ALERT_HANDSHAKE_FAILURE'):
                raise YoutubeDLError('SSLV3_ALERT_HANDSHAKE_FAILURE: Try using --legacy-server-connect')
            raise


class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
    """
    See [1] for cookie file format.

    1. https://curl.haxx.se/docs/http-cookies.html
    """
    _HTTPONLY_PREFIX = '#HttpOnly_'
    _ENTRY_LEN = 7
    _HEADER = '''# Netscape HTTP Cookie File
# This file is generated by yt-dlp. Do not edit.

'''
    _CookieFileEntry = collections.namedtuple(
        'CookieFileEntry',
        ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))

    def __init__(self, filename=None, *args, **kwargs):
        super().__init__(None, *args, **kwargs)
        if self.is_path(filename):
            filename = os.fspath(filename)
        self.filename = filename

    @staticmethod
    def _true_or_false(cndn):
        return 'TRUE' if cndn else 'FALSE'

    @staticmethod
    def is_path(file):
        return isinstance(file, (str, bytes, os.PathLike))

    @contextlib.contextmanager
    def open(self, file, *, write=False):
        if self.is_path(file):
            with open(file, 'w' if write else 'r', encoding='utf-8') as f:
                yield f
        else:
            if write:
                file.truncate(0)
            yield file

    def _really_save(self, f, ignore_discard=False, ignore_expires=False):
        now = time.time()
        for cookie in self:
            if (not ignore_discard and cookie.discard
                    or not ignore_expires and cookie.is_expired(now)):
                continue
            name, value = cookie.name, cookie.value
            if value is None:
                # cookies.txt regards 'Set-Cookie: foo' as a cookie
                # with no name, whereas http.cookiejar regards it as a
                # cookie with no value.
                name, value = '', name
            f.write('%s\n' % '\t'.join((
                cookie.domain,
                self._true_or_false(cookie.domain.startswith('.')),
                cookie.path,
                self._true_or_false(cookie.secure),
                str_or_none(cookie.expires, default=''),
                name, value
            )))

    def save(self, filename=None, *args, **kwargs):
        """
        Save cookies to a file.
        Code is taken from CPython 3.6
        https://github.com/python/cpython/blob/8d999cbf4adea053be6dbb612b9844635c4dfb8e/Lib/http/cookiejar.py#L2091-L2117 """

        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)

        # Store session cookies with `expires` set to 0 instead of an empty string
        for cookie in self:
            if cookie.expires is None:
                cookie.expires = 0

        with self.open(filename, write=True) as f:
            f.write(self._HEADER)
            self._really_save(f, *args, **kwargs)

    def load(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Load cookies from a file."""
        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)

        def prepare_line(line):
            if line.startswith(self._HTTPONLY_PREFIX):
                line = line[len(self._HTTPONLY_PREFIX):]
            # comments and empty lines are fine
            if line.startswith('#') or not line.strip():
                return line
            cookie_list = line.split('\t')
            if len(cookie_list) != self._ENTRY_LEN:
                raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list))
            cookie = self._CookieFileEntry(*cookie_list)
            if cookie.expires_at and not cookie.expires_at.isdigit():
                raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
            return line

        cf = io.StringIO()
        with self.open(filename) as f:
            for line in f:
                try:
                    cf.write(prepare_line(line))
                except compat_cookiejar.LoadError as e:
                    if f'{line.strip()} '[0] in '[{"':
                        raise compat_cookiejar.LoadError(
                            'Cookies file must be Netscape formatted, not JSON. See '
                            'https://github.com/ytdl-org/youtube-dl#how-do-i-pass-cookies-to-youtube-dl')
                    write_string(f'WARNING: skipping cookie file entry due to {e}: {line!r}\n')
                    continue
        cf.seek(0)
        self._really_load(cf, filename, ignore_discard, ignore_expires)
        # Session cookies are denoted by either `expires` field set to
        # an empty string or 0. MozillaCookieJar only recognizes the former
        # (see [1]). So we need to force the latter to be recognized as session
        # cookies on our own.
        # Session cookies may be important for cookies-based authentication,
        # e.g. usually, when user does not check 'Remember me' check box while
        # logging in on a site, some important cookies are stored as session
        # cookies so that not recognizing them will result in failed login.
        # 1. https://bugs.python.org/issue17164
        for cookie in self:
            # Treat `expires=0` cookies as session cookies
            if cookie.expires == 0:
                cookie.expires = None
                cookie.discard = True


class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response


class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
    """YoutubeDL redirect handler

    The code is based on HTTPRedirectHandler implementation from CPython [1].

    This redirect handler solves two issues:
     - ensures redirect URL is always unicode under python 2
     - introduces support for experimental HTTP response status code
       308 Permanent Redirect [2] used by some sites [3]

    1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
    2. https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308
    3. https://github.com/ytdl-org/youtube-dl/issues/28768
    """

    http_error_301 = http_error_303 = http_error_307 = http_error_308 = compat_urllib_request.HTTPRedirectHandler.http_error_302

    def redirect_request(self, req, fp, code, msg, headers, newurl):
        """Return a Request or None in response to a redirect.

        This is called by the http_error_30x methods when a
        redirection response is received. If a redirection should
        take place, return a new Request to allow http_error_30x to
        perform the redirect. Otherwise, raise HTTPError if no-one
        else should try to handle this url. Return None if you can't
        but another Handler might.
        """
        m = req.get_method()
        if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
                 or code in (301, 302, 303) and m == "POST")):
            raise compat_HTTPError(req.full_url, code, msg, headers, fp)
        # Strictly (according to RFC 2616), 301 or 302 in response to
        # a POST MUST NOT cause a redirection without confirmation
        # from the user (of urllib.request, in this case). In practice,
        # essentially all clients do redirect in this case, so we do
        # the same.

        # Be conciliant with URIs containing a space. This is mainly
        # redundant with the more complete encoding done in http_error_302(),
        # but it is kept for compatibility with other callers.
        newurl = newurl.replace(' ', '%20')

        CONTENT_HEADERS = ("content-length", "content-type")
        # Remove content headers, since the redirected request may use a different method
        newheaders = {k: v for k, v in req.headers.items() if k.lower() not in CONTENT_HEADERS}

        # A 303 must either use GET or HEAD for subsequent request
        # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.4
        if code == 303 and m != 'HEAD':
            m = 'GET'
        # 301 and 302 redirects are commonly turned into a GET from a POST
        # for subsequent requests by browsers, so we'll do the same.
        # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.2
        # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.3
        if code in (301, 302) and m == 'POST':
            m = 'GET'

        return compat_urllib_request.Request(
            newurl, headers=newheaders, origin_req_host=req.origin_req_host,
            unverifiable=True, method=m)


def extract_timezone(date_str):
    m = re.search(
        r'''(?x)
            ^.{8,}?                                          # >=8 char non-TZ prefix, if present
            (?P<tz>Z|                                        # just the UTC Z, or
                (?:(?<=.\b\d{4}|\b\d{2}:\d\d)|               # preceded by 4 digits or hh:mm or
                   (?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d)) # not preceded by 3 alpha word or >= 4 alpha or 2 digits
                [ ]?                                         # optional space
                (?P<sign>\+|-)                               # +/-
                (?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})   # hh[:]mm
            $)
        ''', date_str)
    if not m:
        timezone = datetime.timedelta()
    else:
        date_str = date_str[:-len(m.group('tz'))]
        if not m.group('sign'):
            timezone = datetime.timedelta()
        else:
            sign = 1 if m.group('sign') == '+' else -1
            timezone = datetime.timedelta(
                hours=sign * int(m.group('hours')),
                minutes=sign * int(m.group('minutes')))
    return timezone, date_str
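
# Illustrative doctest-style example (the offset is split off the date string;
# timedelta(hours=5, minutes=30) normalizes to 19800 seconds):
#   >>> extract_timezone('2022-01-01T12:00:00+05:30')
#   (datetime.timedelta(seconds=19800), '2022-01-01T12:00:00')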


def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        timezone, date_str = extract_timezone(date_str)

    with contextlib.suppress(ValueError):
        date_format = f'%Y-%m-%d{delimiter}%H:%M:%S'
        dt = datetime.datetime.strptime(date_str, date_format) - timezone
        return calendar.timegm(dt.timetuple())
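
# Illustrative doctest-style example (2022-01-01 00:00:00 UTC):
#   >>> parse_iso8601('2022-01-01T00:00:00Z')
#   1640995200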


def date_formats(day_first=True):
    return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST


def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None
    upload_date = None
    # Replace commas
    date_str = date_str.replace(',', ' ')
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
    _, date_str = extract_timezone(date_str)

    for expression in date_formats(day_first):
        with contextlib.suppress(ValueError):
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
    if upload_date is None:
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            with contextlib.suppress(ValueError):
                upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
    if upload_date is not None:
        return compat_str(upload_date)
1728
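# Illustrative usage (a sketch; assumes the DATE_FORMATS table defined
# earlier in this module includes '%B %d %Y'):
#   >>> unified_strdate('December 31, 2014')
#   '20141231'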
1729
1730 def unified_timestamp(date_str, day_first=True):
1731 if date_str is None:
1732 return None
1733
1734 date_str = re.sub(r'[,|]', '', date_str)
1735
1736 pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
1737 timezone, date_str = extract_timezone(date_str)
1738
1739 # Remove AM/PM + timezone
1740 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
1741
1742 # Remove unrecognized timezones from ISO 8601-like timestamps
1743 m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
1744 if m:
1745 date_str = date_str[:-len(m.group('tz'))]
1746
1747 # Python only supports microseconds, so remove nanoseconds
1748 m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
1749 if m:
1750 date_str = m.group(1)
1751
1752 for expression in date_formats(day_first):
1753 with contextlib.suppress(ValueError):
1754 dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
1755 return calendar.timegm(dt.timetuple())
1756 timetuple = email.utils.parsedate_tz(date_str)
1757 if timetuple:
1758 return calendar.timegm(timetuple) + pm_delta * 3600
1759
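# Illustrative usage (a sketch; assumes DATE_FORMATS_DAY_FIRST, defined
# earlier in this module, includes '%d/%m/%Y %H:%M:%S'):
#   >>> unified_timestamp('31/12/2014 5:36:08 PM')
#   1420047368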
1760
1761 def determine_ext(url, default_ext='unknown_video'):
1762 if url is None or '.' not in url:
1763 return default_ext
1764 guess = url.partition('?')[0].rpartition('.')[2]
1765 if re.match(r'^[A-Za-z0-9]+$', guess):
1766 return guess
1767 # Try to extract the ext from URLs like http://example.com/foo/bar.mp4/?download
1768 elif guess.rstrip('/') in KNOWN_EXTENSIONS:
1769 return guess.rstrip('/')
1770 else:
1771 return default_ext
1772
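# Illustrative usage (a sketch; assumes 'mp4' is in KNOWN_EXTENSIONS):
#   >>> determine_ext('http://example.com/foo/bar.mp4/?download')
#   'mp4'
#   >>> determine_ext('http://example.com/play?video=1')
#   'unknown_video'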
1773
1774 def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
1775 return replace_extension(filename, sub_lang + '.' + sub_format, expected_real_ext)
1776
1777
1778 def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
1779 R"""
1780 Return a datetime object from a string.
1781 Supported format:
1782 (now|today|yesterday|DATE)([+-]\d+(microsecond|second|minute|hour|day|week|month|year)s?)?
1783
1784 @param format strftime format of DATE
1785 @param precision Round the datetime object: auto|microsecond|second|minute|hour|day
1786 auto: round to the unit provided in date_str (if applicable).
1787 """
1788 auto_precision = False
1789 if precision == 'auto':
1790 auto_precision = True
1791 precision = 'microsecond'
1792 today = datetime_round(datetime.datetime.utcnow(), precision)
1793 if date_str in ('now', 'today'):
1794 return today
1795 if date_str == 'yesterday':
1796 return today - datetime.timedelta(days=1)
1797 match = re.match(
1798 r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?',
1799 date_str)
1800 if match is not None:
1801 start_time = datetime_from_str(match.group('start'), precision, format)
1802 time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1)
1803 unit = match.group('unit')
1804 if unit == 'month' or unit == 'year':
1805 new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time)
1806 unit = 'day'
1807 else:
1808 if unit == 'week':
1809 unit = 'day'
1810 time *= 7
1811 delta = datetime.timedelta(**{unit + 's': time})
1812 new_date = start_time + delta
1813 if auto_precision:
1814 return datetime_round(new_date, unit)
1815 return new_date
1816
1817 return datetime_round(datetime.datetime.strptime(date_str, format), precision)
1818
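# Illustrative usage (a sketch; the first result depends on the current time):
#   >>> datetime_from_str('now-1week')        # ~7 days ago, rounded to the nearest day
#   >>> datetime_from_str('20220101+1month')  # datetime.datetime(2022, 2, 1, 0, 0)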
1819
1820 def date_from_str(date_str, format='%Y%m%d', strict=False):
1821 R"""
1822 Return a date object from a string using datetime_from_str
1823
1824 @param strict Restrict allowed patterns to "YYYYMMDD" and
1825 (now|today|yesterday)(-\d+(day|week|month|year)s?)?
1826 """
1827 if strict and not re.fullmatch(r'\d{8}|(now|today|yesterday)(-\d+(day|week|month|year)s?)?', date_str):
1828 raise ValueError(f'Invalid date format "{date_str}"')
1829 return datetime_from_str(date_str, precision='microsecond', format=format).date()
1830
1831
1832 def datetime_add_months(dt, months):
1833 """Increment/Decrement a datetime object by months."""
1834 month = dt.month + months - 1
1835 year = dt.year + month // 12
1836 month = month % 12 + 1
1837 day = min(dt.day, calendar.monthrange(year, month)[1])
1838 return dt.replace(year, month, day)
1839
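# Illustrative usage (a sketch; note the day is clamped to the target month's length):
#   >>> datetime_add_months(datetime.datetime(2020, 1, 31), 1)
#   datetime.datetime(2020, 2, 29, 0, 0)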
1840
1841 def datetime_round(dt, precision='day'):
1842 """
1843 Round a datetime object's time to a specific precision
1844 """
1845 if precision == 'microsecond':
1846 return dt
1847
1848 unit_seconds = {
1849 'day': 86400,
1850 'hour': 3600,
1851 'minute': 60,
1852 'second': 1,
1853 }
1854 roundto = lambda x, n: ((x + n / 2) // n) * n
1855 timestamp = calendar.timegm(dt.timetuple())
1856 return datetime.datetime.utcfromtimestamp(roundto(timestamp, unit_seconds[precision]))
1857
1858
1859 def hyphenate_date(date_str):
1860 """
1861 Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
1862 match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
1863 if match is not None:
1864 return '-'.join(match.groups())
1865 else:
1866 return date_str
1867
1868
1869 class DateRange:
1870 """Represents a time interval between two dates"""
1871
1872 def __init__(self, start=None, end=None):
1873 """start and end must be strings in the format accepted by date"""
1874 if start is not None:
1875 self.start = date_from_str(start, strict=True)
1876 else:
1877 self.start = datetime.datetime.min.date()
1878 if end is not None:
1879 self.end = date_from_str(end, strict=True)
1880 else:
1881 self.end = datetime.datetime.max.date()
1882 if self.start > self.end:
1883 raise ValueError('Date range: "%s": the start date must be before the end date' % self)
1884
1885 @classmethod
1886 def day(cls, day):
1887 """Returns a range that only contains the given day"""
1888 return cls(day, day)
1889
1890 def __contains__(self, date):
1891 """Check if the date is in the range"""
1892 if not isinstance(date, datetime.date):
1893 date = date_from_str(date)
1894 return self.start <= date <= self.end
1895
1896 def __str__(self):
1897 return f'{self.start.isoformat()} - {self.end.isoformat()}'
1898
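# Illustrative usage (a sketch):
#   >>> '20220615' in DateRange('20220601', '20220630')
#   True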
1899
1900 def platform_name():
1901 """ Returns the platform name as a compat_str """
1902 res = platform.platform()
1903 if isinstance(res, bytes):
1904 res = res.decode(preferredencoding())
1905
1906 assert isinstance(res, compat_str)
1907 return res
1908
1909
1910 @functools.cache
1911 def get_windows_version():
1912 ''' Get the Windows version. Returns () if not running on Windows '''
1913 if compat_os_name == 'nt':
1914 return version_tuple(platform.win32_ver()[1])
1915 else:
1916 return ()
1917
1918
1919 def write_string(s, out=None, encoding=None):
1920 assert isinstance(s, str)
1921 out = out or sys.stderr
1922
1923 if compat_os_name == 'nt' and supports_terminal_sequences(out):
1924 s = re.sub(r'([\r\n]+)', r' \1', s)
1925
1926 enc, buffer = None, out
1927 if 'b' in getattr(out, 'mode', ''):
1928 enc = encoding or preferredencoding()
1929 elif hasattr(out, 'buffer'):
1930 buffer = out.buffer
1931 enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
1932
1933 buffer.write(s.encode(enc, 'ignore') if enc else s)
1934 out.flush()
1935
1936
1937 def bytes_to_intlist(bs):
1938 if not bs:
1939 return []
1940 if isinstance(bs[0], int): # bytes/bytearray (indexing yields ints)
1941 return list(bs)
1942 else:
1943 return [ord(c) for c in bs]
1944
1945
1946 def intlist_to_bytes(xs):
1947 if not xs:
1948 return b''
1949 return compat_struct_pack('%dB' % len(xs), *xs)
1950
1951
1952 class LockingUnsupportedError(OSError):
1953 msg = 'File locking is not supported'
1954
1955 def __init__(self):
1956 super().__init__(self.msg)
1957
1958
1959 # Cross-platform file locking
1960 if sys.platform == 'win32':
1961 import ctypes.wintypes
1962 import msvcrt
1963
1964 class OVERLAPPED(ctypes.Structure):
1965 _fields_ = [
1966 ('Internal', ctypes.wintypes.LPVOID),
1967 ('InternalHigh', ctypes.wintypes.LPVOID),
1968 ('Offset', ctypes.wintypes.DWORD),
1969 ('OffsetHigh', ctypes.wintypes.DWORD),
1970 ('hEvent', ctypes.wintypes.HANDLE),
1971 ]
1972
1973 kernel32 = ctypes.windll.kernel32
1974 LockFileEx = kernel32.LockFileEx
1975 LockFileEx.argtypes = [
1976 ctypes.wintypes.HANDLE, # hFile
1977 ctypes.wintypes.DWORD, # dwFlags
1978 ctypes.wintypes.DWORD, # dwReserved
1979 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
1980 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
1981 ctypes.POINTER(OVERLAPPED) # Overlapped
1982 ]
1983 LockFileEx.restype = ctypes.wintypes.BOOL
1984 UnlockFileEx = kernel32.UnlockFileEx
1985 UnlockFileEx.argtypes = [
1986 ctypes.wintypes.HANDLE, # hFile
1987 ctypes.wintypes.DWORD, # dwReserved
1988 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
1989 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
1990 ctypes.POINTER(OVERLAPPED) # Overlapped
1991 ]
1992 UnlockFileEx.restype = ctypes.wintypes.BOOL
1993 whole_low = 0xffffffff
1994 whole_high = 0x7fffffff
1995
1996 def _lock_file(f, exclusive, block):
1997 overlapped = OVERLAPPED()
1998 overlapped.Offset = 0
1999 overlapped.OffsetHigh = 0
2000 overlapped.hEvent = 0
2001 f._lock_file_overlapped_p = ctypes.pointer(overlapped)
2002
2003 if not LockFileEx(msvcrt.get_osfhandle(f.fileno()),
2004 (0x2 if exclusive else 0x0) | (0x0 if block else 0x1),
2005 0, whole_low, whole_high, f._lock_file_overlapped_p):
2006 raise BlockingIOError('Locking file failed: %r' % ctypes.FormatError())
2007
2008 def _unlock_file(f):
2009 assert f._lock_file_overlapped_p
2010 handle = msvcrt.get_osfhandle(f.fileno())
2011 if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p):
2012 raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
2013
2014 else:
2015 try:
2016 import fcntl
2017
2018 def _lock_file(f, exclusive, block):
2019 flags = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
2020 if not block:
2021 flags |= fcntl.LOCK_NB
2022 try:
2023 fcntl.flock(f, flags)
2024 except BlockingIOError:
2025 raise
2026 except OSError: # AOSP does not have flock()
2027 fcntl.lockf(f, flags)
2028
2029 def _unlock_file(f):
2030 try:
2031 fcntl.flock(f, fcntl.LOCK_UN)
2032 except OSError:
2033 fcntl.lockf(f, fcntl.LOCK_UN)
2034
2035 except ImportError:
2036
2037 def _lock_file(f, exclusive, block):
2038 raise LockingUnsupportedError()
2039
2040 def _unlock_file(f):
2041 raise LockingUnsupportedError()
2042
2043
2044 class locked_file:
2045 locked = False
2046
2047 def __init__(self, filename, mode, block=True, encoding=None):
2048 if mode not in {'r', 'rb', 'a', 'ab', 'w', 'wb'}:
2049 raise NotImplementedError(mode)
2050 self.mode, self.block = mode, block
2051
2052 writable = any(f in mode for f in 'wax+')
2053 readable = any(f in mode for f in 'r+')
2054 flags = functools.reduce(operator.ior, (
2055 getattr(os, 'O_CLOEXEC', 0), # UNIX only
2056 getattr(os, 'O_BINARY', 0), # Windows only
2057 getattr(os, 'O_NOINHERIT', 0), # Windows only
2058 os.O_CREAT if writable else 0, # O_TRUNC only after locking
2059 os.O_APPEND if 'a' in mode else 0,
2060 os.O_EXCL if 'x' in mode else 0,
2061 os.O_RDONLY if not writable else os.O_RDWR if readable else os.O_WRONLY,
2062 ))
2063
2064 self.f = os.fdopen(os.open(filename, flags, 0o666), mode, encoding=encoding)
2065
2066 def __enter__(self):
2067 exclusive = 'r' not in self.mode
2068 try:
2069 _lock_file(self.f, exclusive, self.block)
2070 self.locked = True
2071 except OSError:
2072 self.f.close()
2073 raise
2074 if 'w' in self.mode:
2075 try:
2076 self.f.truncate()
2077 except OSError as e:
2078 if e.errno not in (
2079 errno.ESPIPE, # Illegal seek - expected for FIFO
2080 errno.EINVAL, # Invalid argument - expected for /dev/null
2081 ):
2082 raise
2083 return self
2084
2085 def unlock(self):
2086 if not self.locked:
2087 return
2088 try:
2089 _unlock_file(self.f)
2090 finally:
2091 self.locked = False
2092
2093 def __exit__(self, *_):
2094 try:
2095 self.unlock()
2096 finally:
2097 self.f.close()
2098
2099 open = __enter__
2100 close = __exit__
2101
2102 def __getattr__(self, attr):
2103 return getattr(self.f, attr)
2104
2105 def __iter__(self):
2106 return iter(self.f)
2107
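# Illustrative usage (a sketch; 'state.json' is a hypothetical file name):
#   with locked_file('state.json', 'w', block=True) as f:
#       f.write('...')  # the file stays exclusively locked until the block exits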
2108
2109 @functools.cache
2110 def get_filesystem_encoding():
2111 encoding = sys.getfilesystemencoding()
2112 return encoding if encoding is not None else 'utf-8'
2113
2114
2115 def shell_quote(args):
2116 quoted_args = []
2117 encoding = get_filesystem_encoding()
2118 for a in args:
2119 if isinstance(a, bytes):
2120 # We may get a filename encoded with 'encodeFilename'
2121 a = a.decode(encoding)
2122 quoted_args.append(compat_shlex_quote(a))
2123 return ' '.join(quoted_args)
2124
2125
2126 def smuggle_url(url, data):
2127 """ Pass additional data in a URL for internal use. """
2128
2129 url, idata = unsmuggle_url(url, {})
2130 data.update(idata)
2131 sdata = compat_urllib_parse_urlencode(
2132 {'__youtubedl_smuggle': json.dumps(data)})
2133 return url + '#' + sdata
2134
2135
2136 def unsmuggle_url(smug_url, default=None):
2137 if '#__youtubedl_smuggle' not in smug_url:
2138 return smug_url, default
2139 url, _, sdata = smug_url.rpartition('#')
2140 jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
2141 data = json.loads(jsond)
2142 return url, data
2143
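# Illustrative round trip (a sketch):
#   >>> url = smuggle_url('http://example.com/v', {'referrer': 'embed'})
#   >>> unsmuggle_url(url)
#   ('http://example.com/v', {'referrer': 'embed'})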
2144
2145 def format_decimal_suffix(num, fmt='%d%s', *, factor=1000):
2146 """ Formats numbers with decimal sufixes like K, M, etc """
2147 num, factor = float_or_none(num), float(factor)
2148 if num is None or num < 0:
2149 return None
2150 POSSIBLE_SUFFIXES = 'kMGTPEZY'
2151 exponent = 0 if num == 0 else min(int(math.log(num, factor)), len(POSSIBLE_SUFFIXES))
2152 suffix = ['', *POSSIBLE_SUFFIXES][exponent]
2153 if factor == 1024:
2154 suffix = {'k': 'Ki', '': ''}.get(suffix, f'{suffix}i')
2155 converted = num / (factor ** exponent)
2156 return fmt % (converted, suffix)
2157
2158
2159 def format_bytes(bytes):
2160 return format_decimal_suffix(bytes, '%.2f%sB', factor=1024) or 'N/A'
2161
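# Illustrative usage (a sketch):
#   >>> format_decimal_suffix(123456)
#   '123k'
#   >>> format_bytes(1536)
#   '1.50KiB'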
2162
2163 def lookup_unit_table(unit_table, s):
2164 units_re = '|'.join(re.escape(u) for u in unit_table)
2165 m = re.match(
2166 r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
2167 if not m:
2168 return None
2169 num_str = m.group('num').replace(',', '.')
2170 mult = unit_table[m.group('unit')]
2171 return int(float(num_str) * mult)
2172
2173
2174 def parse_filesize(s):
2175 if s is None:
2176 return None
2177
2178 # The lower-case forms are of course incorrect and unofficial,
2179 # but we support those too
2180 _UNIT_TABLE = {
2181 'B': 1,
2182 'b': 1,
2183 'bytes': 1,
2184 'KiB': 1024,
2185 'KB': 1000,
2186 'kB': 1024,
2187 'Kb': 1000,
2188 'kb': 1000,
2189 'kilobytes': 1000,
2190 'kibibytes': 1024,
2191 'MiB': 1024 ** 2,
2192 'MB': 1000 ** 2,
2193 'mB': 1024 ** 2,
2194 'Mb': 1000 ** 2,
2195 'mb': 1000 ** 2,
2196 'megabytes': 1000 ** 2,
2197 'mebibytes': 1024 ** 2,
2198 'GiB': 1024 ** 3,
2199 'GB': 1000 ** 3,
2200 'gB': 1024 ** 3,
2201 'Gb': 1000 ** 3,
2202 'gb': 1000 ** 3,
2203 'gigabytes': 1000 ** 3,
2204 'gibibytes': 1024 ** 3,
2205 'TiB': 1024 ** 4,
2206 'TB': 1000 ** 4,
2207 'tB': 1024 ** 4,
2208 'Tb': 1000 ** 4,
2209 'tb': 1000 ** 4,
2210 'terabytes': 1000 ** 4,
2211 'tebibytes': 1024 ** 4,
2212 'PiB': 1024 ** 5,
2213 'PB': 1000 ** 5,
2214 'pB': 1024 ** 5,
2215 'Pb': 1000 ** 5,
2216 'pb': 1000 ** 5,
2217 'petabytes': 1000 ** 5,
2218 'pebibytes': 1024 ** 5,
2219 'EiB': 1024 ** 6,
2220 'EB': 1000 ** 6,
2221 'eB': 1024 ** 6,
2222 'Eb': 1000 ** 6,
2223 'eb': 1000 ** 6,
2224 'exabytes': 1000 ** 6,
2225 'exbibytes': 1024 ** 6,
2226 'ZiB': 1024 ** 7,
2227 'ZB': 1000 ** 7,
2228 'zB': 1024 ** 7,
2229 'Zb': 1000 ** 7,
2230 'zb': 1000 ** 7,
2231 'zettabytes': 1000 ** 7,
2232 'zebibytes': 1024 ** 7,
2233 'YiB': 1024 ** 8,
2234 'YB': 1000 ** 8,
2235 'yB': 1024 ** 8,
2236 'Yb': 1000 ** 8,
2237 'yb': 1000 ** 8,
2238 'yottabytes': 1000 ** 8,
2239 'yobibytes': 1024 ** 8,
2240 }
2241
2242 return lookup_unit_table(_UNIT_TABLE, s)
2243
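# Illustrative usage (a sketch; note the binary/decimal unit distinction
# and the European decimal comma):
#   >>> parse_filesize('1.5GiB')
#   1610612736
#   >>> parse_filesize('500,5 MB')
#   500500000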
2244
2245 def parse_count(s):
2246 if s is None:
2247 return None
2248
2249 s = re.sub(r'^[^\d]+\s', '', s).strip()
2250
2251 if re.match(r'^[\d,.]+$', s):
2252 return str_to_int(s)
2253
2254 _UNIT_TABLE = {
2255 'k': 1000,
2256 'K': 1000,
2257 'm': 1000 ** 2,
2258 'M': 1000 ** 2,
2259 'kk': 1000 ** 2,
2260 'KK': 1000 ** 2,
2261 'b': 1000 ** 3,
2262 'B': 1000 ** 3,
2263 }
2264
2265 ret = lookup_unit_table(_UNIT_TABLE, s)
2266 if ret is not None:
2267 return ret
2268
2269 mobj = re.match(r'([\d,.]+)(?:$|\s)', s)
2270 if mobj:
2271 return str_to_int(mobj.group(1))
2272
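# Illustrative usage (a sketch):
#   >>> parse_count('1.2M')
#   1200000
#   >>> parse_count('1,234 views')
#   1234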
2273
2274 def parse_resolution(s, *, lenient=False):
2275 if s is None:
2276 return {}
2277
2278 if lenient:
2279 mobj = re.search(r'(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)', s)
2280 else:
2281 mobj = re.search(r'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s)
2282 if mobj:
2283 return {
2284 'width': int(mobj.group('w')),
2285 'height': int(mobj.group('h')),
2286 }
2287
2288 mobj = re.search(r'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s)
2289 if mobj:
2290 return {'height': int(mobj.group(1))}
2291
2292 mobj = re.search(r'\b([48])[kK]\b', s)
2293 if mobj:
2294 return {'height': int(mobj.group(1)) * 540}
2295
2296 return {}
2297
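# Illustrative usage (a sketch):
#   >>> parse_resolution('1920x1080')
#   {'width': 1920, 'height': 1080}
#   >>> parse_resolution('4k')
#   {'height': 2160}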
2298
2299 def parse_bitrate(s):
2300 if not isinstance(s, compat_str):
2301 return
2302 mobj = re.search(r'\b(\d+)\s*kbps', s)
2303 if mobj:
2304 return int(mobj.group(1))
2305
2306
2307 def month_by_name(name, lang='en'):
2308 """ Return the number of a month by (locale-independently) English name """
2309
2310 month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
2311
2312 try:
2313 return month_names.index(name) + 1
2314 except ValueError:
2315 return None
2316
2317
2318 def month_by_abbreviation(abbrev):
2319 """ Return the number of a month by (locale-independently) English
2320 abbreviations """
2321
2322 try:
2323 return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
2324 except ValueError:
2325 return None
2326
2327
2328 def fix_xml_ampersands(xml_str):
2329 """Replace all the '&' by '&amp;' in XML"""
2330 return re.sub(
2331 r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
2332 '&amp;',
2333 xml_str)
2334
2335
2336 def setproctitle(title):
2337 assert isinstance(title, compat_str)
2338
2339 # ctypes in Jython is not complete
2340 # http://bugs.jython.org/issue2148
2341 if sys.platform.startswith('java'):
2342 return
2343
2344 try:
2345 libc = ctypes.cdll.LoadLibrary('libc.so.6')
2346 except OSError:
2347 return
2348 except TypeError:
2349 # LoadLibrary in Windows Python 2.7.13 only expects
2350 # a bytestring, but since unicode_literals turns
2351 # every string into a unicode string, it fails.
2352 return
2353 title_bytes = title.encode()
2354 buf = ctypes.create_string_buffer(len(title_bytes))
2355 buf.value = title_bytes
2356 try:
2357 libc.prctl(15, buf, 0, 0, 0)
2358 except AttributeError:
2359 return # Strange libc, just skip this
2360
2361
2362 def remove_start(s, start):
2363 return s[len(start):] if s is not None and s.startswith(start) else s
2364
2365
2366 def remove_end(s, end):
2367 return s[:-len(end)] if s is not None and s.endswith(end) else s
2368
2369
2370 def remove_quotes(s):
2371 if s is None or len(s) < 2:
2372 return s
2373 for quote in ('"', "'", ):
2374 if s[0] == quote and s[-1] == quote:
2375 return s[1:-1]
2376 return s
2377
2378
2379 def get_domain(url):
2380 domain = re.match(r'(?:https?:\/\/)?(?:www\.)?(?P<domain>[^\n\/]+\.[^\n\/]+)(?:\/(.*))?', url)
2381 return domain.group('domain') if domain else None
2382
2383
2384 def url_basename(url):
2385 path = compat_urlparse.urlparse(url).path
2386 return path.strip('/').split('/')[-1]
2387
2388
2389 def base_url(url):
2390 return re.match(r'https?://[^?#&]+/', url).group()
2391
2392
2393 def urljoin(base, path):
2394 if isinstance(path, bytes):
2395 path = path.decode()
2396 if not isinstance(path, compat_str) or not path:
2397 return None
2398 if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
2399 return path
2400 if isinstance(base, bytes):
2401 base = base.decode()
2402 if not isinstance(base, compat_str) or not re.match(
2403 r'^(?:https?:)?//', base):
2404 return None
2405 return compat_urlparse.urljoin(base, path)
2406
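# Illustrative usage (a sketch):
#   >>> urljoin('https://example.com/a/', 'b/c.mp4')
#   'https://example.com/a/b/c.mp4'
#   >>> urljoin('https://example.com/a/', '//cdn.example.com/c.mp4')
#   '//cdn.example.com/c.mp4'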
2407
2408 class HEADRequest(compat_urllib_request.Request):
2409 def get_method(self):
2410 return 'HEAD'
2411
2412
2413 class PUTRequest(compat_urllib_request.Request):
2414 def get_method(self):
2415 return 'PUT'
2416
2417
2418 def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
2419 if get_attr and v is not None:
2420 v = getattr(v, get_attr, None)
2421 try:
2422 return int(v) * invscale // scale
2423 except (ValueError, TypeError, OverflowError):
2424 return default
2425
2426
2427 def str_or_none(v, default=None):
2428 return default if v is None else compat_str(v)
2429
2430
2431 def str_to_int(int_str):
2432 """ A more relaxed version of int_or_none """
2433 if isinstance(int_str, int):
2434 return int_str
2435 elif isinstance(int_str, compat_str):
2436 int_str = re.sub(r'[,\.\+]', '', int_str)
2437 return int_or_none(int_str)
2438
2439
2440 def float_or_none(v, scale=1, invscale=1, default=None):
2441 if v is None:
2442 return default
2443 try:
2444 return float(v) * invscale / scale
2445 except (ValueError, TypeError):
2446 return default
2447
2448
2449 def bool_or_none(v, default=None):
2450 return v if isinstance(v, bool) else default
2451
2452
2453 def strip_or_none(v, default=None):
2454 return v.strip() if isinstance(v, compat_str) else default
2455
2456
2457 def url_or_none(url):
2458 if not url or not isinstance(url, compat_str):
2459 return None
2460 url = url.strip()
2461 return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None
2462
2463
2464 def request_to_url(req):
2465 if isinstance(req, compat_urllib_request.Request):
2466 return req.get_full_url()
2467 else:
2468 return req
2469
2470
2471 def strftime_or_none(timestamp, date_format, default=None):
2472 datetime_object = None
2473 try:
2474 if isinstance(timestamp, (int, float)): # unix timestamp
2475 datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
2476 elif isinstance(timestamp, compat_str): # assume YYYYMMDD
2477 datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
2478 return datetime_object.strftime(date_format)
2479 except (ValueError, TypeError, AttributeError):
2480 return default
2481
2482
2483 def parse_duration(s):
2484 if not isinstance(s, str):
2485 return None
2486 s = s.strip()
2487 if not s:
2488 return None
2489
2490 days, hours, mins, secs, ms = [None] * 5
2491 m = re.match(r'''(?x)
2492 (?P<before_secs>
2493 (?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?
2494 (?P<secs>(?(before_secs)[0-9]{1,2}|[0-9]+))
2495 (?P<ms>[.:][0-9]+)?Z?$
2496 ''', s)
2497 if m:
2498 days, hours, mins, secs, ms = m.group('days', 'hours', 'mins', 'secs', 'ms')
2499 else:
2500 m = re.match(
2501 r'''(?ix)(?:P?
2502 (?:
2503 [0-9]+\s*y(?:ears?)?,?\s*
2504 )?
2505 (?:
2506 [0-9]+\s*m(?:onths?)?,?\s*
2507 )?
2508 (?:
2509 [0-9]+\s*w(?:eeks?)?,?\s*
2510 )?
2511 (?:
2512 (?P<days>[0-9]+)\s*d(?:ays?)?,?\s*
2513 )?
2514 T)?
2515 (?:
2516 (?P<hours>[0-9]+)\s*h(?:ours?)?,?\s*
2517 )?
2518 (?:
2519 (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?,?\s*
2520 )?
2521 (?:
2522 (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
2523 )?Z?$''', s)
2524 if m:
2525 days, hours, mins, secs, ms = m.groups()
2526 else:
2527 m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
2528 if m:
2529 hours, mins = m.groups()
2530 else:
2531 return None
2532
2533 if ms:
2534 ms = ms.replace(':', '.')
2535 return sum(float(part or 0) * mult for part, mult in (
2536 (days, 86400), (hours, 3600), (mins, 60), (secs, 1), (ms, 1)))
2537
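# Illustrative usage (a sketch covering two of the recognized notations):
#   >>> parse_duration('1:02:03.5')
#   3723.5
#   >>> parse_duration('2h 30m')
#   9000.0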
2538
2539 def prepend_extension(filename, ext, expected_real_ext=None):
2540 name, real_ext = os.path.splitext(filename)
2541 return (
2542 f'{name}.{ext}{real_ext}'
2543 if not expected_real_ext or real_ext[1:] == expected_real_ext
2544 else f'{filename}.{ext}')
2545
2546
2547 def replace_extension(filename, ext, expected_real_ext=None):
2548 name, real_ext = os.path.splitext(filename)
2549 return '{}.{}'.format(
2550 name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
2551 ext)
2552
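# Illustrative usage (a sketch):
#   >>> prepend_extension('video.mp4', 'temp')
#   'video.temp.mp4'
#   >>> replace_extension('video.mp4', 'mkv')
#   'video.mkv'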
2553
2554 def check_executable(exe, args=[]):
2555 """ Checks if the given binary is installed somewhere in PATH, and returns its name.
2556 args can be a list of arguments for a short output (like -version) """
2557 try:
2558 Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate_or_kill()
2559 except OSError:
2560 return False
2561 return exe
2562
2563
2564 def _get_exe_version_output(exe, args, *, to_screen=None):
2565 if to_screen:
2566 to_screen(f'Checking exe version: {shell_quote([exe] + args)}')
2567 try:
2568 # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
2569 # SIGTTOU if yt-dlp is run in the background.
2570 # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
2571 out, _ = Popen(
2572 [encodeArgument(exe)] + args, stdin=subprocess.PIPE,
2573 stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate_or_kill()
2574 except OSError:
2575 return False
2576 if isinstance(out, bytes): # Popen pipes return bytes when no encoding is set
2577 out = out.decode('ascii', 'ignore')
2578 return out
2579
2580
2581 def detect_exe_version(output, version_re=None, unrecognized='present'):
2582 assert isinstance(output, compat_str)
2583 if version_re is None:
2584 version_re = r'version\s+([-0-9._a-zA-Z]+)'
2585 m = re.search(version_re, output)
2586 if m:
2587 return m.group(1)
2588 else:
2589 return unrecognized
2590
2591
2592 def get_exe_version(exe, args=['--version'],
2593 version_re=None, unrecognized='present'):
2594 """ Returns the version of the specified executable,
2595 or False if the executable is not present """
2596 out = _get_exe_version_output(exe, args)
2597 return detect_exe_version(out, version_re, unrecognized) if out else False
2598
2599
2600 class LazyList(collections.abc.Sequence):
2601 """Lazy immutable list from an iterable
2602 Note that slices of a LazyList are lists, not LazyLists"""
2603
2604 class IndexError(IndexError):
2605 pass
2606
2607 def __init__(self, iterable, *, reverse=False, _cache=None):
2608 self._iterable = iter(iterable)
2609 self._cache = [] if _cache is None else _cache
2610 self._reversed = reverse
2611
2612 def __iter__(self):
2613 if self._reversed:
2614 # We need to consume the entire iterable to iterate in reverse
2615 yield from self.exhaust()
2616 return
2617 yield from self._cache
2618 for item in self._iterable:
2619 self._cache.append(item)
2620 yield item
2621
2622 def _exhaust(self):
2623 self._cache.extend(self._iterable)
2624 self._iterable = [] # Discard the emptied iterable to make it pickle-able
2625 return self._cache
2626
2627 def exhaust(self):
2628 """Evaluate the entire iterable"""
2629 return self._exhaust()[::-1 if self._reversed else 1]
2630
2631 @staticmethod
2632 def _reverse_index(x):
2633 return None if x is None else -(x + 1)
2634
2635 def __getitem__(self, idx):
2636 if isinstance(idx, slice):
2637 if self._reversed:
2638 idx = slice(self._reverse_index(idx.start), self._reverse_index(idx.stop), -(idx.step or 1))
2639 start, stop, step = idx.start, idx.stop, idx.step or 1
2640 elif isinstance(idx, int):
2641 if self._reversed:
2642 idx = self._reverse_index(idx)
2643 start, stop, step = idx, idx, 0
2644 else:
2645 raise TypeError('indices must be integers or slices')
2646 if ((start or 0) < 0 or (stop or 0) < 0
2647 or (start is None and step < 0)
2648 or (stop is None and step > 0)):
2649 # We need to consume the entire iterable to be able to slice from the end
2650 # Obviously, never use this with infinite iterables
2651 self._exhaust()
2652 try:
2653 return self._cache[idx]
2654 except IndexError as e:
2655 raise self.IndexError(e) from e
2656 n = max(start or 0, stop or 0) - len(self._cache) + 1
2657 if n > 0:
2658 self._cache.extend(itertools.islice(self._iterable, n))
2659 try:
2660 return self._cache[idx]
2661 except IndexError as e:
2662 raise self.IndexError(e) from e
2663
2664 def __bool__(self):
2665 try:
2666 self[-1] if self._reversed else self[0]
2667 except self.IndexError:
2668 return False
2669 return True
2670
2671 def __len__(self):
2672 self._exhaust()
2673 return len(self._cache)
2674
2675 def __reversed__(self):
2676 return type(self)(self._iterable, reverse=not self._reversed, _cache=self._cache)
2677
2678 def __copy__(self):
2679 return type(self)(self._iterable, reverse=self._reversed, _cache=self._cache)
2680
2681 def __repr__(self):
2682 # repr and str should mimic a list. So we exhaust the iterable
2683 return repr(self.exhaust())
2684
2685 def __str__(self):
2686 return repr(self.exhaust())
2687
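# Illustrative usage (a sketch; only the accessed prefix is evaluated):
#   >>> ll = LazyList(itertools.count())
#   >>> ll[10]  # consumes just the first 11 items of the infinite iterator
#   10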
2688
2689 class PagedList:
2690
2691 class IndexError(IndexError):
2692 pass
2693
2694 def __len__(self):
2695 # This is only useful for tests
2696 return len(self.getslice())
2697
2698 def __init__(self, pagefunc, pagesize, use_cache=True):
2699 self._pagefunc = pagefunc
2700 self._pagesize = pagesize
2701 self._pagecount = float('inf')
2702 self._use_cache = use_cache
2703 self._cache = {}
2704
2705 def getpage(self, pagenum):
2706 page_results = self._cache.get(pagenum)
2707 if page_results is None:
2708 page_results = [] if pagenum > self._pagecount else list(self._pagefunc(pagenum))
2709 if self._use_cache:
2710 self._cache[pagenum] = page_results
2711 return page_results
2712
2713 def getslice(self, start=0, end=None):
2714 return list(self._getslice(start, end))
2715
2716 def _getslice(self, start, end):
2717 raise NotImplementedError('This method must be implemented by subclasses')
2718
2719 def __getitem__(self, idx):
2720 assert self._use_cache, 'Indexing PagedList requires cache'
2721 if not isinstance(idx, int) or idx < 0:
2722 raise TypeError('indices must be non-negative integers')
2723 entries = self.getslice(idx, idx + 1)
2724 if not entries:
2725 raise self.IndexError()
2726 return entries[0]
2727
2728
2729 class OnDemandPagedList(PagedList):
2730 """Download pages until a page with less than maximum results"""
2731
2732 def _getslice(self, start, end):
2733 for pagenum in itertools.count(start // self._pagesize):
2734 firstid = pagenum * self._pagesize
2735 nextfirstid = pagenum * self._pagesize + self._pagesize
2736 if start >= nextfirstid:
2737 continue
2738
2739 startv = (
2740 start % self._pagesize
2741 if firstid <= start < nextfirstid
2742 else 0)
2743 endv = (
2744 ((end - 1) % self._pagesize) + 1
2745 if (end is not None and firstid <= end <= nextfirstid)
2746 else None)
2747
2748 try:
2749 page_results = self.getpage(pagenum)
2750 except Exception:
2751 self._pagecount = pagenum - 1
2752 raise
2753 if startv != 0 or endv is not None:
2754 page_results = page_results[startv:endv]
2755 yield from page_results
2756
2757 # A little optimization: if the current page is not "full", i.e. does
2758 # not contain page_size videos, then we can assume that this page
2759 # is the last one - there are no more ids on further pages -
2760 # so there is no need to query again.
2761 if len(page_results) + startv < self._pagesize:
2762 break
2763
2764 # If we got the whole page, but the next page is not interesting,
2765 # break out early as well
2766 if end == nextfirstid:
2767 break
2768
2769
2770 class InAdvancePagedList(PagedList):
2771 """PagedList with total number of pages known in advance"""
2772
2773 def __init__(self, pagefunc, pagecount, pagesize):
2774 PagedList.__init__(self, pagefunc, pagesize, True)
2775 self._pagecount = pagecount
2776
2777 def _getslice(self, start, end):
2778 start_page = start // self._pagesize
2779 end_page = self._pagecount if end is None else min(self._pagecount, end // self._pagesize + 1)
2780 skip_elems = start - start_page * self._pagesize
2781 only_more = None if end is None else end - start
2782 for pagenum in range(start_page, end_page):
2783 page_results = self.getpage(pagenum)
2784 if skip_elems:
2785 page_results = page_results[skip_elems:]
2786 skip_elems = None
2787 if only_more is not None:
2788 if len(page_results) < only_more:
2789 only_more -= len(page_results)
2790 else:
2791 yield from page_results[:only_more]
2792 break
2793 yield from page_results
2794
2795
2796 def uppercase_escape(s):
2797 unicode_escape = codecs.getdecoder('unicode_escape')
2798 return re.sub(
2799 r'\\U[0-9a-fA-F]{8}',
2800 lambda m: unicode_escape(m.group(0))[0],
2801 s)
2802
2803
2804 def lowercase_escape(s):
2805 unicode_escape = codecs.getdecoder('unicode_escape')
2806 return re.sub(
2807 r'\\u[0-9a-fA-F]{4}',
2808 lambda m: unicode_escape(m.group(0))[0],
2809 s)
2810
2811
2812 def escape_rfc3986(s):
2813 """Escape non-ASCII characters as suggested by RFC 3986"""
2814 return urllib.parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
2815
2816
2817 def escape_url(url):
2818 """Escape URL as suggested by RFC 3986"""
2819 url_parsed = compat_urllib_parse_urlparse(url)
2820 return url_parsed._replace(
2821 netloc=url_parsed.netloc.encode('idna').decode('ascii'),
2822 path=escape_rfc3986(url_parsed.path),
2823 params=escape_rfc3986(url_parsed.params),
2824 query=escape_rfc3986(url_parsed.query),
2825 fragment=escape_rfc3986(url_parsed.fragment)
2826 ).geturl()
2827
2828
2829 def parse_qs(url):
2830 return compat_parse_qs(compat_urllib_parse_urlparse(url).query)
2831
2832
2833 def read_batch_urls(batch_fd):
2834 def fixup(url):
2835 if not isinstance(url, compat_str):
2836 url = url.decode('utf-8', 'replace')
2837 BOM_UTF8 = ('\xef\xbb\xbf', '\ufeff')
2838 for bom in BOM_UTF8:
2839 if url.startswith(bom):
2840 url = url[len(bom):]
2841 url = url.lstrip()
2842 if not url or url.startswith(('#', ';', ']')):
2843 return False
2844 # "#" cannot be stripped out since it is part of the URI
2845 # However, it can be safely stripped out if it follows a whitespace
2846 return re.split(r'\s#', url, 1)[0].rstrip()
2847
2848 with contextlib.closing(batch_fd) as fd:
2849 return [url for url in map(fixup, fd) if url]
2850
2851
2852 def urlencode_postdata(*args, **kargs):
2853 return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
2854
2855
2856 def update_url_query(url, query):
2857 if not query:
2858 return url
2859 parsed_url = compat_urlparse.urlparse(url)
2860 qs = compat_parse_qs(parsed_url.query)
2861 qs.update(query)
2862 return compat_urlparse.urlunparse(parsed_url._replace(
2863 query=compat_urllib_parse_urlencode(qs, True)))
2864
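# Illustrative usage (a sketch):
#   >>> update_url_query('http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})
#   'http://example.com/path?quality=HD&format=mp4'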
2865
2866 def update_Request(req, url=None, data=None, headers={}, query={}):
2867 req_headers = req.headers.copy()
2868 req_headers.update(headers)
2869 req_data = data or req.data
2870 req_url = update_url_query(url or req.get_full_url(), query)
2871 req_get_method = req.get_method()
2872 if req_get_method == 'HEAD':
2873 req_type = HEADRequest
2874 elif req_get_method == 'PUT':
2875 req_type = PUTRequest
2876 else:
2877 req_type = compat_urllib_request.Request
2878 new_req = req_type(
2879 req_url, data=req_data, headers=req_headers,
2880 origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
2881 if hasattr(req, 'timeout'):
2882 new_req.timeout = req.timeout
2883 return new_req
2884
2885
2886 def _multipart_encode_impl(data, boundary):
2887 content_type = 'multipart/form-data; boundary=%s' % boundary
2888
2889 out = b''
2890 for k, v in data.items():
2891 out += b'--' + boundary.encode('ascii') + b'\r\n'
2892 if isinstance(k, compat_str):
2893 k = k.encode()
2894 if isinstance(v, compat_str):
2895 v = v.encode()
2896 # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
2897 # suggests sending UTF-8 directly. Firefox sends UTF-8, too
2898 content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
2899 if boundary.encode('ascii') in content:
2900 raise ValueError('Boundary overlaps with data')
2901 out += content
2902
2903 out += b'--' + boundary.encode('ascii') + b'--\r\n'
2904
2905 return out, content_type
2906
2907
2908 def multipart_encode(data, boundary=None):
2909 '''
2910 Encode a dict to RFC 7578-compliant form-data
2911
2912 data:
2913 A dict where keys and values can be either Unicode or bytes-like
2914 objects.
2915 boundary:
2916 If specified, it's used as the boundary and must be a Unicode object.
2917 Otherwise a random boundary is generated.
2918
2919 Reference: https://tools.ietf.org/html/rfc7578
2920 '''
2921 has_specified_boundary = boundary is not None
2922
2923 while True:
2924 if boundary is None:
2925 boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
2926
2927 try:
2928 out, content_type = _multipart_encode_impl(data, boundary)
2929 break
2930 except ValueError:
2931 if has_specified_boundary:
2932 raise
2933 boundary = None
2934
2935 return out, content_type
2936
2937
2938 def dict_get(d, key_or_keys, default=None, skip_false_values=True):
2939 for val in map(d.get, variadic(key_or_keys)):
2940 if val is not None and (val or not skip_false_values):
2941 return val
2942 return default
2943
2944
2945 def try_call(*funcs, expected_type=None, args=[], kwargs={}):
2946 for f in funcs:
2947 try:
2948 val = f(*args, **kwargs)
2949 except (AttributeError, KeyError, TypeError, IndexError, ZeroDivisionError):
2950 pass
2951 else:
2952 if expected_type is None or isinstance(val, expected_type):
2953 return val
2954
2955
2956 def try_get(src, getter, expected_type=None):
2957 return try_call(*variadic(getter), args=(src,), expected_type=expected_type)
2958
2959
2960 def filter_dict(dct, cndn=lambda _, v: v is not None):
2961 return {k: v for k, v in dct.items() if cndn(k, v)}
2962
2963
2964 def merge_dicts(*dicts):
2965 merged = {}
2966 for a_dict in dicts:
2967 for k, v in a_dict.items():
2968 if (v is not None and k not in merged
2969 or isinstance(v, str) and merged[k] == ''):
2970 merged[k] = v
2971 return merged
2972
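# Illustrative usage (a sketch; earlier dicts win, except over empty strings):
#   >>> merge_dicts({'a': 1, 'b': ''}, {'a': 2, 'b': 'x'})
#   {'a': 1, 'b': 'x'}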
2973
2974 def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
2975 return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
2976
2977
2978 US_RATINGS = {
2979 'G': 0,
2980 'PG': 10,
2981 'PG-13': 13,
2982 'R': 16,
2983 'NC': 18,
2984 }
2985
2986
2987 TV_PARENTAL_GUIDELINES = {
2988 'TV-Y': 0,
2989 'TV-Y7': 7,
2990 'TV-G': 0,
2991 'TV-PG': 0,
2992 'TV-14': 14,
2993 'TV-MA': 17,
2994 }
2995
2996
2997 def parse_age_limit(s):
2998 # isinstance(False, int) is True. So type() must be used instead
2999 if type(s) is int: # noqa: E721
3000 return s if 0 <= s <= 21 else None
3001 elif not isinstance(s, str):
3002 return None
3003 m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
3004 if m:
3005 return int(m.group('age'))
3006 s = s.upper()
3007 if s in US_RATINGS:
3008 return US_RATINGS[s]
3009 m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
3010 if m:
3011 return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
3012 return None
3013
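# Illustrative usage (a sketch):
#   >>> parse_age_limit('18+')
#   18
#   >>> parse_age_limit('TV-MA')
#   17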
3014
3015 def strip_jsonp(code):
3016 return re.sub(
3017 r'''(?sx)^
3018 (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
3019 (?:\s*&&\s*(?P=func_name))?
3020 \s*\(\s*(?P<callback_data>.*)\);?
3021 \s*?(?://[^\n]*)*$''',
3022 r'\g<callback_data>', code)
3023
3024
3025 def js_to_json(code, vars={}):
3026 # vars is a dict of var, val pairs to substitute
3027 COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
3028 SKIP_RE = fr'\s*(?:{COMMENT_RE})?\s*'
3029 INTEGER_TABLE = (
3030 (fr'(?s)^(0[xX][0-9a-fA-F]+){SKIP_RE}:?$', 16),
3031 (fr'(?s)^(0+[0-7]+){SKIP_RE}:?$', 8),
3032 )
3033
3034 def fix_kv(m):
3035 v = m.group(0)
3036 if v in ('true', 'false', 'null'):
3037 return v
3038 elif v in ('undefined', 'void 0'):
3039 return 'null'
3040 elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
3041 return ""
3042
3043 if v[0] in ("'", '"'):
3044 v = re.sub(r'(?s)\\.|"', lambda m: {
3045 '"': '\\"',
3046 "\\'": "'",
3047 '\\\n': '',
3048 '\\x': '\\u00',
3049 }.get(m.group(0), m.group(0)), v[1:-1])
3050 else:
3051 for regex, base in INTEGER_TABLE:
3052 im = re.match(regex, v)
3053 if im:
3054 i = int(im.group(1), base)
3055 return '"%d":' % i if v.endswith(':') else '%d' % i
3056
3057 if v in vars:
3058 return vars[v]
3059
3060 return '"%s"' % v
3061
3062 code = re.sub(r'new Date\((".+")\)', r'\g<1>', code)
3063
3064 return re.sub(r'''(?sx)
3065 "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
3066 '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
3067 {comment}|,(?={skip}[\]}}])|
3068 void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
3069 \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
3070 [0-9]+(?={skip}:)|
3071 !+
3072 '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
3073
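# Illustrative usage (a sketch; unquoted keys and single quotes are normalized):
#   >>> js_to_json("{abc: 'def', n: 4}")
#   '{"abc": "def", "n": 4}'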
3074
3075 def qualities(quality_ids):
3076 """ Get a numeric quality value out of a list of possible values """
3077 def q(qid):
3078 try:
3079 return quality_ids.index(qid)
3080 except ValueError:
3081 return -1
3082 return q
3083
3084
3085 POSTPROCESS_WHEN = ('pre_process', 'after_filter', 'before_dl', 'after_move', 'post_process', 'after_video', 'playlist')
3086
3087
3088 DEFAULT_OUTTMPL = {
3089 'default': '%(title)s [%(id)s].%(ext)s',
3090 'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
3091 }
3092 OUTTMPL_TYPES = {
3093 'chapter': None,
3094 'subtitle': None,
3095 'thumbnail': None,
3096 'description': 'description',
3097 'annotation': 'annotations.xml',
3098 'infojson': 'info.json',
3099 'link': None,
3100 'pl_video': None,
3101 'pl_thumbnail': None,
3102 'pl_description': 'description',
3103 'pl_infojson': 'info.json',
3104 }
3105
3106 # As of [1] format syntax is:
3107 # %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
3108 # 1. https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting
3109 STR_FORMAT_RE_TMPL = r'''(?x)
3110 (?<!%)(?P<prefix>(?:%%)*)
3111 %
3112 (?P<has_key>\((?P<key>{0})\))?
3113 (?P<format>
3114 (?P<conversion>[#0\-+ ]+)?
3115 (?P<min_width>\d+)?
3116 (?P<precision>\.\d+)?
3117 (?P<len_mod>[hlL])? # unused in python
3118 {1} # conversion type
3119 )
3120 '''
3121
3122
3123 STR_FORMAT_TYPES = 'diouxXeEfFgGcrs'
3124
3125
3126 def limit_length(s, length):
3127 """ Add ellipses to overly long strings """
3128 if s is None:
3129 return None
3130 ELLIPSES = '...'
3131 if len(s) > length:
3132 return s[:length - len(ELLIPSES)] + ELLIPSES
3133 return s
3134
3135
3136 def version_tuple(v):
3137 return tuple(int(e) for e in re.split(r'[-.]', v))
3138
3139
3140 def is_outdated_version(version, limit, assume_new=True):
3141 if not version:
3142 return not assume_new
3143 try:
3144 return version_tuple(version) < version_tuple(limit)
3145 except ValueError:
3146 return not assume_new
3147
3148
3149 def ytdl_is_updateable():
3150 """ Returns if yt-dlp can be updated with -U """
3151
3152 from .update import is_non_updateable
3153
3154 return not is_non_updateable()
3155
3156
3157 def args_to_str(args):
3158 # Get a short string representation for a subprocess command
3159 return ' '.join(compat_shlex_quote(a) for a in args)
3160
3161
3162 def error_to_compat_str(err):
3163 return str(err)
3164
3165
3166 def error_to_str(err):
3167 return f'{type(err).__name__}: {err}'
3168
3169
3170 def mimetype2ext(mt):
3171 if mt is None:
3172 return None
3173
3174 mt, _, params = mt.partition(';')
3175 mt = mt.strip()
3176
3177 FULL_MAP = {
3178 'audio/mp4': 'm4a',
3179 # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
3180 # it's the most popular one
3181 'audio/mpeg': 'mp3',
3182 'audio/x-wav': 'wav',
3183 'audio/wav': 'wav',
3184 'audio/wave': 'wav',
3185 }
3186
3187 ext = FULL_MAP.get(mt)
3188 if ext is not None:
3189 return ext
3190
3191 SUBTYPE_MAP = {
3192 '3gpp': '3gp',
3193 'smptett+xml': 'tt',
3194 'ttaf+xml': 'dfxp',
3195 'ttml+xml': 'ttml',
3196 'x-flv': 'flv',
3197 'x-mp4-fragmented': 'mp4',
3198 'x-ms-sami': 'sami',
3199 'x-ms-wmv': 'wmv',
3200 'mpegurl': 'm3u8',
3201 'x-mpegurl': 'm3u8',
3202 'vnd.apple.mpegurl': 'm3u8',
3203 'dash+xml': 'mpd',
3204 'f4m+xml': 'f4m',
3205 'hds+xml': 'f4m',
3206 'vnd.ms-sstr+xml': 'ism',
3207 'quicktime': 'mov',
3208 'mp2t': 'ts',
3209 'x-wav': 'wav',
3210 'filmstrip+json': 'fs',
3211 'svg+xml': 'svg',
3212 }
3213
3214 _, _, subtype = mt.rpartition('/')
3215 ext = SUBTYPE_MAP.get(subtype.lower())
3216 if ext is not None:
3217 return ext
3218
3219 SUFFIX_MAP = {
3220 'json': 'json',
3221 'xml': 'xml',
3222 'zip': 'zip',
3223 'gzip': 'gz',
3224 }
3225
3226 _, _, suffix = subtype.partition('+')
3227 ext = SUFFIX_MAP.get(suffix)
3228 if ext is not None:
3229 return ext
3230
3231 return subtype.replace('+', '.')
3232
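# Illustrative usage (a sketch):
#   >>> mimetype2ext('application/x-mpegurl; charset=utf-8')
#   'm3u8'
#   >>> mimetype2ext('audio/mp4')
#   'm4a'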
3233
3234 def ext2mimetype(ext_or_url):
3235 if not ext_or_url:
3236 return None
3237 if '.' not in ext_or_url:
3238 ext_or_url = f'file.{ext_or_url}'
3239 return mimetypes.guess_type(ext_or_url)[0]
3240
3241
3242 def parse_codecs(codecs_str):
3243 # http://tools.ietf.org/html/rfc6381
3244 if not codecs_str:
3245 return {}
3246 split_codecs = list(filter(None, map(
3247 str.strip, codecs_str.strip().strip(',').split(','))))
3248 vcodec, acodec, scodec, hdr = None, None, None, None
3249 for full_codec in split_codecs:
3250 parts = full_codec.split('.')
3251 codec = parts[0].replace('0', '')
3252 if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
3253 'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
3254 if not vcodec:
3255 vcodec = '.'.join(parts[:4]) if codec in ('vp9', 'av1', 'hvc1') else full_codec
3256 if codec in ('dvh1', 'dvhe'):
3257 hdr = 'DV'
3258 elif codec == 'av1' and len(parts) > 3 and parts[3] == '10':
3259 hdr = 'HDR10'
3260 elif full_codec.replace('0', '').startswith('vp9.2'):
3261 hdr = 'HDR10'
3262 elif codec in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
3263 if not acodec:
3264 acodec = full_codec
3265 elif codec in ('stpp', 'wvtt',):
3266 if not scodec:
3267 scodec = full_codec
3268 else:
3269 write_string(f'WARNING: Unknown codec {full_codec}\n')
3270 if vcodec or acodec or scodec:
3271 return {
3272 'vcodec': vcodec or 'none',
3273 'acodec': acodec or 'none',
3274 'dynamic_range': hdr,
3275 **({'scodec': scodec} if scodec is not None else {}),
3276 }
3277 elif len(split_codecs) == 2:
3278 return {
3279 'vcodec': split_codecs[0],
3280 'acodec': split_codecs[1],
3281 }
3282 return {}
3283
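# Illustrative usage (a sketch):
#   >>> parse_codecs('avc1.64001f, mp4a.40.2')
#   {'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2', 'dynamic_range': None}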
3284
3285 def urlhandle_detect_ext(url_handle):
3286 getheader = url_handle.headers.get
3287
3288 cd = getheader('Content-Disposition')
3289 if cd:
3290 m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
3291 if m:
3292 e = determine_ext(m.group('filename'), default_ext=None)
3293 if e:
3294 return e
3295
3296 return mimetype2ext(getheader('Content-Type'))
3297
3298
3299 def encode_data_uri(data, mime_type):
3300 return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
3301
3302
3303 def age_restricted(content_limit, age_limit):
3304 """ Returns True iff the content should be blocked """
3305
3306 if age_limit is None: # No limit set
3307 return False
3308 if content_limit is None:
3309 return False # Content available for everyone
3310 return age_limit < content_limit
3311
3312
3313 def is_html(first_bytes):
3314 """ Detect whether a file contains HTML by examining its first bytes. """
3315
3316 BOMS = [
3317 (b'\xef\xbb\xbf', 'utf-8'),
3318 (b'\x00\x00\xfe\xff', 'utf-32-be'),
3319 (b'\xff\xfe\x00\x00', 'utf-32-le'),
3320 (b'\xff\xfe', 'utf-16-le'),
3321 (b'\xfe\xff', 'utf-16-be'),
3322 ]
3323
3324 encoding = 'utf-8'
3325 for bom, enc in BOMS:
3326 while first_bytes.startswith(bom):
3327 encoding, first_bytes = enc, first_bytes[len(bom):]
3328
3329 return re.match(r'^\s*<', first_bytes.decode(encoding, 'replace'))
3330
3331
3332 def determine_protocol(info_dict):
3333 protocol = info_dict.get('protocol')
3334 if protocol is not None:
3335 return protocol
3336
3337 url = sanitize_url(info_dict['url'])
3338 if url.startswith('rtmp'):
3339 return 'rtmp'
3340 elif url.startswith('mms'):
3341 return 'mms'
3342 elif url.startswith('rtsp'):
3343 return 'rtsp'
3344
3345 ext = determine_ext(url)
3346 if ext == 'm3u8':
3347 return 'm3u8'
3348 elif ext == 'f4m':
3349 return 'f4m'
3350
3351 return compat_urllib_parse_urlparse(url).scheme
3352
3353
3354 def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False):
3355 """ Render a list of rows, each as a list of values.
3356 Text after a \t will be right aligned """
3357 def width(string):
3358 return len(remove_terminal_sequences(string).replace('\t', ''))
3359
3360 def get_max_lens(table):
3361 return [max(width(str(v)) for v in col) for col in zip(*table)]
3362
3363 def filter_using_list(row, filterArray):
3364 return [col for take, col in itertools.zip_longest(filterArray, row, fillvalue=True) if take]
3365
3366 max_lens = get_max_lens(data) if hide_empty else []
3367 header_row = filter_using_list(header_row, max_lens)
3368 data = [filter_using_list(row, max_lens) for row in data]
3369
3370 table = [header_row] + data
3371 max_lens = get_max_lens(table)
3372 extra_gap += 1
3373 if delim:
3374 table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data
3375 table[1][-1] = table[1][-1][:-extra_gap * len(delim)] # Remove extra_gap from end of delimiter
3376 for row in table:
3377 for pos, text in enumerate(map(str, row)):
3378 if '\t' in text:
3379 row[pos] = text.replace('\t', ' ' * (max_lens[pos] - width(text))) + ' ' * extra_gap
3380 else:
3381 row[pos] = text + ' ' * (max_lens[pos] - width(text) + extra_gap)
3382 ret = '\n'.join(''.join(row).rstrip() for row in table)
3383 return ret
3384
3385
3386 def _match_one(filter_part, dct, incomplete):
3387 # TODO: Generalize code with YoutubeDL._build_format_filter
3388 STRING_OPERATORS = {
3389 '*=': operator.contains,
3390 '^=': lambda attr, value: attr.startswith(value),
3391 '$=': lambda attr, value: attr.endswith(value),
3392 '~=': lambda attr, value: re.search(value, attr),
3393 }
3394 COMPARISON_OPERATORS = {
3395 **STRING_OPERATORS,
3396 '<=': operator.le, # "<=" must be defined above "<"
3397 '<': operator.lt,
3398 '>=': operator.ge,
3399 '>': operator.gt,
3400 '=': operator.eq,
3401 }
3402
3403 if isinstance(incomplete, bool):
3404 is_incomplete = lambda _: incomplete
3405 else:
3406 is_incomplete = lambda k: k in incomplete
3407
3408 operator_rex = re.compile(r'''(?x)
3409 (?P<key>[a-z_]+)
3410 \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
3411 (?:
3412 (?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
3413 (?P<strval>.+?)
3414 )
3415 ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
3416 m = operator_rex.fullmatch(filter_part.strip())
3417 if m:
3418 m = m.groupdict()
3419 unnegated_op = COMPARISON_OPERATORS[m['op']]
3420 if m['negation']:
3421 op = lambda attr, value: not unnegated_op(attr, value)
3422 else:
3423 op = unnegated_op
3424 comparison_value = m['quotedstrval'] or m['strval']
3425 if m['quote']:
3426 comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote'])
3427 actual_value = dct.get(m['key'])
3428 numeric_comparison = None
3429 if isinstance(actual_value, (int, float)):
3430 # If the original field is a string and the matching comparison value is
3431 # a number, we should respect the origin of the original field
3432 # and process comparison value as a string (see
3433 # https://github.com/ytdl-org/youtube-dl/issues/11082)
3434 try:
3435 numeric_comparison = int(comparison_value)
3436 except ValueError:
3437 numeric_comparison = parse_filesize(comparison_value)
3438 if numeric_comparison is None:
3439 numeric_comparison = parse_filesize(f'{comparison_value}B')
3440 if numeric_comparison is None:
3441 numeric_comparison = parse_duration(comparison_value)
3442 if numeric_comparison is not None and m['op'] in STRING_OPERATORS:
3443 raise ValueError('Operator %s only supports string values!' % m['op'])
3444 if actual_value is None:
3445 return is_incomplete(m['key']) or m['none_inclusive']
3446 return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)
3447
3448 UNARY_OPERATORS = {
3449 '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
3450 '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
3451 }
3452 operator_rex = re.compile(r'''(?x)
3453 (?P<op>%s)\s*(?P<key>[a-z_]+)
3454 ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
3455 m = operator_rex.fullmatch(filter_part.strip())
3456 if m:
3457 op = UNARY_OPERATORS[m.group('op')]
3458 actual_value = dct.get(m.group('key'))
3459 if is_incomplete(m.group('key')) and actual_value is None:
3460 return True
3461 return op(actual_value)
3462
3463 raise ValueError('Invalid filter part %r' % filter_part)
3464
3465
3466 def match_str(filter_str, dct, incomplete=False):
3467 """ Filter a dictionary with a simple string syntax.
3468 @returns Whether the filter passes
3469 @param incomplete Set of keys that is expected to be missing from dct.
3470 Can be True/False to indicate all/none of the keys may be missing.
3471 All conditions on incomplete keys pass if the key is missing
3472 """
3473 return all(
3474 _match_one(filter_part.replace(r'\&', '&'), dct, incomplete)
3475 for filter_part in re.split(r'(?<!\\)&', filter_str))
3476
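# Illustrative usage (a sketch; '&' joins conditions):
#   >>> match_str('duration > 60 & description', {'duration': 100, 'description': 'x'})
#   True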
3477
3478 def match_filter_func(filters):
3479 if not filters:
3480 return None
3481 filters = set(variadic(filters))
3482
3483 interactive = '-' in filters
3484 if interactive:
3485 filters.remove('-')
3486
3487 def _match_func(info_dict, incomplete=False):
3488 if not filters or any(match_str(f, info_dict, incomplete) for f in filters):
3489 return NO_DEFAULT if interactive and not incomplete else None
3490 else:
3491 video_title = info_dict.get('title') or info_dict.get('id') or 'video'
3492 filter_str = ') | ('.join(map(str.strip, filters))
3493 return f'{video_title} does not pass filter ({filter_str}), skipping ..'
3494 return _match_func
3495
3496
3497 def download_range_func(chapters, ranges):
3498 def inner(info_dict, ydl):
3499 warning = ('There are no chapters matching the regex' if info_dict.get('chapters')
3500 else 'Cannot match chapters since chapter information is unavailable')
3501 for regex in chapters or []:
3502 for i, chapter in enumerate(info_dict.get('chapters') or []):
3503 if re.search(regex, chapter['title']):
3504 warning = None
3505 yield {**chapter, 'index': i}
3506 if chapters and warning:
3507 ydl.to_screen(f'[info] {info_dict["id"]}: {warning}')
3508
3509 yield from ({'start_time': start, 'end_time': end} for start, end in ranges or [])
3510
3511 return inner
3512
3513
3514 def parse_dfxp_time_expr(time_expr):
3515 if not time_expr:
3516 return
3517
3518 mobj = re.match(rf'^(?P<time_offset>{NUMBER_RE})s?$', time_expr)
3519 if mobj:
3520 return float(mobj.group('time_offset'))
3521
3522 mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
3523 if mobj:
3524 return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
3525
3526
3527 def srt_subtitles_timecode(seconds):
3528 return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)
3529
3530
3531 def ass_subtitles_timecode(seconds):
3532 time = timetuple_from_msec(seconds * 1000)
3533 return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)
3534
3535
3536 def dfxp2srt(dfxp_data):
3537 '''
3538 @param dfxp_data A bytes-like object containing DFXP data
3539 @returns A unicode object containing converted SRT data
3540 '''
3541 LEGACY_NAMESPACES = (
3542 (b'http://www.w3.org/ns/ttml', [
3543 b'http://www.w3.org/2004/11/ttaf1',
3544 b'http://www.w3.org/2006/04/ttaf1',
3545 b'http://www.w3.org/2006/10/ttaf1',
3546 ]),
3547 (b'http://www.w3.org/ns/ttml#styling', [
3548 b'http://www.w3.org/ns/ttml#style',
3549 ]),
3550 )
3551
3552 SUPPORTED_STYLING = [
3553 'color',
3554 'fontFamily',
3555 'fontSize',
3556 'fontStyle',
3557 'fontWeight',
3558 'textDecoration'
3559 ]
3560
3561 _x = functools.partial(xpath_with_ns, ns_map={
3562 'xml': 'http://www.w3.org/XML/1998/namespace',
3563 'ttml': 'http://www.w3.org/ns/ttml',
3564 'tts': 'http://www.w3.org/ns/ttml#styling',
3565 })
3566
3567 styles = {}
3568 default_style = {}
3569
3570 class TTMLPElementParser:
3571 def __init__(self):
3572 # Instance attributes, so parser state isn't shared through mutable class attributes
3573 self._out, self._unclosed_elements, self._applied_styles = '', [], []
3574
3575 def start(self, tag, attrib):
3576 if tag in (_x('ttml:br'), 'br'):
3577 self._out += '\n'
3578 else:
3579 unclosed_elements = []
3580 style = {}
3581 element_style_id = attrib.get('style')
3582 if default_style:
3583 style.update(default_style)
3584 if element_style_id:
3585 style.update(styles.get(element_style_id, {}))
3586 for prop in SUPPORTED_STYLING:
3587 prop_val = attrib.get(_x('tts:' + prop))
3588 if prop_val:
3589 style[prop] = prop_val
3590 if style:
3591 font = ''
3592 for k, v in sorted(style.items()):
3593 if self._applied_styles and self._applied_styles[-1].get(k) == v:
3594 continue
3595 if k == 'color':
3596 font += ' color="%s"' % v
3597 elif k == 'fontSize':
3598 font += ' size="%s"' % v
3599 elif k == 'fontFamily':
3600 font += ' face="%s"' % v
3601 elif k == 'fontWeight' and v == 'bold':
3602 self._out += '<b>'
3603 unclosed_elements.append('b')
3604 elif k == 'fontStyle' and v == 'italic':
3605 self._out += '<i>'
3606 unclosed_elements.append('i')
3607 elif k == 'textDecoration' and v == 'underline':
3608 self._out += '<u>'
3609 unclosed_elements.append('u')
3610 if font:
3611 self._out += '<font' + font + '>'
3612 unclosed_elements.append('font')
3613 applied_style = {}
3614 if self._applied_styles:
3615 applied_style.update(self._applied_styles[-1])
3616 applied_style.update(style)
3617 self._applied_styles.append(applied_style)
3618 self._unclosed_elements.append(unclosed_elements)
3619
3620 def end(self, tag):
3621 if tag not in (_x('ttml:br'), 'br'):
3622 unclosed_elements = self._unclosed_elements.pop()
3623 for element in reversed(unclosed_elements):
3624 self._out += '</%s>' % element
3625 if unclosed_elements and self._applied_styles:
3626 self._applied_styles.pop()
3627
3628 def data(self, data):
3629 self._out += data
3630
3631 def close(self):
3632 return self._out.strip()
3633
3634 def parse_node(node):
3635 target = TTMLPElementParser()
3636 parser = xml.etree.ElementTree.XMLParser(target=target)
3637 parser.feed(xml.etree.ElementTree.tostring(node))
3638 return parser.close()
3639
3640 for k, v in LEGACY_NAMESPACES:
3641 for ns in v:
3642 dfxp_data = dfxp_data.replace(ns, k)
3643
3644 dfxp = compat_etree_fromstring(dfxp_data)
3645 out = []
3646 paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
3647
3648 if not paras:
3649 raise ValueError('Invalid dfxp/TTML subtitle')
3650
3651 repeat = False
3652 while True:
3653 for style in dfxp.findall(_x('.//ttml:style')):
3654 style_id = style.get('id') or style.get(_x('xml:id'))
3655 if not style_id:
3656 continue
3657 parent_style_id = style.get('style')
3658 if parent_style_id:
3659 if parent_style_id not in styles:
3660 repeat = True
3661 continue
3662 styles[style_id] = styles[parent_style_id].copy()
3663 for prop in SUPPORTED_STYLING:
3664 prop_val = style.get(_x('tts:' + prop))
3665 if prop_val:
3666 styles.setdefault(style_id, {})[prop] = prop_val
3667 if repeat:
3668 repeat = False
3669 else:
3670 break
3671
3672 for p in ('body', 'div'):
3673 ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
3674 if ele is None:
3675 continue
3676 style = styles.get(ele.get('style'))
3677 if not style:
3678 continue
3679 default_style.update(style)
3680
3681 for para, index in zip(paras, itertools.count(1)):
3682 begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
3683 end_time = parse_dfxp_time_expr(para.attrib.get('end'))
3684 dur = parse_dfxp_time_expr(para.attrib.get('dur'))
3685 if begin_time is None:
3686 continue
3687 if not end_time:
3688 if not dur:
3689 continue
3690 end_time = begin_time + dur
3691 out.append('%d\n%s --> %s\n%s\n\n' % (
3692 index,
3693 srt_subtitles_timecode(begin_time),
3694 srt_subtitles_timecode(end_time),
3695 parse_node(para)))
3696
3697 return ''.join(out)
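# A rough sketch of the conversion on a minimal TTML document:
#   dfxp2srt(b'<tt xmlns="http://www.w3.org/ns/ttml"><body><div>'
#            b'<p begin="0s" end="1s">Hi</p></div></body></tt>')
#   == '1\n00:00:00,000 --> 00:00:01,000\nHi\n\n'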
3698
3699
3700 def cli_option(params, command_option, param, separator=None):
3701 param = params.get(param)
3702 return ([] if param is None
3703 else [command_option, str(param)] if separator is None
3704 else [f'{command_option}{separator}{param}'])
3705
3706
3707 def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
3708 param = params.get(param)
3709 assert param in (True, False, None)
3710 return cli_option({True: true_value, False: false_value}, command_option, param, separator)
3711
3712
3713 def cli_valueless_option(params, command_option, param, expected_value=True):
3714 return [command_option] if params.get(param) == expected_value else []
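# For example (hypothetical option/param names, for illustration only):
#   cli_option({'proxy': '127.0.0.1'}, '--proxy', 'proxy') == ['--proxy', '127.0.0.1']
#   cli_option({'proxy': '127.0.0.1'}, '--proxy', 'proxy', '=') == ['--proxy=127.0.0.1']
#   cli_bool_option({'check': True}, '--check', 'check') == ['--check', 'true']
#   cli_valueless_option({'quiet': True}, '--quiet', 'quiet') == ['--quiet']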
3715
3716
3717 def cli_configuration_args(argdict, keys, default=[], use_compat=True):
3718 if isinstance(argdict, (list, tuple)): # for backward compatibility
3719 if use_compat:
3720 return argdict
3721 else:
3722 argdict = None
3723 if argdict is None:
3724 return default
3725 assert isinstance(argdict, dict)
3726
3727 assert isinstance(keys, (list, tuple))
3728 for key_list in keys:
3729 arg_list = list(filter(
3730 lambda x: x is not None,
3731 [argdict.get(key.lower()) for key in variadic(key_list)]))
3732 if arg_list:
3733 return [arg for args in arg_list for arg in args]
3734 return default
3735
3736
3737 def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compat=True):
3738 main_key, exe = main_key.lower(), exe.lower()
3739 root_key = exe if main_key == exe else f'{main_key}+{exe}'
3740 keys = [f'{root_key}{k}' for k in (keys or [''])]
3741 if root_key in keys:
3742 if main_key != exe:
3743 keys.append((main_key, exe))
3744 keys.append('default')
3745 else:
3746 use_compat = False
3747 return cli_configuration_args(argdict, keys, default, use_compat)
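# e.g. (sketch) _configuration_args('downloader', {'ffmpeg': ['-v']}, 'ffmpeg') == ['-v']:
# the keys 'downloader+ffmpeg', ('downloader', 'ffmpeg') and 'default' are tried
# in order, and the ('downloader', 'ffmpeg') lookup finds the 'ffmpeg' entry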
3748
3749
3750 class ISO639Utils:
3751 # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
3752 _lang_map = {
3753 'aa': 'aar',
3754 'ab': 'abk',
3755 'ae': 'ave',
3756 'af': 'afr',
3757 'ak': 'aka',
3758 'am': 'amh',
3759 'an': 'arg',
3760 'ar': 'ara',
3761 'as': 'asm',
3762 'av': 'ava',
3763 'ay': 'aym',
3764 'az': 'aze',
3765 'ba': 'bak',
3766 'be': 'bel',
3767 'bg': 'bul',
3768 'bh': 'bih',
3769 'bi': 'bis',
3770 'bm': 'bam',
3771 'bn': 'ben',
3772 'bo': 'bod',
3773 'br': 'bre',
3774 'bs': 'bos',
3775 'ca': 'cat',
3776 'ce': 'che',
3777 'ch': 'cha',
3778 'co': 'cos',
3779 'cr': 'cre',
3780 'cs': 'ces',
3781 'cu': 'chu',
3782 'cv': 'chv',
3783 'cy': 'cym',
3784 'da': 'dan',
3785 'de': 'deu',
3786 'dv': 'div',
3787 'dz': 'dzo',
3788 'ee': 'ewe',
3789 'el': 'ell',
3790 'en': 'eng',
3791 'eo': 'epo',
3792 'es': 'spa',
3793 'et': 'est',
3794 'eu': 'eus',
3795 'fa': 'fas',
3796 'ff': 'ful',
3797 'fi': 'fin',
3798 'fj': 'fij',
3799 'fo': 'fao',
3800 'fr': 'fra',
3801 'fy': 'fry',
3802 'ga': 'gle',
3803 'gd': 'gla',
3804 'gl': 'glg',
3805 'gn': 'grn',
3806 'gu': 'guj',
3807 'gv': 'glv',
3808 'ha': 'hau',
3809 'he': 'heb',
3810 'iw': 'heb', # Replaced by he in 1989 revision
3811 'hi': 'hin',
3812 'ho': 'hmo',
3813 'hr': 'hrv',
3814 'ht': 'hat',
3815 'hu': 'hun',
3816 'hy': 'hye',
3817 'hz': 'her',
3818 'ia': 'ina',
3819 'id': 'ind',
3820 'in': 'ind', # Replaced by id in 1989 revision
3821 'ie': 'ile',
3822 'ig': 'ibo',
3823 'ii': 'iii',
3824 'ik': 'ipk',
3825 'io': 'ido',
3826 'is': 'isl',
3827 'it': 'ita',
3828 'iu': 'iku',
3829 'ja': 'jpn',
3830 'jv': 'jav',
3831 'ka': 'kat',
3832 'kg': 'kon',
3833 'ki': 'kik',
3834 'kj': 'kua',
3835 'kk': 'kaz',
3836 'kl': 'kal',
3837 'km': 'khm',
3838 'kn': 'kan',
3839 'ko': 'kor',
3840 'kr': 'kau',
3841 'ks': 'kas',
3842 'ku': 'kur',
3843 'kv': 'kom',
3844 'kw': 'cor',
3845 'ky': 'kir',
3846 'la': 'lat',
3847 'lb': 'ltz',
3848 'lg': 'lug',
3849 'li': 'lim',
3850 'ln': 'lin',
3851 'lo': 'lao',
3852 'lt': 'lit',
3853 'lu': 'lub',
3854 'lv': 'lav',
3855 'mg': 'mlg',
3856 'mh': 'mah',
3857 'mi': 'mri',
3858 'mk': 'mkd',
3859 'ml': 'mal',
3860 'mn': 'mon',
3861 'mr': 'mar',
3862 'ms': 'msa',
3863 'mt': 'mlt',
3864 'my': 'mya',
3865 'na': 'nau',
3866 'nb': 'nob',
3867 'nd': 'nde',
3868 'ne': 'nep',
3869 'ng': 'ndo',
3870 'nl': 'nld',
3871 'nn': 'nno',
3872 'no': 'nor',
3873 'nr': 'nbl',
3874 'nv': 'nav',
3875 'ny': 'nya',
3876 'oc': 'oci',
3877 'oj': 'oji',
3878 'om': 'orm',
3879 'or': 'ori',
3880 'os': 'oss',
3881 'pa': 'pan',
3882 'pi': 'pli',
3883 'pl': 'pol',
3884 'ps': 'pus',
3885 'pt': 'por',
3886 'qu': 'que',
3887 'rm': 'roh',
3888 'rn': 'run',
3889 'ro': 'ron',
3890 'ru': 'rus',
3891 'rw': 'kin',
3892 'sa': 'san',
3893 'sc': 'srd',
3894 'sd': 'snd',
3895 'se': 'sme',
3896 'sg': 'sag',
3897 'si': 'sin',
3898 'sk': 'slk',
3899 'sl': 'slv',
3900 'sm': 'smo',
3901 'sn': 'sna',
3902 'so': 'som',
3903 'sq': 'sqi',
3904 'sr': 'srp',
3905 'ss': 'ssw',
3906 'st': 'sot',
3907 'su': 'sun',
3908 'sv': 'swe',
3909 'sw': 'swa',
3910 'ta': 'tam',
3911 'te': 'tel',
3912 'tg': 'tgk',
3913 'th': 'tha',
3914 'ti': 'tir',
3915 'tk': 'tuk',
3916 'tl': 'tgl',
3917 'tn': 'tsn',
3918 'to': 'ton',
3919 'tr': 'tur',
3920 'ts': 'tso',
3921 'tt': 'tat',
3922 'tw': 'twi',
3923 'ty': 'tah',
3924 'ug': 'uig',
3925 'uk': 'ukr',
3926 'ur': 'urd',
3927 'uz': 'uzb',
3928 've': 'ven',
3929 'vi': 'vie',
3930 'vo': 'vol',
3931 'wa': 'wln',
3932 'wo': 'wol',
3933 'xh': 'xho',
3934 'yi': 'yid',
3935 'ji': 'yid', # Replaced by yi in 1989 revision
3936 'yo': 'yor',
3937 'za': 'zha',
3938 'zh': 'zho',
3939 'zu': 'zul',
3940 }
3941
3942 @classmethod
3943 def short2long(cls, code):
3944 """Convert language code from ISO 639-1 to ISO 639-2/T"""
3945 return cls._lang_map.get(code[:2])
3946
3947 @classmethod
3948 def long2short(cls, code):
3949 """Convert language code from ISO 639-2/T to ISO 639-1"""
3950 for short_name, long_name in cls._lang_map.items():
3951 if long_name == code:
3952 return short_name
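# For example:
#   ISO639Utils.short2long('en') == 'eng'
#   ISO639Utils.long2short('eng') == 'en'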
3953
3954
3955 class ISO3166Utils:
3956 # From http://data.okfn.org/data/core/country-list
3957 _country_map = {
3958 'AF': 'Afghanistan',
3959 'AX': 'Åland Islands',
3960 'AL': 'Albania',
3961 'DZ': 'Algeria',
3962 'AS': 'American Samoa',
3963 'AD': 'Andorra',
3964 'AO': 'Angola',
3965 'AI': 'Anguilla',
3966 'AQ': 'Antarctica',
3967 'AG': 'Antigua and Barbuda',
3968 'AR': 'Argentina',
3969 'AM': 'Armenia',
3970 'AW': 'Aruba',
3971 'AU': 'Australia',
3972 'AT': 'Austria',
3973 'AZ': 'Azerbaijan',
3974 'BS': 'Bahamas',
3975 'BH': 'Bahrain',
3976 'BD': 'Bangladesh',
3977 'BB': 'Barbados',
3978 'BY': 'Belarus',
3979 'BE': 'Belgium',
3980 'BZ': 'Belize',
3981 'BJ': 'Benin',
3982 'BM': 'Bermuda',
3983 'BT': 'Bhutan',
3984 'BO': 'Bolivia, Plurinational State of',
3985 'BQ': 'Bonaire, Sint Eustatius and Saba',
3986 'BA': 'Bosnia and Herzegovina',
3987 'BW': 'Botswana',
3988 'BV': 'Bouvet Island',
3989 'BR': 'Brazil',
3990 'IO': 'British Indian Ocean Territory',
3991 'BN': 'Brunei Darussalam',
3992 'BG': 'Bulgaria',
3993 'BF': 'Burkina Faso',
3994 'BI': 'Burundi',
3995 'KH': 'Cambodia',
3996 'CM': 'Cameroon',
3997 'CA': 'Canada',
3998 'CV': 'Cape Verde',
3999 'KY': 'Cayman Islands',
4000 'CF': 'Central African Republic',
4001 'TD': 'Chad',
4002 'CL': 'Chile',
4003 'CN': 'China',
4004 'CX': 'Christmas Island',
4005 'CC': 'Cocos (Keeling) Islands',
4006 'CO': 'Colombia',
4007 'KM': 'Comoros',
4008 'CG': 'Congo',
4009 'CD': 'Congo, the Democratic Republic of the',
4010 'CK': 'Cook Islands',
4011 'CR': 'Costa Rica',
4012 'CI': 'Côte d\'Ivoire',
4013 'HR': 'Croatia',
4014 'CU': 'Cuba',
4015 'CW': 'Curaçao',
4016 'CY': 'Cyprus',
4017 'CZ': 'Czech Republic',
4018 'DK': 'Denmark',
4019 'DJ': 'Djibouti',
4020 'DM': 'Dominica',
4021 'DO': 'Dominican Republic',
4022 'EC': 'Ecuador',
4023 'EG': 'Egypt',
4024 'SV': 'El Salvador',
4025 'GQ': 'Equatorial Guinea',
4026 'ER': 'Eritrea',
4027 'EE': 'Estonia',
4028 'ET': 'Ethiopia',
4029 'FK': 'Falkland Islands (Malvinas)',
4030 'FO': 'Faroe Islands',
4031 'FJ': 'Fiji',
4032 'FI': 'Finland',
4033 'FR': 'France',
4034 'GF': 'French Guiana',
4035 'PF': 'French Polynesia',
4036 'TF': 'French Southern Territories',
4037 'GA': 'Gabon',
4038 'GM': 'Gambia',
4039 'GE': 'Georgia',
4040 'DE': 'Germany',
4041 'GH': 'Ghana',
4042 'GI': 'Gibraltar',
4043 'GR': 'Greece',
4044 'GL': 'Greenland',
4045 'GD': 'Grenada',
4046 'GP': 'Guadeloupe',
4047 'GU': 'Guam',
4048 'GT': 'Guatemala',
4049 'GG': 'Guernsey',
4050 'GN': 'Guinea',
4051 'GW': 'Guinea-Bissau',
4052 'GY': 'Guyana',
4053 'HT': 'Haiti',
4054 'HM': 'Heard Island and McDonald Islands',
4055 'VA': 'Holy See (Vatican City State)',
4056 'HN': 'Honduras',
4057 'HK': 'Hong Kong',
4058 'HU': 'Hungary',
4059 'IS': 'Iceland',
4060 'IN': 'India',
4061 'ID': 'Indonesia',
4062 'IR': 'Iran, Islamic Republic of',
4063 'IQ': 'Iraq',
4064 'IE': 'Ireland',
4065 'IM': 'Isle of Man',
4066 'IL': 'Israel',
4067 'IT': 'Italy',
4068 'JM': 'Jamaica',
4069 'JP': 'Japan',
4070 'JE': 'Jersey',
4071 'JO': 'Jordan',
4072 'KZ': 'Kazakhstan',
4073 'KE': 'Kenya',
4074 'KI': 'Kiribati',
4075 'KP': 'Korea, Democratic People\'s Republic of',
4076 'KR': 'Korea, Republic of',
4077 'KW': 'Kuwait',
4078 'KG': 'Kyrgyzstan',
4079 'LA': 'Lao People\'s Democratic Republic',
4080 'LV': 'Latvia',
4081 'LB': 'Lebanon',
4082 'LS': 'Lesotho',
4083 'LR': 'Liberia',
4084 'LY': 'Libya',
4085 'LI': 'Liechtenstein',
4086 'LT': 'Lithuania',
4087 'LU': 'Luxembourg',
4088 'MO': 'Macao',
4089 'MK': 'Macedonia, the Former Yugoslav Republic of',
4090 'MG': 'Madagascar',
4091 'MW': 'Malawi',
4092 'MY': 'Malaysia',
4093 'MV': 'Maldives',
4094 'ML': 'Mali',
4095 'MT': 'Malta',
4096 'MH': 'Marshall Islands',
4097 'MQ': 'Martinique',
4098 'MR': 'Mauritania',
4099 'MU': 'Mauritius',
4100 'YT': 'Mayotte',
4101 'MX': 'Mexico',
4102 'FM': 'Micronesia, Federated States of',
4103 'MD': 'Moldova, Republic of',
4104 'MC': 'Monaco',
4105 'MN': 'Mongolia',
4106 'ME': 'Montenegro',
4107 'MS': 'Montserrat',
4108 'MA': 'Morocco',
4109 'MZ': 'Mozambique',
4110 'MM': 'Myanmar',
4111 'NA': 'Namibia',
4112 'NR': 'Nauru',
4113 'NP': 'Nepal',
4114 'NL': 'Netherlands',
4115 'NC': 'New Caledonia',
4116 'NZ': 'New Zealand',
4117 'NI': 'Nicaragua',
4118 'NE': 'Niger',
4119 'NG': 'Nigeria',
4120 'NU': 'Niue',
4121 'NF': 'Norfolk Island',
4122 'MP': 'Northern Mariana Islands',
4123 'NO': 'Norway',
4124 'OM': 'Oman',
4125 'PK': 'Pakistan',
4126 'PW': 'Palau',
4127 'PS': 'Palestine, State of',
4128 'PA': 'Panama',
4129 'PG': 'Papua New Guinea',
4130 'PY': 'Paraguay',
4131 'PE': 'Peru',
4132 'PH': 'Philippines',
4133 'PN': 'Pitcairn',
4134 'PL': 'Poland',
4135 'PT': 'Portugal',
4136 'PR': 'Puerto Rico',
4137 'QA': 'Qatar',
4138 'RE': 'Réunion',
4139 'RO': 'Romania',
4140 'RU': 'Russian Federation',
4141 'RW': 'Rwanda',
4142 'BL': 'Saint Barthélemy',
4143 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
4144 'KN': 'Saint Kitts and Nevis',
4145 'LC': 'Saint Lucia',
4146 'MF': 'Saint Martin (French part)',
4147 'PM': 'Saint Pierre and Miquelon',
4148 'VC': 'Saint Vincent and the Grenadines',
4149 'WS': 'Samoa',
4150 'SM': 'San Marino',
4151 'ST': 'Sao Tome and Principe',
4152 'SA': 'Saudi Arabia',
4153 'SN': 'Senegal',
4154 'RS': 'Serbia',
4155 'SC': 'Seychelles',
4156 'SL': 'Sierra Leone',
4157 'SG': 'Singapore',
4158 'SX': 'Sint Maarten (Dutch part)',
4159 'SK': 'Slovakia',
4160 'SI': 'Slovenia',
4161 'SB': 'Solomon Islands',
4162 'SO': 'Somalia',
4163 'ZA': 'South Africa',
4164 'GS': 'South Georgia and the South Sandwich Islands',
4165 'SS': 'South Sudan',
4166 'ES': 'Spain',
4167 'LK': 'Sri Lanka',
4168 'SD': 'Sudan',
4169 'SR': 'Suriname',
4170 'SJ': 'Svalbard and Jan Mayen',
4171 'SZ': 'Swaziland',
4172 'SE': 'Sweden',
4173 'CH': 'Switzerland',
4174 'SY': 'Syrian Arab Republic',
4175 'TW': 'Taiwan, Province of China',
4176 'TJ': 'Tajikistan',
4177 'TZ': 'Tanzania, United Republic of',
4178 'TH': 'Thailand',
4179 'TL': 'Timor-Leste',
4180 'TG': 'Togo',
4181 'TK': 'Tokelau',
4182 'TO': 'Tonga',
4183 'TT': 'Trinidad and Tobago',
4184 'TN': 'Tunisia',
4185 'TR': 'Turkey',
4186 'TM': 'Turkmenistan',
4187 'TC': 'Turks and Caicos Islands',
4188 'TV': 'Tuvalu',
4189 'UG': 'Uganda',
4190 'UA': 'Ukraine',
4191 'AE': 'United Arab Emirates',
4192 'GB': 'United Kingdom',
4193 'US': 'United States',
4194 'UM': 'United States Minor Outlying Islands',
4195 'UY': 'Uruguay',
4196 'UZ': 'Uzbekistan',
4197 'VU': 'Vanuatu',
4198 'VE': 'Venezuela, Bolivarian Republic of',
4199 'VN': 'Viet Nam',
4200 'VG': 'Virgin Islands, British',
4201 'VI': 'Virgin Islands, U.S.',
4202 'WF': 'Wallis and Futuna',
4203 'EH': 'Western Sahara',
4204 'YE': 'Yemen',
4205 'ZM': 'Zambia',
4206 'ZW': 'Zimbabwe',
4207 # Not ISO 3166 codes, but used for IP blocks
4208 'AP': 'Asia/Pacific Region',
4209 'EU': 'Europe',
4210 }
4211
4212 @classmethod
4213 def short2full(cls, code):
4214 """Convert an ISO 3166-2 country code to the corresponding full name"""
4215 return cls._country_map.get(code.upper())
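# e.g. ISO3166Utils.short2full('DE') == 'Germany'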
4216
4217
4218 class GeoUtils:
4219 # Major IPv4 address blocks per country
4220 _country_ip_map = {
4221 'AD': '46.172.224.0/19',
4222 'AE': '94.200.0.0/13',
4223 'AF': '149.54.0.0/17',
4224 'AG': '209.59.64.0/18',
4225 'AI': '204.14.248.0/21',
4226 'AL': '46.99.0.0/16',
4227 'AM': '46.70.0.0/15',
4228 'AO': '105.168.0.0/13',
4229 'AP': '182.50.184.0/21',
4230 'AQ': '23.154.160.0/24',
4231 'AR': '181.0.0.0/12',
4232 'AS': '202.70.112.0/20',
4233 'AT': '77.116.0.0/14',
4234 'AU': '1.128.0.0/11',
4235 'AW': '181.41.0.0/18',
4236 'AX': '185.217.4.0/22',
4237 'AZ': '5.197.0.0/16',
4238 'BA': '31.176.128.0/17',
4239 'BB': '65.48.128.0/17',
4240 'BD': '114.130.0.0/16',
4241 'BE': '57.0.0.0/8',
4242 'BF': '102.178.0.0/15',
4243 'BG': '95.42.0.0/15',
4244 'BH': '37.131.0.0/17',
4245 'BI': '154.117.192.0/18',
4246 'BJ': '137.255.0.0/16',
4247 'BL': '185.212.72.0/23',
4248 'BM': '196.12.64.0/18',
4249 'BN': '156.31.0.0/16',
4250 'BO': '161.56.0.0/16',
4251 'BQ': '161.0.80.0/20',
4252 'BR': '191.128.0.0/12',
4253 'BS': '24.51.64.0/18',
4254 'BT': '119.2.96.0/19',
4255 'BW': '168.167.0.0/16',
4256 'BY': '178.120.0.0/13',
4257 'BZ': '179.42.192.0/18',
4258 'CA': '99.224.0.0/11',
4259 'CD': '41.243.0.0/16',
4260 'CF': '197.242.176.0/21',
4261 'CG': '160.113.0.0/16',
4262 'CH': '85.0.0.0/13',
4263 'CI': '102.136.0.0/14',
4264 'CK': '202.65.32.0/19',
4265 'CL': '152.172.0.0/14',
4266 'CM': '102.244.0.0/14',
4267 'CN': '36.128.0.0/10',
4268 'CO': '181.240.0.0/12',
4269 'CR': '201.192.0.0/12',
4270 'CU': '152.206.0.0/15',
4271 'CV': '165.90.96.0/19',
4272 'CW': '190.88.128.0/17',
4273 'CY': '31.153.0.0/16',
4274 'CZ': '88.100.0.0/14',
4275 'DE': '53.0.0.0/8',
4276 'DJ': '197.241.0.0/17',
4277 'DK': '87.48.0.0/12',
4278 'DM': '192.243.48.0/20',
4279 'DO': '152.166.0.0/15',
4280 'DZ': '41.96.0.0/12',
4281 'EC': '186.68.0.0/15',
4282 'EE': '90.190.0.0/15',
4283 'EG': '156.160.0.0/11',
4284 'ER': '196.200.96.0/20',
4285 'ES': '88.0.0.0/11',
4286 'ET': '196.188.0.0/14',
4287 'EU': '2.16.0.0/13',
4288 'FI': '91.152.0.0/13',
4289 'FJ': '144.120.0.0/16',
4290 'FK': '80.73.208.0/21',
4291 'FM': '119.252.112.0/20',
4292 'FO': '88.85.32.0/19',
4293 'FR': '90.0.0.0/9',
4294 'GA': '41.158.0.0/15',
4295 'GB': '25.0.0.0/8',
4296 'GD': '74.122.88.0/21',
4297 'GE': '31.146.0.0/16',
4298 'GF': '161.22.64.0/18',
4299 'GG': '62.68.160.0/19',
4300 'GH': '154.160.0.0/12',
4301 'GI': '95.164.0.0/16',
4302 'GL': '88.83.0.0/19',
4303 'GM': '160.182.0.0/15',
4304 'GN': '197.149.192.0/18',
4305 'GP': '104.250.0.0/19',
4306 'GQ': '105.235.224.0/20',
4307 'GR': '94.64.0.0/13',
4308 'GT': '168.234.0.0/16',
4309 'GU': '168.123.0.0/16',
4310 'GW': '197.214.80.0/20',
4311 'GY': '181.41.64.0/18',
4312 'HK': '113.252.0.0/14',
4313 'HN': '181.210.0.0/16',
4314 'HR': '93.136.0.0/13',
4315 'HT': '148.102.128.0/17',
4316 'HU': '84.0.0.0/14',
4317 'ID': '39.192.0.0/10',
4318 'IE': '87.32.0.0/12',
4319 'IL': '79.176.0.0/13',
4320 'IM': '5.62.80.0/20',
4321 'IN': '117.192.0.0/10',
4322 'IO': '203.83.48.0/21',
4323 'IQ': '37.236.0.0/14',
4324 'IR': '2.176.0.0/12',
4325 'IS': '82.221.0.0/16',
4326 'IT': '79.0.0.0/10',
4327 'JE': '87.244.64.0/18',
4328 'JM': '72.27.0.0/17',
4329 'JO': '176.29.0.0/16',
4330 'JP': '133.0.0.0/8',
4331 'KE': '105.48.0.0/12',
4332 'KG': '158.181.128.0/17',
4333 'KH': '36.37.128.0/17',
4334 'KI': '103.25.140.0/22',
4335 'KM': '197.255.224.0/20',
4336 'KN': '198.167.192.0/19',
4337 'KP': '175.45.176.0/22',
4338 'KR': '175.192.0.0/10',
4339 'KW': '37.36.0.0/14',
4340 'KY': '64.96.0.0/15',
4341 'KZ': '2.72.0.0/13',
4342 'LA': '115.84.64.0/18',
4343 'LB': '178.135.0.0/16',
4344 'LC': '24.92.144.0/20',
4345 'LI': '82.117.0.0/19',
4346 'LK': '112.134.0.0/15',
4347 'LR': '102.183.0.0/16',
4348 'LS': '129.232.0.0/17',
4349 'LT': '78.56.0.0/13',
4350 'LU': '188.42.0.0/16',
4351 'LV': '46.109.0.0/16',
4352 'LY': '41.252.0.0/14',
4353 'MA': '105.128.0.0/11',
4354 'MC': '88.209.64.0/18',
4355 'MD': '37.246.0.0/16',
4356 'ME': '178.175.0.0/17',
4357 'MF': '74.112.232.0/21',
4358 'MG': '154.126.0.0/17',
4359 'MH': '117.103.88.0/21',
4360 'MK': '77.28.0.0/15',
4361 'ML': '154.118.128.0/18',
4362 'MM': '37.111.0.0/17',
4363 'MN': '49.0.128.0/17',
4364 'MO': '60.246.0.0/16',
4365 'MP': '202.88.64.0/20',
4366 'MQ': '109.203.224.0/19',
4367 'MR': '41.188.64.0/18',
4368 'MS': '208.90.112.0/22',
4369 'MT': '46.11.0.0/16',
4370 'MU': '105.16.0.0/12',
4371 'MV': '27.114.128.0/18',
4372 'MW': '102.70.0.0/15',
4373 'MX': '187.192.0.0/11',
4374 'MY': '175.136.0.0/13',
4375 'MZ': '197.218.0.0/15',
4376 'NA': '41.182.0.0/16',
4377 'NC': '101.101.0.0/18',
4378 'NE': '197.214.0.0/18',
4379 'NF': '203.17.240.0/22',
4380 'NG': '105.112.0.0/12',
4381 'NI': '186.76.0.0/15',
4382 'NL': '145.96.0.0/11',
4383 'NO': '84.208.0.0/13',
4384 'NP': '36.252.0.0/15',
4385 'NR': '203.98.224.0/19',
4386 'NU': '49.156.48.0/22',
4387 'NZ': '49.224.0.0/14',
4388 'OM': '5.36.0.0/15',
4389 'PA': '186.72.0.0/15',
4390 'PE': '186.160.0.0/14',
4391 'PF': '123.50.64.0/18',
4392 'PG': '124.240.192.0/19',
4393 'PH': '49.144.0.0/13',
4394 'PK': '39.32.0.0/11',
4395 'PL': '83.0.0.0/11',
4396 'PM': '70.36.0.0/20',
4397 'PR': '66.50.0.0/16',
4398 'PS': '188.161.0.0/16',
4399 'PT': '85.240.0.0/13',
4400 'PW': '202.124.224.0/20',
4401 'PY': '181.120.0.0/14',
4402 'QA': '37.210.0.0/15',
4403 'RE': '102.35.0.0/16',
4404 'RO': '79.112.0.0/13',
4405 'RS': '93.86.0.0/15',
4406 'RU': '5.136.0.0/13',
4407 'RW': '41.186.0.0/16',
4408 'SA': '188.48.0.0/13',
4409 'SB': '202.1.160.0/19',
4410 'SC': '154.192.0.0/11',
4411 'SD': '102.120.0.0/13',
4412 'SE': '78.64.0.0/12',
4413 'SG': '8.128.0.0/10',
4414 'SI': '188.196.0.0/14',
4415 'SK': '78.98.0.0/15',
4416 'SL': '102.143.0.0/17',
4417 'SM': '89.186.32.0/19',
4418 'SN': '41.82.0.0/15',
4419 'SO': '154.115.192.0/18',
4420 'SR': '186.179.128.0/17',
4421 'SS': '105.235.208.0/21',
4422 'ST': '197.159.160.0/19',
4423 'SV': '168.243.0.0/16',
4424 'SX': '190.102.0.0/20',
4425 'SY': '5.0.0.0/16',
4426 'SZ': '41.84.224.0/19',
4427 'TC': '65.255.48.0/20',
4428 'TD': '154.68.128.0/19',
4429 'TG': '196.168.0.0/14',
4430 'TH': '171.96.0.0/13',
4431 'TJ': '85.9.128.0/18',
4432 'TK': '27.96.24.0/21',
4433 'TL': '180.189.160.0/20',
4434 'TM': '95.85.96.0/19',
4435 'TN': '197.0.0.0/11',
4436 'TO': '175.176.144.0/21',
4437 'TR': '78.160.0.0/11',
4438 'TT': '186.44.0.0/15',
4439 'TV': '202.2.96.0/19',
4440 'TW': '120.96.0.0/11',
4441 'TZ': '156.156.0.0/14',
4442 'UA': '37.52.0.0/14',
4443 'UG': '102.80.0.0/13',
4444 'US': '6.0.0.0/8',
4445 'UY': '167.56.0.0/13',
4446 'UZ': '84.54.64.0/18',
4447 'VA': '212.77.0.0/19',
4448 'VC': '207.191.240.0/21',
4449 'VE': '186.88.0.0/13',
4450 'VG': '66.81.192.0/20',
4451 'VI': '146.226.0.0/16',
4452 'VN': '14.160.0.0/11',
4453 'VU': '202.80.32.0/20',
4454 'WF': '117.20.32.0/21',
4455 'WS': '202.4.32.0/19',
4456 'YE': '134.35.0.0/16',
4457 'YT': '41.242.116.0/22',
4458 'ZA': '41.0.0.0/11',
4459 'ZM': '102.144.0.0/13',
4460 'ZW': '102.177.192.0/18',
4461 }
4462
4463 @classmethod
4464 def random_ipv4(cls, code_or_block):
4465 if len(code_or_block) == 2:
4466 block = cls._country_ip_map.get(code_or_block.upper())
4467 if not block:
4468 return None
4469 else:
4470 block = code_or_block
4471 addr, preflen = block.split('/')
4472 addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
4473 addr_max = addr_min | (0xffffffff >> int(preflen))
4474 return compat_str(socket.inet_ntoa(
4475 compat_struct_pack('!L', random.randint(addr_min, addr_max))))
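# e.g. GeoUtils.random_ipv4('US') returns an address inside 6.0.0.0/8, while an
# explicit block such as GeoUtils.random_ipv4('5.62.80.0/20') is used as-is;
# only the host bits (32 - prefix length) are randomized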
4476
4477
4478 class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
4479 def __init__(self, proxies=None):
4480 # Set default handlers
4481 for type in ('http', 'https'):
4482 setattr(self, '%s_open' % type,
4483 lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
4484 meth(r, proxy, type))
4485 compat_urllib_request.ProxyHandler.__init__(self, proxies)
4486
4487 def proxy_open(self, req, proxy, type):
4488 req_proxy = req.headers.get('Ytdl-request-proxy')
4489 if req_proxy is not None:
4490 proxy = req_proxy
4491 del req.headers['Ytdl-request-proxy']
4492
4493 if proxy == '__noproxy__':
4494 return None # No Proxy
4495 if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
4496 req.add_header('Ytdl-socks-proxy', proxy)
4497 # yt-dlp's http/https handlers themselves wrap the socket with socks
4498 return None
4499 return compat_urllib_request.ProxyHandler.proxy_open(
4500 self, req, proxy, type)
4501
4502
4503 # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
4504 # released into Public Domain
4505 # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
4506
4507 def long_to_bytes(n, blocksize=0):
4508 """long_to_bytes(n:long, blocksize:int) : string
4509 Convert a long integer to a byte string.
4510
4511 If optional blocksize is given and greater than zero, pad the front of the
4512 byte string with binary zeros so that the length is a multiple of
4513 blocksize.
4514 """
4515 # after much testing, this algorithm was deemed to be the fastest
4516 s = b''
4517 n = int(n)
4518 while n > 0:
4519 s = compat_struct_pack('>I', n & 0xffffffff) + s
4520 n = n >> 32
4521 # strip off leading zeros
4522 for i in range(len(s)):
4523 if s[i] != b'\000'[0]:
4524 break
4525 else:
4526 # only happens when n == 0
4527 s = b'\000'
4528 i = 0
4529 s = s[i:]
4530 # add back some pad bytes. this could be done more efficiently w.r.t. the
4531 # de-padding being done above, but sigh...
4532 if blocksize > 0 and len(s) % blocksize:
4533 s = (blocksize - len(s) % blocksize) * b'\000' + s
4534 return s
4535
4536
4537 def bytes_to_long(s):
4538 """bytes_to_long(string) : long
4539 Convert a byte string to a long integer.
4540
4541 This is (essentially) the inverse of long_to_bytes().
4542 """
4543 acc = 0
4544 length = len(s)
4545 if length % 4:
4546 extra = (4 - length % 4)
4547 s = b'\000' * extra + s
4548 length = length + extra
4549 for i in range(0, length, 4):
4550 acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
4551 return acc
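# These two are inverses of each other, e.g.:
#   bytes_to_long(b'\x01\x00') == 256
#   long_to_bytes(256) == b'\x01\x00'
#   long_to_bytes(256, blocksize=4) == b'\x00\x00\x01\x00'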
4552
4553
4554 def ohdave_rsa_encrypt(data, exponent, modulus):
4555 '''
4556 Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
4557
4558 Input:
4559 data: data to encrypt, bytes-like object
4560 exponent, modulus: parameter e and N of RSA algorithm, both integer
4561 Output: hex string of encrypted data
4562
4563 Limitation: supports one block encryption only
4564 '''
4565
4566 payload = int(binascii.hexlify(data[::-1]), 16)
4567 encrypted = pow(payload, exponent, modulus)
4568 return '%x' % encrypted
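# A toy example (real-world moduli are much larger): the input is interpreted
# little-endian, so ohdave_rsa_encrypt(b'\x02', 3, 33) == '%x' % pow(2, 3, 33) == '8'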
4569
4570
4571 def pkcs1pad(data, length):
4572 """
4573 Padding input data with PKCS#1 scheme
4574
4575 @param {int[]} data input data
4576 @param {int} length target length
4577 @returns {int[]} padded data
4578 """
4579 if len(data) > length - 11:
4580 raise ValueError('Input data too long for PKCS#1 padding')
4581
4582 pseudo_random = [random.randint(1, 255) for _ in range(length - len(data) - 3)]  # PKCS#1 requires non-zero padding bytes
4583 return [0, 2] + pseudo_random + [0] + data
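# e.g. padded = pkcs1pad(list(b'msg'), 128); then len(padded) == 128 and the
# result has the EB = 00 || 02 || PS || 00 || D layout of PKCS#1 v1.5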
4584
4585
4586 def encode_base_n(num, n, table=None):
4587 FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
4588 if not table:
4589 table = FULL_TABLE[:n]
4590
4591 if n > len(table):
4592 raise ValueError('base %d exceeds table length %d' % (n, len(table)))
4593
4594 if num == 0:
4595 return table[0]
4596
4597 ret = ''
4598 while num:
4599 ret = table[num % n] + ret
4600 num = num // n
4601 return ret
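# e.g. encode_base_n(255, 16) == 'ff' and encode_base_n(5, 2) == '101';
# a custom alphabet can be supplied via `table`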
4602
4603
4604 def decode_packed_codes(code):
4605 mobj = re.search(PACKED_CODES_RE, code)
4606 obfuscated_code, base, count, symbols = mobj.groups()
4607 base = int(base)
4608 count = int(count)
4609 symbols = symbols.split('|')
4610 symbol_table = {}
4611
4612 while count:
4613 count -= 1
4614 base_n_count = encode_base_n(count, base)
4615 symbol_table[base_n_count] = symbols[count] or base_n_count
4616
4617 return re.sub(
4618 r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
4619 obfuscated_code)
4620
4621
4622 def caesar(s, alphabet, shift):
4623 if shift == 0:
4624 return s
4625 l = len(alphabet)
4626 return ''.join(
4627 alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
4628 for c in s)
4629
4630
4631 def rot47(s):
4632 return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)
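# e.g. caesar('abc', 'abcdefghijklmnopqrstuvwxyz', 1) == 'bcd' (characters outside
# the alphabet pass through unchanged); since rot47's alphabet spans all 94
# printable ASCII characters, it is its own inverse: rot47(rot47(s)) == s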
4633
4634
4635 def parse_m3u8_attributes(attrib):
4636 info = {}
4637 for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
4638 if val.startswith('"'):
4639 val = val[1:-1]
4640 info[key] = val
4641 return info
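# e.g. parse_m3u8_attributes('BANDWIDTH=1280000,CODECS="mp4a.40.2,avc1.64001f"')
#      == {'BANDWIDTH': '1280000', 'CODECS': 'mp4a.40.2,avc1.64001f'}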
4642
4643
4644 def urshift(val, n):
4645 return val >> n if val >= 0 else (val + 0x100000000) >> n
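# Emulates JavaScript's unsigned right shift (>>>) for 32-bit values,
# e.g. urshift(-1, 1) == 0x7fffffff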
4646
4647
4648 # Based on png2str() written by @gdkchan and improved by @yokrysty
4649 # Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
4650 def decode_png(png_data):
4651 # Reference: https://www.w3.org/TR/PNG/
4652 header = png_data[8:]
4653
4654 if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
4655 raise OSError('Not a valid PNG file.')
4656
4657 int_map = {1: '>B', 2: '>H', 4: '>I'}
4658 unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]
4659
4660 chunks = []
4661
4662 while header:
4663 length = unpack_integer(header[:4])
4664 header = header[4:]
4665
4666 chunk_type = header[:4]
4667 header = header[4:]
4668
4669 chunk_data = header[:length]
4670 header = header[length:]
4671
4672 header = header[4:] # Skip CRC
4673
4674 chunks.append({
4675 'type': chunk_type,
4676 'length': length,
4677 'data': chunk_data
4678 })
4679
4680 ihdr = chunks[0]['data']
4681
4682 width = unpack_integer(ihdr[:4])
4683 height = unpack_integer(ihdr[4:8])
4684
4685 idat = b''
4686
4687 for chunk in chunks:
4688 if chunk['type'] == b'IDAT':
4689 idat += chunk['data']
4690
4691 if not idat:
4692 raise OSError('Unable to read PNG data.')
4693
4694 decompressed_data = bytearray(zlib.decompress(idat))
4695
4696 stride = width * 3
4697 pixels = []
4698
4699 def _get_pixel(idx):
4700 x = idx % stride
4701 y = idx // stride
4702 return pixels[y][x]
4703
4704 for y in range(height):
4705 basePos = y * (1 + stride)
4706 filter_type = decompressed_data[basePos]
4707
4708 current_row = []
4709
4710 pixels.append(current_row)
4711
4712 for x in range(stride):
4713 color = decompressed_data[1 + basePos + x]
4714 basex = y * stride + x
4715 left = 0
4716 up = 0
4717
4718 if x > 2:
4719 left = _get_pixel(basex - 3)
4720 if y > 0:
4721 up = _get_pixel(basex - stride)
4722
4723 if filter_type == 1: # Sub
4724 color = (color + left) & 0xff
4725 elif filter_type == 2: # Up
4726 color = (color + up) & 0xff
4727 elif filter_type == 3: # Average
4728 color = (color + ((left + up) >> 1)) & 0xff
4729 elif filter_type == 4: # Paeth
4730 a = left
4731 b = up
4732 c = 0
4733
4734 if x > 2 and y > 0:
4735 c = _get_pixel(basex - stride - 3)
4736
4737 p = a + b - c
4738
4739 pa = abs(p - a)
4740 pb = abs(p - b)
4741 pc = abs(p - c)
4742
4743 if pa <= pb and pa <= pc:
4744 color = (color + a) & 0xff
4745 elif pb <= pc:
4746 color = (color + b) & 0xff
4747 else:
4748 color = (color + c) & 0xff
4749
4750 current_row.append(color)
4751
4752 return width, height, pixels
4753
4754
4755 def write_xattr(path, key, value):
4756 # Windows: Write xattrs to NTFS Alternate Data Streams:
4757 # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
4758 if compat_os_name == 'nt':
4759 assert ':' not in key
4760 assert os.path.exists(path)
4761
4762 try:
4763 with open(f'{path}:{key}', 'wb') as f:
4764 f.write(value)
4765 except OSError as e:
4766 raise XAttrMetadataError(e.errno, e.strerror)
4767 return
4768
4769 # UNIX Method 1. Use the xattr/pyxattr modules
4770 from .dependencies import xattr
4771
4772 setxattr = None
4773 if getattr(xattr, '_yt_dlp__identifier', None) == 'pyxattr':
4774 # Unicode arguments are not supported in pyxattr until version 0.5.0
4775 # See https://github.com/ytdl-org/youtube-dl/issues/5498
4776 if version_tuple(xattr.__version__) >= (0, 5, 0):
4777 setxattr = xattr.set
4778 elif xattr:
4779 setxattr = xattr.setxattr
4780
4781 if setxattr:
4782 try:
4783 setxattr(path, key, value)
4784 except OSError as e:
4785 raise XAttrMetadataError(e.errno, e.strerror)
4786 return
4787
4788 # UNIX Method 2. Use setfattr/xattr executables
4789 exe = ('setfattr' if check_executable('setfattr', ['--version'])
4790 else 'xattr' if check_executable('xattr', ['-h']) else None)
4791 if not exe:
4792 raise XAttrUnavailableError(
4793 'Couldn\'t find a tool to set the xattrs. Install either the python "xattr" or "pyxattr" modules or the '
4794 + ('"xattr" binary' if sys.platform != 'linux' else 'GNU "attr" package (which contains the "setfattr" tool)'))
4795
4796 value = value.decode()
4797 try:
4798 p = Popen(
4799 [exe, '-w', key, value, path] if exe == 'xattr' else [exe, '-n', key, '-v', value, path],
4800 stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
4801 except OSError as e:
4802 raise XAttrMetadataError(e.errno, e.strerror)
4803 stderr = p.communicate_or_kill()[1].decode('utf-8', 'replace')
4804 if p.returncode:
4805 raise XAttrMetadataError(p.returncode, stderr)
4806
4807
4808 def random_birthday(year_field, month_field, day_field):
4809 start_date = datetime.date(1950, 1, 1)
4810 end_date = datetime.date(1995, 12, 31)
4811 offset = random.randint(0, (end_date - start_date).days)
4812 random_date = start_date + datetime.timedelta(offset)
4813 return {
4814 year_field: str(random_date.year),
4815 month_field: str(random_date.month),
4816 day_field: str(random_date.day),
4817 }
4818
4819
4820 # Templates for internet shortcut files, which are plain text files.
4821 DOT_URL_LINK_TEMPLATE = '''\
4822 [InternetShortcut]
4823 URL=%(url)s
4824 '''
4825
4826 DOT_WEBLOC_LINK_TEMPLATE = '''\
4827 <?xml version="1.0" encoding="UTF-8"?>
4828 <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
4829 <plist version="1.0">
4830 <dict>
4831 \t<key>URL</key>
4832 \t<string>%(url)s</string>
4833 </dict>
4834 </plist>
4835 '''
4836
4837 DOT_DESKTOP_LINK_TEMPLATE = '''\
4838 [Desktop Entry]
4839 Encoding=UTF-8
4840 Name=%(filename)s
4841 Type=Link
4842 URL=%(url)s
4843 Icon=text-html
4844 '''
4845
4846 LINK_TEMPLATES = {
4847 'url': DOT_URL_LINK_TEMPLATE,
4848 'desktop': DOT_DESKTOP_LINK_TEMPLATE,
4849 'webloc': DOT_WEBLOC_LINK_TEMPLATE,
4850 }
4851
4852
4853 def iri_to_uri(iri):
4854 """
4855 Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).
4856
4857 The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact.
4858 """
4859
4860 iri_parts = compat_urllib_parse_urlparse(iri)
4861
4862 if '[' in iri_parts.netloc:
4863 raise ValueError('IPv6 URIs are not yet supported.')
4864 # Querying `.netloc`, when there's only one bracket, also raises a ValueError.
4865
4866 # The `safe` argument values, that the following code uses, contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.
4867
4868 net_location = ''
4869 if iri_parts.username:
4870 net_location += urllib.parse.quote(iri_parts.username, safe=r"!$%&'()*+,~")
4871 if iri_parts.password is not None:
4872 net_location += ':' + urllib.parse.quote(iri_parts.password, safe=r"!$%&'()*+,~")
4873 net_location += '@'
4874
4875 net_location += iri_parts.hostname.encode('idna').decode() # Punycode for Unicode hostnames.
4876 # The 'idna' encoding produces ASCII text.
4877 if iri_parts.port is not None and iri_parts.port != 80:
4878 net_location += ':' + str(iri_parts.port)
4879
4880 return urllib.parse.urlunparse(
4881 (iri_parts.scheme,
4882 net_location,
4883
4884 urllib.parse.quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),
4885
4886 # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
4887 urllib.parse.quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),
4888
4889 # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
4890 urllib.parse.quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),
4891
4892 urllib.parse.quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))
4893
4894 # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
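# e.g. iri_to_uri('http://example.com/días') == 'http://example.com/d%C3%ADas',
# while an ASCII URI that is already percent-escaped is left intact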
4895
4896
4897 def to_high_limit_path(path):
4898 if sys.platform in ['win32', 'cygwin']:
4899 # Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited.
4900 return '\\\\?\\' + os.path.abspath(path)
4901
4902 return path
4903
4904
4905 def format_field(obj, field=None, template='%s', ignore=NO_DEFAULT, default='', func=None):
4906 val = traverse_obj(obj, *variadic(field))
4907 if (not val and val != 0) if ignore is NO_DEFAULT else val in ignore:
4908 return default
4909 return template % (func(val) if func else val)
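# For example:
#   format_field({'height': 720}, 'height', '%sp') == '720p'
#   format_field({}, 'height', '%sp') == ''  (missing/falsy values yield `default`)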
4910
4911
4912 def clean_podcast_url(url):
4913 return re.sub(r'''(?x)
4914 (?:
4915 (?:
4916 chtbl\.com/track|
4917 media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
4918 play\.podtrac\.com
4919 )/[^/]+|
4920 (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
4921 flex\.acast\.com|
4922 pd(?:
4923 cn\.co| # https://podcorn.com/analytics-prefix/
4924 st\.fm # https://podsights.com/docs/
4925 )/e
4926 )/''', '', url)
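# e.g. (sketch, hypothetical hosts) the tracking prefix is stripped:
#   clean_podcast_url('https://chtbl.com/track/XXXXX/traffic.example.com/ep.mp3')
#   == 'https://traffic.example.com/ep.mp3'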
4927
4928
4929 _HEX_TABLE = '0123456789abcdef'
4930
4931
4932 def random_uuidv4():
4933 return re.sub(r'[xy]', lambda x: _HEX_TABLE[random.randint(0, 15)], 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')
4934
4935
4936 def make_dir(path, to_screen=None):
4937 try:
4938 dn = os.path.dirname(path)
4939 if dn and not os.path.exists(dn):
4940 os.makedirs(dn)
4941 return True
4942 except OSError as err:
4943 if callable(to_screen):
4944 to_screen('unable to create directory ' + error_to_compat_str(err))
4945 return False
4946
4947
4948 def get_executable_path():
4949 from .update import _get_variant_and_executable_path
4950
4951 return os.path.dirname(os.path.abspath(_get_variant_and_executable_path()[1]))
4952
4953
4954 def load_plugins(name, suffix, namespace):
4955 classes = {}
4956 with contextlib.suppress(FileNotFoundError):
4957 plugins_spec = importlib.util.spec_from_file_location(
4958 name, os.path.join(get_executable_path(), 'ytdlp_plugins', name, '__init__.py'))
4959 plugins = importlib.util.module_from_spec(plugins_spec)
4960 sys.modules[plugins_spec.name] = plugins
4961 plugins_spec.loader.exec_module(plugins)
4962 for name in dir(plugins):
4963 if name in namespace:
4964 continue
4965 if not name.endswith(suffix):
4966 continue
4967 klass = getattr(plugins, name)
4968 classes[name] = namespace[name] = klass
4969 return classes
4970
4971
4972 def traverse_obj(
4973 obj, *path_list, default=None, expected_type=None, get_all=True,
4974 casesense=True, is_user_input=False, traverse_string=False):
4975 ''' Traverse nested list/dict/tuple
4976 @param path_list A list of paths which are checked one by one.
4977 Each path is a list of keys where each key is a:
4978 - None: Do nothing
4979 - string: A dictionary key
4980 - int: An index into a list
4981 - tuple: A list of keys all of which will be traversed
4982 - Ellipsis: Fetch all values in the object
4983 - Function: Takes the key and value as arguments
4984 and returns whether the key matches or not
4985 @param default Default value to return
4986 @param expected_type Only accept final value of this type (Can also be any callable)
4987 @param get_all Return all the values obtained from a path or only the first one
4988 @param casesense Whether to consider dictionary keys as case sensitive
4989 @param is_user_input Whether the keys are generated from user input. If True,
4990 strings are converted to int/slice if necessary
4991 @param traverse_string Whether to traverse inside strings. If True, any
4992 non-compatible object will also be converted into a string
4993 # TODO: Write tests
4994 '''
4995 if not casesense:
4996 _lower = lambda k: (k.lower() if isinstance(k, str) else k)
4997 path_list = (map(_lower, variadic(path)) for path in path_list)
4998
4999 def _traverse_obj(obj, path, _current_depth=0):
5000 nonlocal depth
5001 path = tuple(variadic(path))
5002 for i, key in enumerate(path):
5003 if None in (key, obj):
5004 return obj
5005 if isinstance(key, (list, tuple)):
5006 obj = [_traverse_obj(obj, sub_key, _current_depth) for sub_key in key]
5007 key = ...
5008 if key is ...:
5009 obj = (obj.values() if isinstance(obj, dict)
5010 else obj if isinstance(obj, (list, tuple, LazyList))
5011 else str(obj) if traverse_string else [])
5012 _current_depth += 1
5013 depth = max(depth, _current_depth)
5014 return [_traverse_obj(inner_obj, path[i + 1:], _current_depth) for inner_obj in obj]
5015 elif callable(key):
5016 if isinstance(obj, (list, tuple, LazyList)):
5017 obj = enumerate(obj)
5018 elif isinstance(obj, dict):
5019 obj = obj.items()
5020 else:
5021 if not traverse_string:
5022 return None
5023 obj = str(obj)
5024 _current_depth += 1
5025 depth = max(depth, _current_depth)
5026 return [_traverse_obj(v, path[i + 1:], _current_depth) for k, v in obj if try_call(key, args=(k, v))]
5027 elif isinstance(obj, dict) and not (is_user_input and key == ':'):
5028 obj = (obj.get(key) if casesense or (key in obj)
5029 else next((v for k, v in obj.items() if _lower(k) == key), None))
5030 else:
5031 if is_user_input:
5032 key = (int_or_none(key) if ':' not in key
5033 else slice(*map(int_or_none, key.split(':'))))
5034 if key == slice(None):
5035 return _traverse_obj(obj, (..., *path[i + 1:]), _current_depth)
5036 if not isinstance(key, (int, slice)):
5037 return None
5038 if not isinstance(obj, (list, tuple, LazyList)):
5039 if not traverse_string:
5040 return None
5041 obj = str(obj)
5042 try:
5043 obj = obj[key]
5044 except IndexError:
5045 return None
5046 return obj
5047
5048 if isinstance(expected_type, type):
5049 type_test = lambda val: val if isinstance(val, expected_type) else None
5050 elif expected_type is not None:
5051 type_test = expected_type
5052 else:
5053 type_test = lambda val: val
5054
5055 for path in path_list:
5056 depth = 0
5057 val = _traverse_obj(obj, path)
5058 if val is not None:
5059 if depth:
5060 for _ in range(depth - 1):
5061 val = itertools.chain.from_iterable(v for v in val if v is not None)
5062 val = [v for v in map(type_test, val) if v is not None]
5063 if val:
5064 return val if get_all else val[0]
5065 else:
5066 val = type_test(val)
5067 if val is not None:
5068 return val
5069 return default
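# A few usage sketches (see the TODO above; these double as informal tests):
#   traverse_obj({'a': [{'b': 1}, {'b': 2}]}, ('a', ..., 'b')) == [1, 2]
#   traverse_obj({'a': [{'b': 1}, {'b': 2}]}, ('a', 0, 'b')) == 1
#   traverse_obj({'a': 1}, 'x', 'a') == 1  (the first path yielding a value wins)
#   traverse_obj({'a': 'x'}, ('a',), expected_type=int) is None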
5070
5071
5072 def traverse_dict(dictn, keys, casesense=True):
5073 write_string('DeprecationWarning: yt_dlp.utils.traverse_dict is deprecated '
5074 'and may be removed in a future version. Use yt_dlp.utils.traverse_obj instead')
5075 return traverse_obj(dictn, keys, casesense=casesense, is_user_input=True, traverse_string=True)
5076
5077
5078 def get_first(obj, keys, **kwargs):
5079 return traverse_obj(obj, (..., *variadic(keys)), **kwargs, get_all=False)
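# e.g. get_first([{'id': None}, {'id': 'x'}], 'id') == 'x'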
5080
5081
5082 def variadic(x, allowed_types=(str, bytes, dict)):
5083 return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,)
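# e.g. variadic('spam') == ('spam',) but variadic(['spam']) == ['spam'];
# str/bytes/dict are treated as single values, other iterables pass through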
5084
5085
5086 def decode_base(value, digits):
5087 # Convert the given base-x string to an integer
5088 table = {char: index for index, char in enumerate(digits)}
5089 result = 0
5090 base = len(digits)
5091 for chr in value:
5092 result *= base
5093 result += table[chr]
5094 return result
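# e.g. decode_base('ff', '0123456789abcdef') == 255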
5095
5096
5097 def time_seconds(**kwargs):
5098 t = datetime.datetime.now(datetime.timezone(datetime.timedelta(**kwargs)))
5099 return t.timestamp()
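# Note: the offset only determines the timezone of the intermediate datetime;
# the returned value is the current Unix timestamp either way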
5100
5101
5102 # Create a JSON Web Signature (JWS) with the HS256 algorithm
5103 # The resulting format is JWS Compact Serialization
5104 # Implemented following JWT https://www.rfc-editor.org/rfc/rfc7519.html
5105 # Implemented following JWS https://www.rfc-editor.org/rfc/rfc7515.html
5106 def jwt_encode_hs256(payload_data, key, headers={}):
5107 header_data = {
5108 'alg': 'HS256',
5109 'typ': 'JWT',
5110 }
5111 if headers:
5112 header_data.update(headers)
5113 header_b64 = base64.b64encode(json.dumps(header_data).encode())
5114 payload_b64 = base64.b64encode(json.dumps(payload_data).encode())
5115 h = hmac.new(key.encode(), header_b64 + b'.' + payload_b64, hashlib.sha256)
5116 signature_b64 = base64.b64encode(h.digest())
5117 token = header_b64 + b'.' + payload_b64 + b'.' + signature_b64
5118 return token
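# e.g. (sketch) jwt_encode_hs256({'sub': 'user'}, 'secret') returns
# b'<header_b64>.<payload_b64>.<signature_b64>'; note that standard
# (not URL-safe) base64 is used for the encoding here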
5119
5120
5121 # Can be extended in the future to verify the signature and to parse the header and return the algorithm used if it's not HS256
5122 def jwt_decode_hs256(jwt):
5123 header_b64, payload_b64, signature_b64 = jwt.split('.')
5124 payload_data = json.loads(base64.urlsafe_b64decode(f'{payload_b64}==='))  # re-add the ='s that may have been stripped; superfluous ='s are ignored
5125 return payload_data
5126
5127
5128 WINDOWS_VT_MODE = False if compat_os_name == 'nt' else None
5129
5130
5131 @functools.cache
5132 def supports_terminal_sequences(stream):
5133 if compat_os_name == 'nt':
5134 if not WINDOWS_VT_MODE:
5135 return False
5136 elif not os.getenv('TERM'):
5137 return False
5138 try:
5139 return stream.isatty()
5140 except BaseException:
5141 return False
5142
5143
5144 def windows_enable_vt_mode(): # TODO: Do this the proper way https://bugs.python.org/issue30075
5145 if get_windows_version() < (10, 0, 10586):
5146 return
5147 global WINDOWS_VT_MODE
5148 startupinfo = subprocess.STARTUPINFO()
5149 startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
5150 try:
5151 subprocess.Popen('', shell=True, startupinfo=startupinfo).wait()
5152 except Exception:
5153 return
5154
5155 WINDOWS_VT_MODE = True
5156 supports_terminal_sequences.cache_clear()
5157
5158
5159 _terminal_sequences_re = re.compile('\033\\[[^m]+m')
5160
5161
5162 def remove_terminal_sequences(string):
5163 return _terminal_sequences_re.sub('', string)
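# e.g. remove_terminal_sequences('\033[32mOK\033[0m') == 'OK'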
5164
5165
5166 def number_of_digits(number):
5167 return len('%d' % number)
5168
5169
5170 def join_nonempty(*values, delim='-', from_dict=None):
5171 if from_dict is not None:
5172 values = map(from_dict.get, values)
5173 return delim.join(map(str, filter(None, values)))
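# For example:
#   join_nonempty('a', None, '', 'b') == 'a-b'
#   join_nonempty('id', 'ext', from_dict={'id': '42', 'ext': 'mp4'}) == '42-mp4'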
5174
5175
5176 def scale_thumbnails_to_max_format_width(formats, thumbnails, url_width_re):
5177 """
5178 Find the largest format dimensions in terms of video width and, for each thumbnail:
5179 * Modify the URL: Match the width with the provided regex and replace with the former width
5180 * Update dimensions
5181
5182 This function is useful with video services that scale the provided thumbnails on demand
5183 """
5184 _keys = ('width', 'height')
5185 max_dimensions = max(
5186 (tuple(format.get(k) or 0 for k in _keys) for format in formats),
5187 default=(0, 0))
5188 if not max_dimensions[0]:
5189 return thumbnails
5190 return [
5191 merge_dicts(
5192 {'url': re.sub(url_width_re, str(max_dimensions[0]), thumbnail['url'])},
5193 dict(zip(_keys, max_dimensions)), thumbnail)
5194 for thumbnail in thumbnails
5195 ]
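# e.g. (sketch, hypothetical URL scheme) with formats [{'width': 1280, 'height': 720}],
# a thumbnail {'url': '.../w300/thumb.jpg'} and url_width_re=r'(?<=/w)\d+',
# the result is {'url': '.../w1280/thumb.jpg', 'width': 1280, 'height': 720}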
5196
5197
5198 def parse_http_range(range):
5199 """ Parse value of "Range" or "Content-Range" HTTP header into tuple. """
5200 if not range:
5201 return None, None, None
5202 crg = re.search(r'bytes[ =](\d+)-(\d+)?(?:/(\d+))?', range)
5203 if not crg:
5204 return None, None, None
5205 return int(crg.group(1)), int_or_none(crg.group(2)), int_or_none(crg.group(3))
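# For example:
#   parse_http_range('bytes=0-499') == (0, 499, None)
#   parse_http_range('bytes 0-499/1234') == (0, 499, 1234)  (Content-Range form)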
5206
5207
5208 def read_stdin(what):
5209 eof = 'Ctrl+Z' if compat_os_name == 'nt' else 'Ctrl+D'
5210 write_string(f'Reading {what} from STDIN - EOF ({eof}) to end:\n')
5211 return sys.stdin
5212
5213
5214 class Config:
5215 own_args = None
5216 parsed_args = None
5217 filename = None
5218 __initialized = False
5219
5220 def __init__(self, parser, label=None):
5221 self.parser, self.label = parser, label
5222 self._loaded_paths, self.configs = set(), []
5223
5224 def init(self, args=None, filename=None):
5225 assert not self.__initialized
5226 directory = ''
5227 if filename:
5228 location = os.path.realpath(filename)
5229 directory = os.path.dirname(location)
5230 if location in self._loaded_paths:
5231 return False
5232 self._loaded_paths.add(location)
5233
5234 self.own_args, self.__initialized = args, True
5235 opts, _ = self.parser.parse_known_args(args)
5236 self.parsed_args, self.filename = args, filename
5237
5238 for location in opts.config_locations or []:
5239 if location == '-':
5240 self.append_config(shlex.split(read_stdin('options'), comments=True), label='stdin')
5241 continue
5242 location = os.path.join(directory, expand_path(location))
5243 if os.path.isdir(location):
5244 location = os.path.join(location, 'yt-dlp.conf')
5245 if not os.path.exists(location):
5246 self.parser.error(f'config location {location} does not exist')
5247 self.append_config(self.read_file(location), location)
5248 return True
5249
5250 def __str__(self):
5251 label = join_nonempty(
5252 self.label, 'config', f'"{self.filename}"' if self.filename else '',
5253 delim=' ')
5254 return join_nonempty(
5255 self.own_args is not None and f'{label[0].upper()}{label[1:]}: {self.hide_login_info(self.own_args)}',
5256 *(f'\n{c}'.replace('\n', '\n| ')[1:] for c in self.configs),
5257 delim='\n')
5258
5259 @staticmethod
5260 def read_file(filename, default=[]):
5261 try:
5262 optionf = open(filename)
5263 except OSError:
5264 return default # silently skip if file is not present
5265 try:
5266 # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
5267 contents = optionf.read()
5268 res = shlex.split(contents, comments=True)
5269 finally:
5270 optionf.close()
5271 return res
5272
5273 @staticmethod
5274 def hide_login_info(opts):
5275 PRIVATE_OPTS = {'-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'}
5276 eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')
5277
5278 def _scrub_eq(o):
5279 m = eqre.match(o)
5280 if m:
5281 return m.group('key') + '=PRIVATE'
5282 else:
5283 return o
5284
5285 opts = list(map(_scrub_eq, opts))
5286 for idx, opt in enumerate(opts):
5287 if opt in PRIVATE_OPTS and idx + 1 < len(opts):
5288 opts[idx + 1] = 'PRIVATE'
5289 return opts
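# e.g. Config.hide_login_info(['-u', 'me', '--password=secret'])
#      == ['-u', 'PRIVATE', '--password=PRIVATE']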
5290
5291 def append_config(self, *args, label=None):
5292 config = type(self)(self.parser, label)
5293 config._loaded_paths = self._loaded_paths
5294 if config.init(*args):
5295 self.configs.append(config)
5296
5297 @property
5298 def all_args(self):
5299 for config in reversed(self.configs):
5300 yield from config.all_args
5301 yield from self.parsed_args or []
5302
5303 def parse_known_args(self, **kwargs):
5304 return self.parser.parse_known_args(self.all_args, **kwargs)
5305
5306 def parse_args(self):
5307 return self.parser.parse_args(self.all_args)
5308
5309
5310 class WebSocketsWrapper:
5311 """Wraps websockets module to use in non-async scopes"""
5312 pool = None
5313
5314 def __init__(self, url, headers=None, connect=True):
5315 self.loop = asyncio.new_event_loop()
5316 # XXX: "loop" is deprecated
5317 self.conn = websockets.connect(
5318 url, extra_headers=headers, ping_interval=None,
5319 close_timeout=float('inf'), loop=self.loop, ping_timeout=float('inf'))
5320 if connect:
5321 self.__enter__()
5322 atexit.register(self.__exit__, None, None, None)
5323
5324 def __enter__(self):
5325 if not self.pool:
5326 self.pool = self.run_with_loop(self.conn.__aenter__(), self.loop)
5327 return self
5328
5329 def send(self, *args):
5330 self.run_with_loop(self.pool.send(*args), self.loop)
5331
5332 def recv(self, *args):
5333 return self.run_with_loop(self.pool.recv(*args), self.loop)
5334
5335 def __exit__(self, type, value, traceback):
5336 try:
5337 return self.run_with_loop(self.conn.__aexit__(type, value, traceback), self.loop)
5338 finally:
5339 self._cancel_all_tasks(self.loop)  # must run before the loop is closed
5340 self.loop.close()
5341
5342 # taken from https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py with modifications
5343 # For contributors: if any new library that uses asyncio needs to be run in non-async scopes, move these functions out of this class
5344 @staticmethod
5345 def run_with_loop(main, loop):
5346 if not asyncio.iscoroutine(main):
5347 raise ValueError(f'a coroutine was expected, got {main!r}')
5348
5349 try:
5350 return loop.run_until_complete(main)
5351 finally:
5352 loop.run_until_complete(loop.shutdown_asyncgens())
5353 if hasattr(loop, 'shutdown_default_executor'):
5354 loop.run_until_complete(loop.shutdown_default_executor())
5355
5356 @staticmethod
5357 def _cancel_all_tasks(loop):
5358 to_cancel = asyncio.all_tasks(loop)
5359
5360 if not to_cancel:
5361 return
5362
5363 for task in to_cancel:
5364 task.cancel()
5365
5366 # XXX: "loop" is removed in python 3.10+
5367 loop.run_until_complete(
5368 asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))
5369
5370 for task in to_cancel:
5371 if task.cancelled():
5372 continue
5373 if task.exception() is not None:
5374 loop.call_exception_handler({
5375 'message': 'unhandled exception during asyncio.run() shutdown',
5376 'exception': task.exception(),
5377 'task': task,
5378 })
5379
5380
5381 def merge_headers(*dicts):
5382 """Merge dicts of http headers case insensitively, prioritizing the latter ones"""
5383 return {k.title(): v for k, v in itertools.chain.from_iterable(map(dict.items, dicts))}
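# e.g. merge_headers({'user-agent': 'UA'}, {'User-Agent': 'Other'}) == {'User-Agent': 'Other'}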
5384
5385
5386 class classproperty:
5387 """classmethod(property(func)) that works in py < 3.9"""
5388
5389 def __init__(self, func):
5390 functools.update_wrapper(self, func)
5391 self.func = func
5392
5393 def __get__(self, _, cls):
5394 return self.func(cls)
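# Usage sketch:
#   class Foo:
#       @classproperty
#       def name(cls):
#           return cls.__name__
#   Foo.name == 'Foo'  (accessible on the class itself, no instance needed)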
5395
5396
5397 class Namespace(types.SimpleNamespace):
5398 """Immutable namespace"""
5399
5400 def __iter__(self):
5401 return iter(self.__dict__.values())
5402
5403 @property
5404 def items_(self):
5405 return self.__dict__.items()
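# e.g. ns = Namespace(spam=1, eggs=2); then list(ns) == [1, 2]
# and dict(ns.items_) == {'spam': 1, 'eggs': 2}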
5406
5407
5408 # Deprecated
5409 has_certifi = bool(certifi)
5410 has_websockets = bool(websockets)