import asyncio
import atexit
import base64
import binascii
import calendar
import codecs
import collections
import collections.abc
import contextlib
import datetime
import email.header
import email.utils
import errno
import gzip
import hashlib
import hmac
import html.entities
import html.parser
import http.client
import http.cookiejar
import importlib.util
import inspect
import io
import itertools
import json
import locale
import math
import mimetypes
import operator
import os
import platform
import random
import re
import shlex
import socket
import ssl
import struct
import subprocess
import sys
import tempfile
import time
import traceback
import types
import unicodedata
import urllib.error
import urllib.parse
import urllib.request
import xml.etree.ElementTree
import zlib

from .compat import functools  # isort: split
from .compat import (
    compat_etree_fromstring,
    compat_expanduser,
    compat_HTMLParseError,
    compat_os_name,
    compat_shlex_quote,
)
from .dependencies import brotli, certifi, websockets, xattr
from .socks import ProxyType, sockssocket


def register_socks_protocols():
    # "Register" SOCKS protocols
    # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
    # URLs with protocols not in urlparse.uses_netloc are not handled correctly
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in urllib.parse.uses_netloc:
            urllib.parse.uses_netloc.append(scheme)


# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))


def random_user_agent():
    _USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
    _CHROME_VERSIONS = (
        '90.0.4430.212',
        '90.0.4430.24',
        '90.0.4430.70',
        '90.0.4430.72',
        '90.0.4430.85',
        '90.0.4430.93',
        '91.0.4472.101',
        '91.0.4472.106',
        '91.0.4472.114',
        '91.0.4472.124',
        '91.0.4472.164',
        '91.0.4472.19',
        '91.0.4472.77',
        '92.0.4515.107',
        '92.0.4515.115',
        '92.0.4515.131',
        '92.0.4515.159',
        '92.0.4515.43',
        '93.0.4556.0',
        '93.0.4577.15',
        '93.0.4577.63',
        '93.0.4577.82',
        '94.0.4606.41',
        '94.0.4606.54',
        '94.0.4606.61',
        '94.0.4606.71',
        '94.0.4606.81',
        '94.0.4606.85',
        '95.0.4638.17',
        '95.0.4638.50',
        '95.0.4638.54',
        '95.0.4638.69',
        '95.0.4638.74',
        '96.0.4664.18',
        '96.0.4664.45',
        '96.0.4664.55',
        '96.0.4664.93',
        '97.0.4692.20',
    )
    return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)


SUPPORTED_ENCODINGS = [
    'gzip', 'deflate'
]
if brotli:
    SUPPORTED_ENCODINGS.append('br')

std_headers = {
    'User-Agent': random_user_agent(),
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-us,en;q=0.5',
    'Sec-Fetch-Mode': 'navigate',
}


USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}


NO_DEFAULT = object()
IDENTITY = lambda x: x

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
    # these follow the genitive grammatical case (dopełniacz)
    # some websites might be using nominative, which will require another month list
    # https://en.wikibooks.org/wiki/Polish/Noun_cases
    'pl': ['stycznia', 'lutego', 'marca', 'kwietnia', 'maja', 'czerwca',
           'lipca', 'sierpnia', 'września', 'października', 'listopada', 'grudnia'],
}

# From https://github.com/python/cpython/blob/3.11/Lib/email/_parseaddr.py#L36-L42
TIMEZONE_NAMES = {
    'UT': 0, 'UTC': 0, 'GMT': 0, 'Z': 0,
    'AST': -4, 'ADT': -3,  # Atlantic (used in Canada)
    'EST': -5, 'EDT': -4,  # Eastern
    'CST': -6, 'CDT': -5,  # Central
    'MST': -7, 'MDT': -6,  # Mountain
    'PST': -8, 'PDT': -7   # Pacific
}

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))

DATE_FORMATS = (
    '%d %B %Y',
    '%d %b %Y',
    '%B %d %Y',
    '%B %dst %Y',
    '%B %dnd %Y',
    '%B %drd %Y',
    '%B %dth %Y',
    '%b %d %Y',
    '%b %dst %Y',
    '%b %dnd %Y',
    '%b %drd %Y',
    '%b %dth %Y',
    '%b %dst %Y %I:%M',
    '%b %dnd %Y %I:%M',
    '%b %drd %Y %I:%M',
    '%b %dth %Y %I:%M',
    '%Y %m %d',
    '%Y-%m-%d',
    '%Y.%m.%d.',
    '%Y/%m/%d',
    '%Y/%m/%d %H:%M',
    '%Y/%m/%d %H:%M:%S',
    '%Y%m%d%H%M',
    '%Y%m%d%H%M%S',
    '%Y%m%d',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S.%f',
    '%Y-%m-%d %H:%M:%S:%f',
    '%d.%m.%Y %H:%M',
    '%d.%m.%Y %H.%M',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%Y-%m-%dT%H:%M',
    '%b %d %Y at %H:%M',
    '%b %d %Y at %H:%M:%S',
    '%B %d %Y at %H:%M',
    '%B %d %Y at %H:%M:%S',
    '%H:%M %d-%b-%Y',
)

DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
    '%d-%m-%Y %H:%M',
])

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])

PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>\s*(?P<json_ld>{.+?}|\[.+?\])\s*</script>'

NUMBER_RE = r'\d+(?:\.\d+)?'


@functools.cache
def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref


def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    tf = tempfile.NamedTemporaryFile(
        prefix=f'{os.path.basename(fn)}.', dir=os.path.dirname(fn),
        suffix='.tmp', delete=False, mode='w', encoding='utf-8')

    try:
        with tf:
            json.dump(obj, tf, ensure_ascii=False)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            with contextlib.suppress(OSError):
                os.unlink(fn)
        with contextlib.suppress(OSError):
            mask = os.umask(0)
            os.umask(mask)
            os.chmod(tf.name, 0o666 & ~mask)
        os.rename(tf.name, fn)
    except Exception:
        with contextlib.suppress(OSError):
            os.remove(tf.name)
        raise


def find_xpath_attr(node, xpath, key, val=None):
    """ Find the xpath xpath[@key=val] """
    assert re.match(r'^[a-zA-Z_-]+$', key)
    expr = xpath + ('[@%s]' % key if val is None else f"[@{key}='{val}']")
    return node.find(expr)

# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter


def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)

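# Example: expanding a namespace-prefixed path (the mapping is illustrative):
#   >>> xpath_with_ns('media:song/media:name', {'media': 'http://example.com/'})
#   '{http://example.com/}song/{http://example.com/}name'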

def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(xpath)

    if isinstance(xpath, str):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n


def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text


def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = f'{xpath}[@{key}]' if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]


def get_element_by_id(id, html, **kwargs):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html, **kwargs)


def get_element_html_by_id(id, html, **kwargs):
    """Return the html of the tag with the specified ID in the passed HTML document"""
    return get_element_html_by_attribute('id', id, html, **kwargs)


def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None

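# Example: extracting the content of the first tag with a given class
# (markup is illustrative):
#   >>> get_element_by_class('foo', '<div class="foo bar">nice</div>')
#   'nice'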

def get_element_html_by_class(class_name, html):
    """Return the html of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_html_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_by_attribute(attribute, value, html, **kwargs):
    retval = get_elements_by_attribute(attribute, value, html, **kwargs)
    return retval[0] if retval else None


def get_element_html_by_attribute(attribute, value, html, **kargs):
    retval = get_elements_html_by_attribute(attribute, value, html, **kargs)
    return retval[0] if retval else None


def get_elements_by_class(class_name, html, **kargs):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_by_attribute(
        'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_html_by_class(class_name, html):
    """Return the html of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_html_by_attribute(
        'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_by_attribute(*args, **kwargs):
    """Return the content of the tag with the specified attribute in the passed HTML document"""
    return [content for content, _ in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_html_by_attribute(*args, **kwargs):
    """Return the html of the tag with the specified attribute in the passed HTML document"""
    return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_text_and_html_by_attribute(attribute, value, html, *, tag=r'[\w:.-]+', escape_value=True):
    """
    Return the text (content) and the html (whole) of the tag with the specified
    attribute in the passed HTML document
    """

    quote = '' if re.match(r'''[\s"'`=<>]''', value) else '?'

    value = re.escape(value) if escape_value else value

    partial_element_re = rf'''(?x)
        <(?P<tag>{tag})
         (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
         \s{re.escape(attribute)}\s*=\s*(?P<_q>['"]{quote})(?-x:{value})(?P=_q)
        '''

    for m in re.finditer(partial_element_re, html):
        content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():])

        yield (
            unescapeHTML(re.sub(r'^(?P<q>["\'])(?P<content>.*)(?P=q)$', r'\g<content>', content, flags=re.DOTALL)),
            whole
        )


class HTMLBreakOnClosingTagParser(html.parser.HTMLParser):
    """
    HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
    closing tag for the first opening tag it has encountered, and can be used
    as a context manager
    """

    class HTMLBreakOnClosingTagException(Exception):
        pass

    def __init__(self):
        self.tagstack = collections.deque()
        html.parser.HTMLParser.__init__(self)

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def close(self):
        # handle_endtag does not return upon raising HTMLBreakOnClosingTagException,
        # so data remains buffered; we no longer have any interest in it, thus
        # override this method to discard it
        pass

    def handle_starttag(self, tag, _):
        self.tagstack.append(tag)

    def handle_endtag(self, tag):
        if not self.tagstack:
            raise compat_HTMLParseError('no tags in the stack')
        while self.tagstack:
            inner_tag = self.tagstack.pop()
            if inner_tag == tag:
                break
        else:
            raise compat_HTMLParseError(f'matching opening tag for closing {tag} tag not found')
        if not self.tagstack:
            raise self.HTMLBreakOnClosingTagException()


def get_element_text_and_html_by_tag(tag, html):
    """
    For the first element with the specified tag in the passed HTML document
    return its content (text) and the whole element (html)
    """
    def find_or_raise(haystack, needle, exc):
        try:
            return haystack.index(needle)
        except ValueError:
            raise exc
    closing_tag = f'</{tag}>'
    whole_start = find_or_raise(
        html, f'<{tag}', compat_HTMLParseError(f'opening {tag} tag not found'))
    content_start = find_or_raise(
        html[whole_start:], '>', compat_HTMLParseError(f'malformed opening {tag} tag'))
    content_start += whole_start + 1
    with HTMLBreakOnClosingTagParser() as parser:
        parser.feed(html[whole_start:content_start])
        if not parser.tagstack or parser.tagstack[0] != tag:
            raise compat_HTMLParseError(f'parser did not match opening {tag} tag')
        offset = content_start
        while offset < len(html):
            next_closing_tag_start = find_or_raise(
                html[offset:], closing_tag,
                compat_HTMLParseError(f'closing {tag} tag not found'))
            next_closing_tag_end = next_closing_tag_start + len(closing_tag)
            try:
                parser.feed(html[offset:offset + next_closing_tag_end])
                offset += next_closing_tag_end
            except HTMLBreakOnClosingTagParser.HTMLBreakOnClosingTagException:
                return html[content_start:offset + next_closing_tag_start], \
                    html[whole_start:offset + next_closing_tag_end]
        raise compat_HTMLParseError('unexpected end of html')

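# Example: content vs. whole element (markup is illustrative):
#   >>> get_element_text_and_html_by_tag('a', '<div><a href="#">link</a></div>')
#   ('link', '<a href="#">link</a>')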

class HTMLAttributeParser(html.parser.HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""

    def __init__(self):
        self.attrs = {}
        html.parser.HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)
        raise compat_HTMLParseError('done')


class HTMLListAttrsParser(html.parser.HTMLParser):
    """HTML parser to gather the attributes for the elements of a list"""

    def __init__(self):
        html.parser.HTMLParser.__init__(self)
        self.items = []
        self._level = 0

    def handle_starttag(self, tag, attrs):
        if tag == 'li' and self._level == 0:
            self.items.append(dict(attrs))
        self._level += 1

    def handle_endtag(self, tag):
        self._level -= 1


def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&#98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', 'c': 'baz', 'd': 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    """
    parser = HTMLAttributeParser()
    with contextlib.suppress(compat_HTMLParseError):
        parser.feed(html_element)
        parser.close()
    return parser.attrs


def parse_list(webpage):
    """Given a string for a series of HTML <li> elements,
    return a list of dictionaries of their attributes"""
    parser = HTMLListAttrsParser()
    parser.feed(webpage)
    parser.close()
    return parser.items

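# Example: attributes of each top-level <li> (markup is illustrative):
#   >>> parse_list('<li data-id="1">a</li><li data-id="2">b</li>')
#   [{'data-id': '1'}, {'data-id': '2'}]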

def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    html = re.sub(r'\s+', ' ', html)
    html = re.sub(r'(?u)\s?<\s?br\s?/?\s?>\s?', '\n', html)
    html = re.sub(r'(?u)<\s?/\s?p\s?>\s?<\s?p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()

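# Example: <br> and paragraph breaks become newlines, tags are stripped:
#   >>> clean_html('<p>one</p> <p>two<br>three</p>')
#   'one\ntwo\nthree'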

class LenientJSONDecoder(json.JSONDecoder):
    def __init__(self, *args, transform_source=None, ignore_extra=False, **kwargs):
        self.transform_source, self.ignore_extra = transform_source, ignore_extra
        super().__init__(*args, **kwargs)

    def decode(self, s):
        if self.transform_source:
            s = self.transform_source(s)
        try:
            if self.ignore_extra:
                return self.raw_decode(s.lstrip())[0]
            return super().decode(s)
        except json.JSONDecodeError as e:
            if e.pos is not None:
                raise type(e)(f'{e.msg} in {s[e.pos-10:e.pos+10]!r}', s, e.pos)
            raise

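# Example: ignore_extra tolerates trailing garbage after the JSON value:
#   >>> LenientJSONDecoder(ignore_extra=True).decode('{"status": "ok"} trailing text')
#   {'status': 'ok'}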

def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    if filename == '-':
        if sys.platform == 'win32':
            import msvcrt

            # stdout may be any IO stream, e.g. when using contextlib.redirect_stdout
            with contextlib.suppress(io.UnsupportedOperation):
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)

    for attempt in range(2):
        try:
            try:
                if sys.platform == 'win32':
                    # FIXME: An exclusive lock also locks the file from being read.
                    # Since windows locks are mandatory, don't lock the file on windows (for now).
                    # Ref: https://github.com/yt-dlp/yt-dlp/issues/3124
                    raise LockingUnsupportedError()
                stream = locked_file(filename, open_mode, block=False).__enter__()
            except OSError:
                stream = open(filename, open_mode)
            return stream, filename
        except OSError as err:
            if attempt or err.errno in (errno.EACCES,):
                raise
            old_filename, filename = filename, sanitize_path(filename)
            if old_filename == filename:
                raise


def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp

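# Example (RFC 2822 date; the value shown is the corresponding UNIX timestamp):
#   >>> timeconvert('Wed, 14 Jun 2017 07:00:00 GMT')
#   1497423600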

def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
    """Sanitizes a string so it could be used as part of a filename.
    @param restricted   Use a stricter subset of allowed characters
    @param is_id        Whether this is an ID that should be kept unchanged if possible.
                        If unset, yt-dlp's new sanitization rules are in effect
    """
    if s == '':
        return ''

    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        elif not restricted and char == '\n':
            return '\0 '
        elif is_id is NO_DEFAULT and not restricted and char in '"*:<>?|/\\':
            # Replace with their full-width unicode counterparts
            return {'/': '\u29F8', '\\': '\u29f9'}.get(char, chr(ord(char) + 0xfee0))
        elif char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '\0_\0-' if restricted else '\0 \0-'
        elif char in '\\/|*<>':
            return '\0_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace() or ord(char) > 127):
            return '\0_'
        return char

    # Replace look-alike Unicode glyphs
    if restricted and (is_id is NO_DEFAULT or not is_id):
        s = unicodedata.normalize('NFKC', s)
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)  # Handle timestamps
    result = ''.join(map(replace_insane, s))
    if is_id is NO_DEFAULT:
        result = re.sub(r'(\0.)(?:(?=\1)..)+', r'\1', result)  # Remove repeated substitute chars
        STRIP_RE = r'(?:\0.|[ _-])*'
        result = re.sub(f'^\0.{STRIP_RE}|{STRIP_RE}\0.$', '', result)  # Remove substitute chars from start/end
    result = result.replace('\0', '') or '_'

    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result

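# Examples (titles are illustrative):
#   >>> sanitize_filename('Beyoncé: "Halo"?', restricted=True)
#   'Beyonce_-_Halo'
#   >>> sanitize_filename('a/b|c')  # default mode uses full-width look-alikes
#   'a⧸b｜c'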

def sanitize_path(s, force=False):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform == 'win32':
        force = False
        drive_or_unc, _ = os.path.splitdrive(s)
    elif force:
        drive_or_unc = ''
    else:
        return s

    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    elif force and s and s[0] == os.path.sep:
        sanitized_path.insert(0, os.path.sep)
    return os.path.join(*sanitized_path)

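# Example (Windows-only; on other platforms the path is returned unchanged
# unless force=True):
#   sanitize_path(r'C:\foo:bar\baz?')  ->  r'C:\foo#bar\baz#'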

def sanitize_url(url, *, scheme='http'):
    # Prepend protocol-less URLs with `http:` scheme in order to mitigate
    # the number of unwanted failures due to missing protocol
    if url is None:
        return
    elif url.startswith('//'):
        return f'{scheme}:{url}'
    # Fix some common typos seen so far
    COMMON_TYPOS = (
        # https://github.com/ytdl-org/youtube-dl/issues/15649
        (r'^httpss://', r'https://'),
        # https://bx1.be/lives/direct-tv/
        (r'^rmtp([es]?)://', r'rtmp\1://'),
    )
    for mistake, fixup in COMMON_TYPOS:
        if re.match(mistake, url):
            return re.sub(mistake, fixup, url)
    return url

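# Examples:
#   >>> sanitize_url('//example.com/video')
#   'http://example.com/video'
#   >>> sanitize_url('httpss://example.com')
#   'https://example.com'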

def extract_basic_auth(url):
    parts = urllib.parse.urlsplit(url)
    if parts.username is None:
        return url, None
    url = urllib.parse.urlunsplit(parts._replace(netloc=(
        parts.hostname if parts.port is None
        else '%s:%d' % (parts.hostname, parts.port))))
    auth_payload = base64.b64encode(
        ('%s:%s' % (parts.username, parts.password or '')).encode())
    return url, f'Basic {auth_payload.decode()}'

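# Example: credentials are moved out of the netloc into an Authorization value:
#   >>> extract_basic_auth('http://user:pass@example.com/x')
#   ('http://example.com/x', 'Basic dXNlcjpwYXNz')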

def sanitized_Request(url, *args, **kwargs):
    url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
    if auth_header is not None:
        headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
        headers['Authorization'] = auth_header
    return urllib.request.Request(url, *args, **kwargs)


def expand_path(s):
    """Expand shell variables and ~"""
    return os.path.expandvars(compat_expanduser(s))


def orderedSet(iterable, *, lazy=False):
    """Remove all duplicates from the input iterable"""
    def _iter():
        seen = []  # Do not use set since the items can be unhashable
        for x in iterable:
            if x not in seen:
                seen.append(x)
                yield x

    return _iter() if lazy else list(_iter())

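# Example: order is preserved and unhashable items are supported:
#   >>> orderedSet([1, [2], 1, [2], 3])
#   [1, [2], 3]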

def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in html.entities.name2codepoint:
        return chr(html.entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon.
    # E.g. '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in html.entities.html5:
        return html.entities.html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/ytdl-org/youtube-dl/issues/7518
        with contextlib.suppress(ValueError):
            return chr(int(numstr, base))

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert isinstance(s, str)

    return re.sub(
        r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)

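# Example: named, decimal and hexadecimal entities are decoded:
#   >>> unescapeHTML('&lt;b&gt; &amp; &#39;&#x41;&#39;')
#   "<b> & 'A'"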

def escapeHTML(text):
    return (
        text
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;')
        .replace('"', '&quot;')
        .replace("'", '&#39;')
    )


def process_communicate_or_kill(p, *args, **kwargs):
    deprecation_warning(f'"{__name__}.process_communicate_or_kill" is deprecated and may be removed '
                        f'in a future version. Use "{__name__}.Popen.communicate_or_kill" instead')
    return Popen.communicate_or_kill(p, *args, **kwargs)


class Popen(subprocess.Popen):
    if sys.platform == 'win32':
        _startupinfo = subprocess.STARTUPINFO()
        _startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    else:
        _startupinfo = None

    @staticmethod
    def _fix_pyinstaller_ld_path(env):
        """Restore LD_LIBRARY_PATH when using PyInstaller
            Ref: https://github.com/pyinstaller/pyinstaller/blob/develop/doc/runtime-information.rst#ld_library_path--libpath-considerations
                 https://github.com/yt-dlp/yt-dlp/issues/4573
        """
        if not hasattr(sys, '_MEIPASS'):
            return

        def _fix(key):
            orig = env.get(f'{key}_ORIG')
            if orig is None:
                env.pop(key, None)
            else:
                env[key] = orig

        _fix('LD_LIBRARY_PATH')  # Linux
        _fix('DYLD_LIBRARY_PATH')  # macOS

    def __init__(self, *args, env=None, text=False, **kwargs):
        if env is None:
            env = os.environ.copy()
        self._fix_pyinstaller_ld_path(env)

        if text is True:
            kwargs['universal_newlines'] = True  # For 3.6 compatibility
            kwargs.setdefault('encoding', 'utf-8')
            kwargs.setdefault('errors', 'replace')
        super().__init__(*args, env=env, **kwargs, startupinfo=self._startupinfo)

    def communicate_or_kill(self, *args, **kwargs):
        try:
            return self.communicate(*args, **kwargs)
        except BaseException:  # Including KeyboardInterrupt
            self.kill(timeout=None)
            raise

    def kill(self, *, timeout=0):
        super().kill()
        if timeout != 0:
            self.wait(timeout=timeout)

    @classmethod
    def run(cls, *args, timeout=None, **kwargs):
        with cls(*args, **kwargs) as proc:
            default = '' if proc.text_mode else b''
            stdout, stderr = proc.communicate_or_kill(timeout=timeout)
            return stdout or default, stderr or default, proc.returncode

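# Illustrative usage of Popen.run (the command is arbitrary):
#   stdout, stderr, returncode = Popen.run(
#       ['ffmpeg', '-version'], text=True,
#       stdout=subprocess.PIPE, stderr=subprocess.PIPE)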

def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding


def encodeFilename(s, for_subprocess=False):
    assert isinstance(s, str)
    return s


def decodeFilename(b, for_subprocess=False):
    return b


def encodeArgument(s):
    # Legacy code that uses byte strings
    # Uncomment the following line after fixing all post processors
    # assert isinstance(s, str), 'Internal error: %r should be of type %r, is %r' % (s, str, type(s))
    return s if isinstance(s, str) else s.decode('ascii')


def decodeArgument(b):
    return b


def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, str)
    return optval


_timetuple = collections.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))


def timetuple_from_msec(msec):
    secs, msec = divmod(msec, 1000)
    mins, secs = divmod(secs, 60)
    hrs, mins = divmod(mins, 60)
    return _timetuple(hrs, mins, secs, msec)

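# Example:
#   >>> timetuple_from_msec(123456)
#   Time(hours=0, minutes=2, seconds=3, milliseconds=456)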

def formatSeconds(secs, delim=':', msec=False):
    time = timetuple_from_msec(secs * 1000)
    if time.hours:
        ret = '%d%s%02d%s%02d' % (time.hours, delim, time.minutes, delim, time.seconds)
    elif time.minutes:
        ret = '%d%s%02d' % (time.minutes, delim, time.seconds)
    else:
        ret = '%d' % time.seconds
    return '%s.%03d' % (ret, time.milliseconds) if msec else ret

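# Example:
#   >>> formatSeconds(3723.5, msec=True)
#   '1:02:03.500'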

def _ssl_load_windows_store_certs(ssl_context, storename):
    # Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
    try:
        certs = [cert for cert, encoding, trust in ssl.enum_certificates(storename)
                 if encoding == 'x509_asn' and (
                     trust is True or ssl.Purpose.SERVER_AUTH.oid in trust)]
    except PermissionError:
        return
    for cert in certs:
        with contextlib.suppress(ssl.SSLError):
            ssl_context.load_verify_locations(cadata=cert)


def make_HTTPS_handler(params, **kwargs):
    opts_check_certificate = not params.get('nocheckcertificate')
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = opts_check_certificate
    if params.get('legacyserverconnect'):
        context.options |= 4  # SSL_OP_LEGACY_SERVER_CONNECT
        # Allow use of weaker ciphers in Python 3.10+. See https://bugs.python.org/issue43998
        context.set_ciphers('DEFAULT')
    elif (
        sys.version_info < (3, 10)
        and ssl.OPENSSL_VERSION_INFO >= (1, 1, 1)
        and not ssl.OPENSSL_VERSION.startswith('LibreSSL')
    ):
        # Backport the default SSL ciphers and minimum TLS version settings from Python 3.10 [1].
        # This is to ensure consistent behavior across Python versions, and help avoid fingerprinting
        # in some situations [2][3].
        # Python 3.10 only supports OpenSSL 1.1.1+ [4]. Because this change is likely
        # untested on older versions, we only apply this to OpenSSL 1.1.1+ to be safe.
        # LibreSSL is excluded until further investigation due to cipher support issues [5][6].
        # 1. https://github.com/python/cpython/commit/e983252b516edb15d4338b0a47631b59ef1e2536
        # 2. https://github.com/yt-dlp/yt-dlp/issues/4627
        # 3. https://github.com/yt-dlp/yt-dlp/pull/5294
        # 4. https://peps.python.org/pep-0644/
        # 5. https://peps.python.org/pep-0644/#libressl-support
        # 6. https://github.com/yt-dlp/yt-dlp/commit/5b9f253fa0aee996cf1ed30185d4b502e00609c4#commitcomment-89054368
        context.set_ciphers('@SECLEVEL=2:ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES:DHE+AES:!aNULL:!eNULL:!aDSS:!SHA1:!AESCCM')
        context.minimum_version = ssl.TLSVersion.TLSv1_2

    context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
    if opts_check_certificate:
        if has_certifi and 'no-certifi' not in params.get('compat_opts', []):
            context.load_verify_locations(cafile=certifi.where())
        else:
            try:
                context.load_default_certs()
            # Work around the issue in load_default_certs when there are bad certificates. See:
            # https://github.com/yt-dlp/yt-dlp/issues/1060,
            # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
            except ssl.SSLError:
                # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
                if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
                    for storename in ('CA', 'ROOT'):
                        _ssl_load_windows_store_certs(context, storename)
                context.set_default_verify_paths()

    client_certfile = params.get('client_certificate')
    if client_certfile:
        try:
            context.load_cert_chain(
                client_certfile, keyfile=params.get('client_certificate_key'),
                password=params.get('client_certificate_password'))
        except ssl.SSLError:
            raise YoutubeDLError('Unable to load client certificate')

    # Some servers may reject requests if ALPN extension is not sent. See:
    # https://github.com/python/cpython/issues/85140
    # https://github.com/yt-dlp/yt-dlp/issues/3878
    with contextlib.suppress(NotImplementedError):
        context.set_alpn_protocols(['http/1.1'])

    return YoutubeDLHTTPSHandler(params, context=context, **kwargs)


def bug_reports_message(before=';'):
    from .update import REPOSITORY

    msg = (f'please report this issue on https://github.com/{REPOSITORY}/issues?q= , '
           'filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U')

    before = before.rstrip()
    if not before or before.endswith(('.', '!', '?')):
        msg = msg[0].title() + msg[1:]

    return (before + ' ' if before else '') + msg


class YoutubeDLError(Exception):
    """Base exception for YoutubeDL errors."""
    msg = None

    def __init__(self, msg=None):
        if msg is not None:
            self.msg = msg
        elif self.msg is None:
            self.msg = type(self).__name__
        super().__init__(self.msg)


network_exceptions = [urllib.error.URLError, http.client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
    network_exceptions.append(ssl.CertificateError)
network_exceptions = tuple(network_exceptions)


class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
        """
        if sys.exc_info()[0] in network_exceptions:
            expected = True

        self.orig_msg = str(msg)
        self.traceback = tb
        self.expected = expected
        self.cause = cause
        self.video_id = video_id
        self.ie = ie
        self.exc_info = sys.exc_info()  # preserve original exception
        if isinstance(self.exc_info[1], ExtractorError):
            self.exc_info = self.exc_info[1].exc_info

        super().__init__(''.join((
            format_field(ie, None, '[%s] '),
            format_field(video_id, None, '%s: '),
            msg,
            format_field(cause, None, ' (caused by %r)'),
            '' if expected else bug_reports_message())))

    def format_traceback(self):
        return join_nonempty(
            self.traceback and ''.join(traceback.format_tb(self.traceback)),
            self.cause and ''.join(traceback.format_exception(None, self.cause, self.cause.__traceback__)[1:]),
            delim='\n') or None


class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super().__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class GeoRestrictedError(ExtractorError):
    """Geographic restriction Error exception.

    This exception may be thrown when a video is not available from your
    geographic location due to geographic restrictions imposed by a website.
    """

    def __init__(self, msg, countries=None, **kwargs):
        kwargs['expected'] = True
        super().__init__(msg, **kwargs)
        self.countries = countries


class UserNotLive(ExtractorError):
    """Error when a channel/user is not live"""

    def __init__(self, msg=None, **kwargs):
        kwargs['expected'] = True
        super().__init__(msg or 'The channel is not currently live', **kwargs)


class DownloadError(YoutubeDLError):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super().__init__(msg)
        self.exc_info = exc_info


class EntryNotInPlaylist(YoutubeDLError):
    """Entry not in playlist exception.

    This exception will be thrown by YoutubeDL when a requested entry
    is not found in the playlist info_dict
    """
    msg = 'Entry not found in info'


class SameFileError(YoutubeDLError):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    msg = 'Fixed output name but more than one file to download'

    def __init__(self, filename=None):
        if filename is not None:
            self.msg += f': {filename}'
        super().__init__(self.msg)


class PostProcessingError(YoutubeDLError):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """


class DownloadCancelled(YoutubeDLError):
    """ Exception raised when the download queue should be interrupted """
    msg = 'The download was cancelled'


class ExistingVideoReached(DownloadCancelled):
    """ --break-on-existing triggered """
    msg = 'Encountered a video that is already in the archive, stopping due to --break-on-existing'


class RejectedVideoReached(DownloadCancelled):
    """ --break-on-reject triggered """
    msg = 'Encountered a video that did not match filter, stopping due to --break-on-reject'


class MaxDownloadsReached(DownloadCancelled):
    """ --max-downloads limit has been reached. """
    msg = 'Maximum number of downloads reached, stopping due to --max-downloads'


class ReExtractInfo(YoutubeDLError):
    """ Video info needs to be re-extracted. """

    def __init__(self, msg, expected=False):
        super().__init__(msg)
        self.expected = expected


class ThrottledDownload(ReExtractInfo):
    """ Download speed below --throttled-rate. """
    msg = 'The download speed is below throttle limit'

    def __init__(self):
        super().__init__(self.msg, expected=False)


class UnavailableVideoError(YoutubeDLError):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    msg = 'Unable to download video'

    def __init__(self, err=None):
        if err is not None:
            self.msg += f': {err}'
        super().__init__(self.msg)


class ContentTooShortError(YoutubeDLError):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        super().__init__(f'Downloaded {downloaded} bytes, expected {expected} bytes')
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected


class XAttrMetadataError(YoutubeDLError):
    def __init__(self, code=None, msg='Unknown error'):
        super().__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        if (self.code in (errno.ENOSPC, errno.EDQUOT)
                or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'


class XAttrUnavailableError(YoutubeDLError):
    pass


def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    hc = http_class(*args, **kwargs)
    source_address = ydl_handler._params.get('source_address')

    if source_address is not None:
        # This is to workaround _create_connection() from socket where it will try all
        # address data from getaddrinfo() including IPv6. This filters the result from
        # getaddrinfo() based on the source_address value.
        # This is based on the cpython socket.create_connection() function.
        # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
        def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
            host, port = address
            err = None
            addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
            af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
            ip_addrs = [addr for addr in addrs if addr[0] == af]
            if addrs and not ip_addrs:
                ip_version = 'v4' if af == socket.AF_INET else 'v6'
                raise OSError(
                    "No remote IP%s addresses available for connect, can't use '%s' as source address"
                    % (ip_version, source_address[0]))
            for res in ip_addrs:
                af, socktype, proto, canonname, sa = res
                sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                        sock.settimeout(timeout)
                    sock.bind(source_address)
                    sock.connect(sa)
                    err = None  # Explicitly break reference cycle
                    return sock
                except OSError as _:
                    err = _
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            else:
                raise OSError('getaddrinfo returns an empty list')
        if hasattr(hc, '_create_connection'):
            hc._create_connection = _create_connection
        hc.source_address = (source_address, 0)

    return hc


def handle_youtubedl_headers(headers):
    filtered_headers = headers

    if 'Youtubedl-no-compression' in filtered_headers:
        filtered_headers = {k: v for k, v in filtered_headers.items() if k.lower() != 'accept-encoding'}
        del filtered_headers['Youtubedl-no-compression']

    return filtered_headers

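# Example: the marker header suppresses Accept-Encoding and is itself removed:
#   >>> handle_youtubedl_headers({'Accept-Encoding': 'gzip', 'Youtubedl-no-compression': '1'})
#   {}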
1329
1330 class YoutubeDLHandler(urllib.request.HTTPHandler):
1331 """Handler for HTTP requests and responses.
1332
1333 This class, when installed with an OpenerDirector, automatically adds
1334 the standard headers to every HTTP request and handles gzipped and
1335 deflated responses from web servers. If compression is to be avoided in
1336 a particular request, the original request in the program code only has
1337 to include the HTTP header "Youtubedl-no-compression", which will be
1338 removed before making the real request.
1339
1340 Part of this code was copied from:
1341
1342 http://techknack.net/python-urllib2-handlers/
1343
1344 Andrew Rowls, the author of that code, agreed to release it to the
1345 public domain.
1346 """
1347
1348 def __init__(self, params, *args, **kwargs):
1349 urllib.request.HTTPHandler.__init__(self, *args, **kwargs)
1350 self._params = params
1351
1352 def http_open(self, req):
1353 conn_class = http.client.HTTPConnection
1354
1355 socks_proxy = req.headers.get('Ytdl-socks-proxy')
1356 if socks_proxy:
1357 conn_class = make_socks_conn_class(conn_class, socks_proxy)
1358 del req.headers['Ytdl-socks-proxy']
1359
1360 return self.do_open(functools.partial(
1361 _create_http_connection, self, conn_class, False),
1362 req)
1363
1364 @staticmethod
1365 def deflate(data):
1366 if not data:
1367 return data
1368 try:
1369 return zlib.decompress(data, -zlib.MAX_WBITS)
1370 except zlib.error:
1371 return zlib.decompress(data)
1372
1373 @staticmethod
1374 def brotli(data):
1375 if not data:
1376 return data
1377 return brotli.decompress(data)
1378
1379 def http_request(self, req):
1380 # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
1381 # always respected by websites, some tend to give out URLs with non percent-encoded
1382 # non-ASCII characters (see telemb.py, ard.py [#3412])
1383 # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
1384 # To work around aforementioned issue we will replace request's original URL with
1385 # percent-encoded one
1386 # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
1387 # the code of this workaround has been moved here from YoutubeDL.urlopen()
1388 url = req.get_full_url()
1389 url_escaped = escape_url(url)
1390
1391 # Substitute URL if any change after escaping
1392 if url != url_escaped:
1393 req = update_Request(req, url=url_escaped)
1394
1395 for h, v in self._params.get('http_headers', std_headers).items():
1396 # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
1397 # The dict keys are capitalized because of this bug by urllib
1398 if h.capitalize() not in req.headers:
1399 req.add_header(h, v)
1400
1401 if 'Accept-encoding' not in req.headers:
1402 req.add_header('Accept-encoding', ', '.join(SUPPORTED_ENCODINGS))
1403
1404 req.headers = handle_youtubedl_headers(req.headers)
1405
1406 return super().do_request_(req)
1407
1408 def http_response(self, req, resp):
1409 old_resp = resp
1410 # gzip
1411 if resp.headers.get('Content-encoding', '') == 'gzip':
1412 content = resp.read()
1413 gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
1414 try:
1415 uncompressed = io.BytesIO(gz.read())
1416 except OSError as original_ioerror:
1417 # There may be junk add the end of the file
1418 # See http://stackoverflow.com/q/4928560/35070 for details
1419 for i in range(1, 1024):
1420 try:
1421 gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
1422 uncompressed = io.BytesIO(gz.read())
1423 except OSError:
1424 continue
1425 break
1426 else:
1427 raise original_ioerror
1428 resp = urllib.request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
1429 resp.msg = old_resp.msg
1430 del resp.headers['Content-encoding']
1431 # deflate
1432 if resp.headers.get('Content-encoding', '') == 'deflate':
1433 gz = io.BytesIO(self.deflate(resp.read()))
1434 resp = urllib.request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
1435 resp.msg = old_resp.msg
1436 del resp.headers['Content-encoding']
1437 # brotli
1438 if resp.headers.get('Content-encoding', '') == 'br':
1439 resp = urllib.request.addinfourl(
1440 io.BytesIO(self.brotli(resp.read())), old_resp.headers, old_resp.url, old_resp.code)
1441 resp.msg = old_resp.msg
1442 del resp.headers['Content-encoding']
1443 # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
1444 # https://github.com/ytdl-org/youtube-dl/issues/6457).
1445 if 300 <= resp.code < 400:
1446 location = resp.headers.get('Location')
1447 if location:
1448 # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
1449 location = location.encode('iso-8859-1').decode()
1450 location_escaped = escape_url(location)
1451 if location != location_escaped:
1452 del resp.headers['Location']
1453 resp.headers['Location'] = location_escaped
1454 return resp
1455
1456 https_request = http_request
1457 https_response = http_response
1458
1459
1460 def make_socks_conn_class(base_class, socks_proxy):
1461 assert issubclass(base_class, (
1462 http.client.HTTPConnection, http.client.HTTPSConnection))
1463
1464 url_components = urllib.parse.urlparse(socks_proxy)
1465 if url_components.scheme.lower() == 'socks5':
1466 socks_type = ProxyType.SOCKS5
1467 elif url_components.scheme.lower() in ('socks', 'socks4'):
1468 socks_type = ProxyType.SOCKS4
1469 elif url_components.scheme.lower() == 'socks4a':
1470 socks_type = ProxyType.SOCKS4A
1471
1472 def unquote_if_non_empty(s):
1473 if not s:
1474 return s
1475 return urllib.parse.unquote_plus(s)
1476
1477 proxy_args = (
1478 socks_type,
1479 url_components.hostname, url_components.port or 1080,
1480 True, # Remote DNS
1481 unquote_if_non_empty(url_components.username),
1482 unquote_if_non_empty(url_components.password),
1483 )
1484
1485 class SocksConnection(base_class):
1486 def connect(self):
1487 self.sock = sockssocket()
1488 self.sock.setproxy(*proxy_args)
1489 if isinstance(self.timeout, (int, float)):
1490 self.sock.settimeout(self.timeout)
1491 self.sock.connect((self.host, self.port))
1492
1493 if isinstance(self, http.client.HTTPSConnection):
1494 if hasattr(self, '_context'): # Python > 2.6
1495 self.sock = self._context.wrap_socket(
1496 self.sock, server_hostname=self.host)
1497 else:
1498 self.sock = ssl.wrap_socket(self.sock)
1499
1500 return SocksConnection
1501
1502
1503 class YoutubeDLHTTPSHandler(urllib.request.HTTPSHandler):
1504 def __init__(self, params, https_conn_class=None, *args, **kwargs):
1505 urllib.request.HTTPSHandler.__init__(self, *args, **kwargs)
1506 self._https_conn_class = https_conn_class or http.client.HTTPSConnection
1507 self._params = params
1508
1509 def https_open(self, req):
1510 kwargs = {}
1511 conn_class = self._https_conn_class
1512
1513 if hasattr(self, '_context'): # python > 2.6
1514 kwargs['context'] = self._context
1515 if hasattr(self, '_check_hostname'): # python 3.x
1516 kwargs['check_hostname'] = self._check_hostname
1517
1518 socks_proxy = req.headers.get('Ytdl-socks-proxy')
1519 if socks_proxy:
1520 conn_class = make_socks_conn_class(conn_class, socks_proxy)
1521 del req.headers['Ytdl-socks-proxy']
1522
1523 try:
1524 return self.do_open(
1525 functools.partial(_create_http_connection, self, conn_class, True), req, **kwargs)
1526 except urllib.error.URLError as e:
1527 if (isinstance(e.reason, ssl.SSLError)
1528 and getattr(e.reason, 'reason', None) == 'SSLV3_ALERT_HANDSHAKE_FAILURE'):
1529 raise YoutubeDLError('SSLV3_ALERT_HANDSHAKE_FAILURE: Try using --legacy-server-connect')
1530 raise
1531
1532
1533 def is_path_like(f):
1534 return isinstance(f, (str, bytes, os.PathLike))
1535
1536
1537 class YoutubeDLCookieJar(http.cookiejar.MozillaCookieJar):
1538 """
1539 See [1] for cookie file format.
1540
1541 1. https://curl.haxx.se/docs/http-cookies.html
1542 """
1543 _HTTPONLY_PREFIX = '#HttpOnly_'
1544 _ENTRY_LEN = 7
1545 _HEADER = '''# Netscape HTTP Cookie File
1546 # This file is generated by yt-dlp. Do not edit.
1547
1548 '''
1549 _CookieFileEntry = collections.namedtuple(
1550 'CookieFileEntry',
1551 ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))
1552
1553 def __init__(self, filename=None, *args, **kwargs):
1554 super().__init__(None, *args, **kwargs)
1555 if is_path_like(filename):
1556 filename = os.fspath(filename)
1557 self.filename = filename
1558
1559 @staticmethod
1560 def _true_or_false(cndn):
1561 return 'TRUE' if cndn else 'FALSE'
1562
1563 @contextlib.contextmanager
1564 def open(self, file, *, write=False):
1565 if is_path_like(file):
1566 with open(file, 'w' if write else 'r', encoding='utf-8') as f:
1567 yield f
1568 else:
1569 if write:
1570 file.truncate(0)
1571 yield file
1572
1573 def _really_save(self, f, ignore_discard=False, ignore_expires=False):
1574 now = time.time()
1575 for cookie in self:
1576 if (not ignore_discard and cookie.discard
1577 or not ignore_expires and cookie.is_expired(now)):
1578 continue
1579 name, value = cookie.name, cookie.value
1580 if value is None:
1581 # cookies.txt regards 'Set-Cookie: foo' as a cookie
1582 # with no name, whereas http.cookiejar regards it as a
1583 # cookie with no value.
1584 name, value = '', name
1585 f.write('%s\n' % '\t'.join((
1586 cookie.domain,
1587 self._true_or_false(cookie.domain.startswith('.')),
1588 cookie.path,
1589 self._true_or_false(cookie.secure),
1590 str_or_none(cookie.expires, default=''),
1591 name, value
1592 )))
1593
1594 def save(self, filename=None, *args, **kwargs):
1595 """
1596 Save cookies to a file.
1597 Code is taken from CPython 3.6
1598 https://github.com/python/cpython/blob/8d999cbf4adea053be6dbb612b9844635c4dfb8e/Lib/http/cookiejar.py#L2091-L2117 """
1599
1600 if filename is None:
1601 if self.filename is not None:
1602 filename = self.filename
1603 else:
1604 raise ValueError(http.cookiejar.MISSING_FILENAME_TEXT)
1605
1606 # Store session cookies with `expires` set to 0 instead of an empty string
1607 for cookie in self:
1608 if cookie.expires is None:
1609 cookie.expires = 0
1610
1611 with self.open(filename, write=True) as f:
1612 f.write(self._HEADER)
1613 self._really_save(f, *args, **kwargs)
1614
1615 def load(self, filename=None, ignore_discard=False, ignore_expires=False):
1616 """Load cookies from a file."""
1617 if filename is None:
1618 if self.filename is not None:
1619 filename = self.filename
1620 else:
1621 raise ValueError(http.cookiejar.MISSING_FILENAME_TEXT)
1622
1623 def prepare_line(line):
1624 if line.startswith(self._HTTPONLY_PREFIX):
1625 line = line[len(self._HTTPONLY_PREFIX):]
1626 # comments and empty lines are fine
1627 if line.startswith('#') or not line.strip():
1628 return line
1629 cookie_list = line.split('\t')
1630 if len(cookie_list) != self._ENTRY_LEN:
1631 raise http.cookiejar.LoadError('invalid length %d' % len(cookie_list))
1632 cookie = self._CookieFileEntry(*cookie_list)
1633 if cookie.expires_at and not cookie.expires_at.isdigit():
1634 raise http.cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
1635 return line
1636
1637 cf = io.StringIO()
1638 with self.open(filename) as f:
1639 for line in f:
1640 try:
1641 cf.write(prepare_line(line))
1642 except http.cookiejar.LoadError as e:
1643 if f'{line.strip()} '[0] in '[{"':
1644 raise http.cookiejar.LoadError(
1645 'Cookies file must be Netscape formatted, not JSON. See '
1646 'https://github.com/yt-dlp/yt-dlp/wiki/FAQ#how-do-i-pass-cookies-to-yt-dlp')
1647 write_string(f'WARNING: skipping cookie file entry due to {e}: {line!r}\n')
1648 continue
1649 cf.seek(0)
1650 self._really_load(cf, filename, ignore_discard, ignore_expires)
1651 # Session cookies are denoted by either `expires` field set to
1652 # an empty string or 0. MozillaCookieJar only recognizes the former
1653 # (see [1]). So we need force the latter to be recognized as session
1654 # cookies on our own.
1655 # Session cookies may be important for cookies-based authentication,
1656 # e.g. usually, when user does not check 'Remember me' check box while
1657 # logging in on a site, some important cookies are stored as session
1658 # cookies so that not recognizing them will result in failed login.
1659 # 1. https://bugs.python.org/issue17164
1660 for cookie in self:
1661 # Treat `expires=0` cookies as session cookies
1662 if cookie.expires == 0:
1663 cookie.expires = None
1664 cookie.discard = True
1665
1666
1667 class YoutubeDLCookieProcessor(urllib.request.HTTPCookieProcessor):
1668 def __init__(self, cookiejar=None):
1669 urllib.request.HTTPCookieProcessor.__init__(self, cookiejar)
1670
1671 def http_response(self, request, response):
1672 return urllib.request.HTTPCookieProcessor.http_response(self, request, response)
1673
1674 https_request = urllib.request.HTTPCookieProcessor.http_request
1675 https_response = http_response
1676
1677
1678 class YoutubeDLRedirectHandler(urllib.request.HTTPRedirectHandler):
1679 """YoutubeDL redirect handler
1680
1681 The code is based on HTTPRedirectHandler implementation from CPython [1].
1682
1683 This redirect handler solves two issues:
1684 - ensures redirect URL is always unicode under python 2
1685 - introduces support for experimental HTTP response status code
1686 308 Permanent Redirect [2] used by some sites [3]
1687
1688 1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
1689 2. https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308
1690 3. https://github.com/ytdl-org/youtube-dl/issues/28768
1691 """
1692
1693 http_error_301 = http_error_303 = http_error_307 = http_error_308 = urllib.request.HTTPRedirectHandler.http_error_302
1694
1695 def redirect_request(self, req, fp, code, msg, headers, newurl):
1696 """Return a Request or None in response to a redirect.
1697
1698 This is called by the http_error_30x methods when a
1699 redirection response is received. If a redirection should
1700 take place, return a new Request to allow http_error_30x to
1701 perform the redirect. Otherwise, raise HTTPError if no-one
1702 else should try to handle this url. Return None if you can't
1703 but another Handler might.
1704 """
1705 m = req.get_method()
1706 if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
1707 or code in (301, 302, 303) and m == "POST")):
1708 raise urllib.error.HTTPError(req.full_url, code, msg, headers, fp)
1709 # Strictly (according to RFC 2616), 301 or 302 in response to
1710 # a POST MUST NOT cause a redirection without confirmation
1711 # from the user (of urllib.request, in this case). In practice,
1712 # essentially all clients do redirect in this case, so we do
1713 # the same.
1714
1715 # Be conciliant with URIs containing a space. This is mainly
1716 # redundant with the more complete encoding done in http_error_302(),
1717 # but it is kept for compatibility with other callers.
1718 newurl = newurl.replace(' ', '%20')
1719
1720 CONTENT_HEADERS = ("content-length", "content-type")
1721 # Content headers are specific to the request body and must not be carried over
1722 newheaders = {k: v for k, v in req.headers.items() if k.lower() not in CONTENT_HEADERS}
1723
1724 # A 303 must either use GET or HEAD for subsequent request
1725 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.4
1726 if code == 303 and m != 'HEAD':
1727 m = 'GET'
1728 # 301 and 302 redirects are commonly turned into a GET from a POST
1729 # for subsequent requests by browsers, so we'll do the same.
1730 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.2
1731 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.3
1732 if code in (301, 302) and m == 'POST':
1733 m = 'GET'
1734
1735 return urllib.request.Request(
1736 newurl, headers=newheaders, origin_req_host=req.origin_req_host,
1737 unverifiable=True, method=m)
1738
1739
1740 def extract_timezone(date_str):
1741 m = re.search(
1742 r'''(?x)
1743 ^.{8,}? # >=8 char non-TZ prefix, if present
1744 (?P<tz>Z| # just the UTC Z, or
1745 (?:(?<=.\b\d{4}|\b\d{2}:\d\d)| # preceded by 4 digits or hh:mm or
1746 (?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d)) # not preceded by a 3-letter word, >=4 letters or 2 digits
1747 [ ]? # optional space
1748 (?P<sign>\+|-) # +/-
1749 (?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2}) # hh[:]mm
1750 $)
1751 ''', date_str)
1752 if not m:
1753 m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
1754 timezone = TIMEZONE_NAMES.get(m and m.group('tz').strip())
1755 if timezone is not None:
1756 date_str = date_str[:-len(m.group('tz'))]
1757 timezone = datetime.timedelta(hours=timezone or 0)
1758 else:
1759 date_str = date_str[:-len(m.group('tz'))]
1760 if not m.group('sign'):
1761 timezone = datetime.timedelta()
1762 else:
1763 sign = 1 if m.group('sign') == '+' else -1
1764 timezone = datetime.timedelta(
1765 hours=sign * int(m.group('hours')),
1766 minutes=sign * int(m.group('minutes')))
1767 return timezone, date_str
1768
1769
1770 def parse_iso8601(date_str, delimiter='T', timezone=None):
1771 """ Return a UNIX timestamp from the given date """
1772
1773 if date_str is None:
1774 return None
1775
1776 date_str = re.sub(r'\.[0-9]+', '', date_str)
1777
1778 if timezone is None:
1779 timezone, date_str = extract_timezone(date_str)
1780
1781 with contextlib.suppress(ValueError):
1782 date_format = f'%Y-%m-%d{delimiter}%H:%M:%S'
1783 dt = datetime.datetime.strptime(date_str, date_format) - timezone
1784 return calendar.timegm(dt.timetuple())
1785
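
# Illustrative usage of parse_iso8601 (an added sketch; the `_example_*`
# helper below is hypothetical and not part of the module's API):
def _example_parse_iso8601():
    # '2023-01-01T12:00:00+01:00' is 11:00 UTC
    assert parse_iso8601('2023-01-01T12:00:00+01:00') == 1672570800
    # Fractional seconds are stripped and a trailing 'Z' denotes UTC
    assert parse_iso8601('2023-01-01T11:00:00.123Z') == 1672570800
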
1786
1787 def date_formats(day_first=True):
1788 return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST
1789
1790
1791 def unified_strdate(date_str, day_first=True):
1792 """Return a string with the date in the format YYYYMMDD"""
1793
1794 if date_str is None:
1795 return None
1796 upload_date = None
1797 # Replace commas
1798 date_str = date_str.replace(',', ' ')
1799 # Remove AM/PM + timezone
1800 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
1801 _, date_str = extract_timezone(date_str)
1802
1803 for expression in date_formats(day_first):
1804 with contextlib.suppress(ValueError):
1805 upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
1806 if upload_date is None:
1807 timetuple = email.utils.parsedate_tz(date_str)
1808 if timetuple:
1809 with contextlib.suppress(ValueError):
1810 upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
1811 if upload_date is not None:
1812 return str(upload_date)
1813
1814
1815 def unified_timestamp(date_str, day_first=True):
1816 if date_str is None:
1817 return None
1818
1819 date_str = re.sub(r'\s+', ' ', re.sub(
1820 r'(?i)[,|]|(mon|tues?|wed(nes)?|thu(rs)?|fri|sat(ur)?)(day)?', '', date_str))
1821
1822 pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
1823 timezone, date_str = extract_timezone(date_str)
1824
1825 # Remove AM/PM + timezone
1826 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
1827
1828 # Remove unrecognized timezones from ISO 8601 alike timestamps
1829 m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
1830 if m:
1831 date_str = date_str[:-len(m.group('tz'))]
1832
1833 # Python only supports microseconds, so remove nanoseconds
1834 m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
1835 if m:
1836 date_str = m.group(1)
1837
1838 for expression in date_formats(day_first):
1839 with contextlib.suppress(ValueError):
1840 dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
1841 return calendar.timegm(dt.timetuple())
1842
1843 timetuple = email.utils.parsedate_tz(date_str)
1844 if timetuple:
1845 return calendar.timegm(timetuple) + pm_delta * 3600 - timezone.total_seconds()
1846
1847
1848 def determine_ext(url, default_ext='unknown_video'):
1849 if url is None or '.' not in url:
1850 return default_ext
1851 guess = url.partition('?')[0].rpartition('.')[2]
1852 if re.match(r'^[A-Za-z0-9]+$', guess):
1853 return guess
1854 # Try to extract ext from URLs like http://example.com/foo/bar.mp4/?download
1855 elif guess.rstrip('/') in KNOWN_EXTENSIONS:
1856 return guess.rstrip('/')
1857 else:
1858 return default_ext
1859
1860
1861 def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
1862 return replace_extension(filename, sub_lang + '.' + sub_format, expected_real_ext)
1863
1864
1865 def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
1866 R"""
1867 Return a datetime object from a string.
1868 Supported format:
1869 (now|today|yesterday|DATE)([+-]\d+(microsecond|second|minute|hour|day|week|month|year)s?)?
1870
1871 @param format strftime format of DATE
1872 @param precision Round the datetime object: auto|microsecond|second|minute|hour|day
1873 auto: round to the unit provided in date_str (if applicable).
1874 """
1875 auto_precision = False
1876 if precision == 'auto':
1877 auto_precision = True
1878 precision = 'microsecond'
1879 today = datetime_round(datetime.datetime.utcnow(), precision)
1880 if date_str in ('now', 'today'):
1881 return today
1882 if date_str == 'yesterday':
1883 return today - datetime.timedelta(days=1)
1884 match = re.match(
1885 r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?',
1886 date_str)
1887 if match is not None:
1888 start_time = datetime_from_str(match.group('start'), precision, format)
1889 time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1)
1890 unit = match.group('unit')
1891 if unit == 'month' or unit == 'year':
1892 new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time)
1893 unit = 'day'
1894 else:
1895 if unit == 'week':
1896 unit = 'day'
1897 time *= 7
1898 delta = datetime.timedelta(**{unit + 's': time})
1899 new_date = start_time + delta
1900 if auto_precision:
1901 return datetime_round(new_date, unit)
1902 return new_date
1903
1904 return datetime_round(datetime.datetime.strptime(date_str, format), precision)
1905
1906
1907 def date_from_str(date_str, format='%Y%m%d', strict=False):
1908 R"""
1909 Return a date object from a string using datetime_from_str
1910
1911 @param strict Restrict allowed patterns to "YYYYMMDD" and
1912 (now|today|yesterday)(-\d+(day|week|month|year)s?)?
1913 """
1914 if strict and not re.fullmatch(r'\d{8}|(now|today|yesterday)(-\d+(day|week|month|year)s?)?', date_str):
1915 raise ValueError(f'Invalid date format "{date_str}"')
1916 return datetime_from_str(date_str, precision='microsecond', format=format).date()
1917
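
# A hedged sketch of the relative-date syntax accepted by date_from_str
# (the `_example_*` helper is illustrative, not part of the module):
def _example_date_from_str():
    assert date_from_str('20230115-2weeks') == datetime.date(2023, 1, 1)
    # '+' offsets are also accepted when strict=False (the default)
    assert date_from_str('19700101+3days') == datetime.date(1970, 1, 4)
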
1918
1919 def datetime_add_months(dt, months):
1920 """Increment/Decrement a datetime object by months."""
1921 month = dt.month + months - 1
1922 year = dt.year + month // 12
1923 month = month % 12 + 1
1924 day = min(dt.day, calendar.monthrange(year, month)[1])
1925 return dt.replace(year, month, day)
1926
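
# Worked example (hypothetical `_example_*` helper, for exposition only):
def _example_datetime_add_months():
    # The day is clamped to the length of the target month
    assert datetime_add_months(datetime.datetime(2023, 1, 31), 1) == datetime.datetime(2023, 2, 28)
    # Negative month counts move backwards across year boundaries
    assert datetime_add_months(datetime.datetime(2023, 1, 15), -2) == datetime.datetime(2022, 11, 15)
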
1927
1928 def datetime_round(dt, precision='day'):
1929 """
1930 Round a datetime object's time to a specific precision
1931 """
1932 if precision == 'microsecond':
1933 return dt
1934
1935 unit_seconds = {
1936 'day': 86400,
1937 'hour': 3600,
1938 'minute': 60,
1939 'second': 1,
1940 }
1941 roundto = lambda x, n: ((x + n / 2) // n) * n
1942 timestamp = calendar.timegm(dt.timetuple())
1943 return datetime.datetime.utcfromtimestamp(roundto(timestamp, unit_seconds[precision]))
1944
1945
1946 def hyphenate_date(date_str):
1947 """
1948 Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
1949 match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
1950 if match is not None:
1951 return '-'.join(match.groups())
1952 else:
1953 return date_str
1954
1955
1956 class DateRange:
1957 """Represents a time interval between two dates"""
1958
1959 def __init__(self, start=None, end=None):
1960 """start and end must be strings in the format accepted by date"""
1961 if start is not None:
1962 self.start = date_from_str(start, strict=True)
1963 else:
1964 self.start = datetime.datetime.min.date()
1965 if end is not None:
1966 self.end = date_from_str(end, strict=True)
1967 else:
1968 self.end = datetime.datetime.max.date()
1969 if self.start > self.end:
1970 raise ValueError('Date range: "%s"; the start date must be before the end date' % self)
1971
1972 @classmethod
1973 def day(cls, day):
1974 """Returns a range that only contains the given day"""
1975 return cls(day, day)
1976
1977 def __contains__(self, date):
1978 """Check if the date is in the range"""
1979 if not isinstance(date, datetime.date):
1980 date = date_from_str(date)
1981 return self.start <= date <= self.end
1982
1983 def __str__(self):
1984 return f'{self.start.isoformat()} - {self.end.isoformat()}'
1985
1986 def __eq__(self, other):
1987 return (isinstance(other, DateRange)
1988 and self.start == other.start and self.end == other.end)
1989
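
# Illustrative usage (hypothetical helper, not part of the module):
def _example_date_range():
    rng = DateRange('20230101', '20230131')
    # Membership accepts date objects as well as strings understood by date_from_str
    assert '20230115' in rng
    assert datetime.date(2023, 2, 1) not in rng
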
1990
1991 def platform_name():
1992 """ Returns the platform name as a str """
1993 deprecation_warning(f'"{__name__}.platform_name" is deprecated, use "platform.platform" instead')
1994 return platform.platform()
1995
1996
1997 @functools.cache
1998 def system_identifier():
1999 python_implementation = platform.python_implementation()
2000 if python_implementation == 'PyPy' and hasattr(sys, 'pypy_version_info'):
2001 python_implementation += ' version %d.%d.%d' % sys.pypy_version_info[:3]
2002 libc_ver = []
2003 with contextlib.suppress(OSError): # We may not have access to the executable
2004 libc_ver = platform.libc_ver()
2005
2006 return 'Python %s (%s %s) - %s (%s%s)' % (
2007 platform.python_version(),
2008 python_implementation,
2009 platform.architecture()[0],
2010 platform.platform(),
2011 ssl.OPENSSL_VERSION,
2012 format_field(join_nonempty(*libc_ver, delim=' '), None, ', %s'),
2013 )
2014
2015
2016 @functools.cache
2017 def get_windows_version():
2018 ''' Get the Windows version. Returns () if not running on Windows '''
2019 if compat_os_name == 'nt':
2020 return version_tuple(platform.win32_ver()[1])
2021 else:
2022 return ()
2023
2024
2025 def write_string(s, out=None, encoding=None):
2026 assert isinstance(s, str)
2027 out = out or sys.stderr
2028
2029 if compat_os_name == 'nt' and supports_terminal_sequences(out):
2030 s = re.sub(r'([\r\n]+)', r' \1', s)
2031
2032 enc, buffer = None, out
2033 if 'b' in getattr(out, 'mode', ''):
2034 enc = encoding or preferredencoding()
2035 elif hasattr(out, 'buffer'):
2036 buffer = out.buffer
2037 enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
2038
2039 buffer.write(s.encode(enc, 'ignore') if enc else s)
2040 out.flush()
2041
2042
2043 def deprecation_warning(msg, *, printer=None, stacklevel=0, **kwargs):
2044 from . import _IN_CLI
2045 if _IN_CLI:
2046 if msg in deprecation_warning._cache:
2047 return
2048 deprecation_warning._cache.add(msg)
2049 if printer:
2050 return printer(f'{msg}{bug_reports_message()}', **kwargs)
2051 return write_string(f'ERROR: {msg}{bug_reports_message()}\n', **kwargs)
2052 else:
2053 import warnings
2054 warnings.warn(DeprecationWarning(msg), stacklevel=stacklevel + 3)
2055
2056
2057 deprecation_warning._cache = set()
2058
2059
2060 def bytes_to_intlist(bs):
2061 if not bs:
2062 return []
2063 if isinstance(bs[0], int): # Python 3
2064 return list(bs)
2065 else:
2066 return [ord(c) for c in bs]
2067
2068
2069 def intlist_to_bytes(xs):
2070 if not xs:
2071 return b''
2072 return struct.pack('%dB' % len(xs), *xs)
2073
2074
2075 class LockingUnsupportedError(OSError):
2076 msg = 'File locking is not supported'
2077
2078 def __init__(self):
2079 super().__init__(self.msg)
2080
2081
2082 # Cross-platform file locking
2083 if sys.platform == 'win32':
2084 import ctypes
2085 import ctypes.wintypes
2086 import msvcrt
2087
2088 class OVERLAPPED(ctypes.Structure):
2089 _fields_ = [
2090 ('Internal', ctypes.wintypes.LPVOID),
2091 ('InternalHigh', ctypes.wintypes.LPVOID),
2092 ('Offset', ctypes.wintypes.DWORD),
2093 ('OffsetHigh', ctypes.wintypes.DWORD),
2094 ('hEvent', ctypes.wintypes.HANDLE),
2095 ]
2096
2097 kernel32 = ctypes.windll.kernel32
2098 LockFileEx = kernel32.LockFileEx
2099 LockFileEx.argtypes = [
2100 ctypes.wintypes.HANDLE, # hFile
2101 ctypes.wintypes.DWORD, # dwFlags
2102 ctypes.wintypes.DWORD, # dwReserved
2103 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
2104 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
2105 ctypes.POINTER(OVERLAPPED) # Overlapped
2106 ]
2107 LockFileEx.restype = ctypes.wintypes.BOOL
2108 UnlockFileEx = kernel32.UnlockFileEx
2109 UnlockFileEx.argtypes = [
2110 ctypes.wintypes.HANDLE, # hFile
2111 ctypes.wintypes.DWORD, # dwReserved
2112 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
2113 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
2114 ctypes.POINTER(OVERLAPPED) # Overlapped
2115 ]
2116 UnlockFileEx.restype = ctypes.wintypes.BOOL
2117 whole_low = 0xffffffff
2118 whole_high = 0x7fffffff
2119
2120 def _lock_file(f, exclusive, block):
2121 overlapped = OVERLAPPED()
2122 overlapped.Offset = 0
2123 overlapped.OffsetHigh = 0
2124 overlapped.hEvent = 0
2125 f._lock_file_overlapped_p = ctypes.pointer(overlapped)
2126
2127 if not LockFileEx(msvcrt.get_osfhandle(f.fileno()),
2128 (0x2 if exclusive else 0x0) | (0x0 if block else 0x1),
2129 0, whole_low, whole_high, f._lock_file_overlapped_p):
2130 # NB: the no-argument form of "ctypes.FormatError" does not work on PyPy
2131 raise BlockingIOError(f'Locking file failed: {ctypes.FormatError(ctypes.GetLastError())!r}')
2132
2133 def _unlock_file(f):
2134 assert f._lock_file_overlapped_p
2135 handle = msvcrt.get_osfhandle(f.fileno())
2136 if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p):
2137 raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
2138
2139 else:
2140 try:
2141 import fcntl
2142
2143 def _lock_file(f, exclusive, block):
2144 flags = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
2145 if not block:
2146 flags |= fcntl.LOCK_NB
2147 try:
2148 fcntl.flock(f, flags)
2149 except BlockingIOError:
2150 raise
2151 except OSError: # AOSP does not have flock()
2152 fcntl.lockf(f, flags)
2153
2154 def _unlock_file(f):
2155 try:
2156 fcntl.flock(f, fcntl.LOCK_UN)
2157 except OSError:
2158 fcntl.lockf(f, fcntl.LOCK_UN)
2159
2160 except ImportError:
2161
2162 def _lock_file(f, exclusive, block):
2163 raise LockingUnsupportedError()
2164
2165 def _unlock_file(f):
2166 raise LockingUnsupportedError()
2167
2168
2169 class locked_file:
2170 locked = False
2171
2172 def __init__(self, filename, mode, block=True, encoding=None):
2173 if mode not in {'r', 'rb', 'a', 'ab', 'w', 'wb'}:
2174 raise NotImplementedError(mode)
2175 self.mode, self.block = mode, block
2176
2177 writable = any(f in mode for f in 'wax+')
2178 readable = any(f in mode for f in 'r+')
2179 flags = functools.reduce(operator.ior, (
2180 getattr(os, 'O_CLOEXEC', 0), # UNIX only
2181 getattr(os, 'O_BINARY', 0), # Windows only
2182 getattr(os, 'O_NOINHERIT', 0), # Windows only
2183 os.O_CREAT if writable else 0, # O_TRUNC only after locking
2184 os.O_APPEND if 'a' in mode else 0,
2185 os.O_EXCL if 'x' in mode else 0,
2186 os.O_RDONLY if not writable else os.O_RDWR if readable else os.O_WRONLY,
2187 ))
2188
2189 self.f = os.fdopen(os.open(filename, flags, 0o666), mode, encoding=encoding)
2190
2191 def __enter__(self):
2192 exclusive = 'r' not in self.mode
2193 try:
2194 _lock_file(self.f, exclusive, self.block)
2195 self.locked = True
2196 except OSError:
2197 self.f.close()
2198 raise
2199 if 'w' in self.mode:
2200 try:
2201 self.f.truncate()
2202 except OSError as e:
2203 if e.errno not in (
2204 errno.ESPIPE, # Illegal seek - expected for FIFO
2205 errno.EINVAL, # Invalid argument - expected for /dev/null
2206 ):
2207 raise
2208 return self
2209
2210 def unlock(self):
2211 if not self.locked:
2212 return
2213 try:
2214 _unlock_file(self.f)
2215 finally:
2216 self.locked = False
2217
2218 def __exit__(self, *_):
2219 try:
2220 self.unlock()
2221 finally:
2222 self.f.close()
2223
2224 open = __enter__
2225 close = __exit__
2226
2227 def __getattr__(self, attr):
2228 return getattr(self.f, attr)
2229
2230 def __iter__(self):
2231 return iter(self.f)
2232
2233
2234 @functools.cache
2235 def get_filesystem_encoding():
2236 encoding = sys.getfilesystemencoding()
2237 return encoding if encoding is not None else 'utf-8'
2238
2239
2240 def shell_quote(args):
2241 quoted_args = []
2242 encoding = get_filesystem_encoding()
2243 for a in args:
2244 if isinstance(a, bytes):
2245 # We may get a filename encoded with 'encodeFilename'
2246 a = a.decode(encoding)
2247 quoted_args.append(compat_shlex_quote(a))
2248 return ' '.join(quoted_args)
2249
2250
2251 def smuggle_url(url, data):
2252 """ Pass additional data in a URL for internal use. """
2253
2254 url, idata = unsmuggle_url(url, {})
2255 data.update(idata)
2256 sdata = urllib.parse.urlencode(
2257 {'__youtubedl_smuggle': json.dumps(data)})
2258 return url + '#' + sdata
2259
2260
2261 def unsmuggle_url(smug_url, default=None):
2262 if '#__youtubedl_smuggle' not in smug_url:
2263 return smug_url, default
2264 url, _, sdata = smug_url.rpartition('#')
2265 jsond = urllib.parse.parse_qs(sdata)['__youtubedl_smuggle'][0]
2266 data = json.loads(jsond)
2267 return url, data
2268
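
# Round-trip sketch for smuggle_url/unsmuggle_url (hypothetical helper):
def _example_smuggle_url():
    url = smuggle_url('https://example.com/video', {'referer': 'https://example.com/'})
    assert unsmuggle_url(url) == ('https://example.com/video', {'referer': 'https://example.com/'})
    # URLs without smuggled data are returned unchanged, with the given default
    assert unsmuggle_url('https://example.com/video', {}) == ('https://example.com/video', {})
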
2269
2270 def format_decimal_suffix(num, fmt='%d%s', *, factor=1000):
2271 """ Formats numbers with decimal sufixes like K, M, etc """
2272 num, factor = float_or_none(num), float(factor)
2273 if num is None or num < 0:
2274 return None
2275 POSSIBLE_SUFFIXES = 'kMGTPEZY'
2276 exponent = 0 if num == 0 else min(int(math.log(num, factor)), len(POSSIBLE_SUFFIXES))
2277 suffix = ['', *POSSIBLE_SUFFIXES][exponent]
2278 if factor == 1024:
2279 suffix = {'k': 'Ki', '': ''}.get(suffix, f'{suffix}i')
2280 converted = num / (factor ** exponent)
2281 return fmt % (converted, suffix)
2282
2283
2284 def format_bytes(bytes):
2285 return format_decimal_suffix(bytes, '%.2f%sB', factor=1024) or 'N/A'
2286
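
# Worked examples (hypothetical helper; values follow from the code above):
def _example_format_decimal_suffix():
    assert format_decimal_suffix(1200000, '%.1f%s') == '1.2M'
    # With factor=1024 the binary ('Ki', 'Mi', ...) suffixes are used
    assert format_bytes(1536) == '1.50KiB'
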
2287
2288 def lookup_unit_table(unit_table, s):
2289 units_re = '|'.join(re.escape(u) for u in unit_table)
2290 m = re.match(
2291 r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
2292 if not m:
2293 return None
2294 num_str = m.group('num').replace(',', '.')
2295 mult = unit_table[m.group('unit')]
2296 return int(float(num_str) * mult)
2297
2298
2299 def parse_filesize(s):
2300 if s is None:
2301 return None
2302
2303 # The lower-case forms are of course incorrect and unofficial,
2304 # but we support those too
2305 _UNIT_TABLE = {
2306 'B': 1,
2307 'b': 1,
2308 'bytes': 1,
2309 'KiB': 1024,
2310 'KB': 1000,
2311 'kB': 1024,
2312 'Kb': 1000,
2313 'kb': 1000,
2314 'kilobytes': 1000,
2315 'kibibytes': 1024,
2316 'MiB': 1024 ** 2,
2317 'MB': 1000 ** 2,
2318 'mB': 1024 ** 2,
2319 'Mb': 1000 ** 2,
2320 'mb': 1000 ** 2,
2321 'megabytes': 1000 ** 2,
2322 'mebibytes': 1024 ** 2,
2323 'GiB': 1024 ** 3,
2324 'GB': 1000 ** 3,
2325 'gB': 1024 ** 3,
2326 'Gb': 1000 ** 3,
2327 'gb': 1000 ** 3,
2328 'gigabytes': 1000 ** 3,
2329 'gibibytes': 1024 ** 3,
2330 'TiB': 1024 ** 4,
2331 'TB': 1000 ** 4,
2332 'tB': 1024 ** 4,
2333 'Tb': 1000 ** 4,
2334 'tb': 1000 ** 4,
2335 'terabytes': 1000 ** 4,
2336 'tebibytes': 1024 ** 4,
2337 'PiB': 1024 ** 5,
2338 'PB': 1000 ** 5,
2339 'pB': 1024 ** 5,
2340 'Pb': 1000 ** 5,
2341 'pb': 1000 ** 5,
2342 'petabytes': 1000 ** 5,
2343 'pebibytes': 1024 ** 5,
2344 'EiB': 1024 ** 6,
2345 'EB': 1000 ** 6,
2346 'eB': 1024 ** 6,
2347 'Eb': 1000 ** 6,
2348 'eb': 1000 ** 6,
2349 'exabytes': 1000 ** 6,
2350 'exbibytes': 1024 ** 6,
2351 'ZiB': 1024 ** 7,
2352 'ZB': 1000 ** 7,
2353 'zB': 1024 ** 7,
2354 'Zb': 1000 ** 7,
2355 'zb': 1000 ** 7,
2356 'zettabytes': 1000 ** 7,
2357 'zebibytes': 1024 ** 7,
2358 'YiB': 1024 ** 8,
2359 'YB': 1000 ** 8,
2360 'yB': 1024 ** 8,
2361 'Yb': 1000 ** 8,
2362 'yb': 1000 ** 8,
2363 'yottabytes': 1000 ** 8,
2364 'yobibytes': 1024 ** 8,
2365 }
2366
2367 return lookup_unit_table(_UNIT_TABLE, s)
2368
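
# Usage sketch (hypothetical helper): binary vs. decimal units
def _example_parse_filesize():
    assert parse_filesize('1.5 MiB') == 1572864  # 1.5 * 1024 ** 2
    assert parse_filesize('500 KB') == 500000    # 'KB' is decimal, 'kB' is binary here
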
2369
2370 def parse_count(s):
2371 if s is None:
2372 return None
2373
2374 s = re.sub(r'^[^\d]+\s', '', s).strip()
2375
2376 if re.match(r'^[\d,.]+$', s):
2377 return str_to_int(s)
2378
2379 _UNIT_TABLE = {
2380 'k': 1000,
2381 'K': 1000,
2382 'm': 1000 ** 2,
2383 'M': 1000 ** 2,
2384 'kk': 1000 ** 2,
2385 'KK': 1000 ** 2,
2386 'b': 1000 ** 3,
2387 'B': 1000 ** 3,
2388 }
2389
2390 ret = lookup_unit_table(_UNIT_TABLE, s)
2391 if ret is not None:
2392 return ret
2393
2394 mobj = re.match(r'([\d,.]+)(?:$|\s)', s)
2395 if mobj:
2396 return str_to_int(mobj.group(1))
2397
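
# Usage sketch (hypothetical helper):
def _example_parse_count():
    assert parse_count('1,234 views') == 1234
    assert parse_count('1.5M') == 1500000
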
2398
2399 def parse_resolution(s, *, lenient=False):
2400 if s is None:
2401 return {}
2402
2403 if lenient:
2404 mobj = re.search(r'(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)', s)
2405 else:
2406 mobj = re.search(r'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s)
2407 if mobj:
2408 return {
2409 'width': int(mobj.group('w')),
2410 'height': int(mobj.group('h')),
2411 }
2412
2413 mobj = re.search(r'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s)
2414 if mobj:
2415 return {'height': int(mobj.group(1))}
2416
2417 mobj = re.search(r'\b([48])[kK]\b', s)
2418 if mobj:
2419 return {'height': int(mobj.group(1)) * 540}
2420
2421 return {}
2422
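
# Usage sketch (hypothetical helper):
def _example_parse_resolution():
    assert parse_resolution('1920x1080') == {'width': 1920, 'height': 1080}
    assert parse_resolution('720p') == {'height': 720}
    assert parse_resolution('4K') == {'height': 2160}  # n * 540
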
2423
2424 def parse_bitrate(s):
2425 if not isinstance(s, str):
2426 return
2427 mobj = re.search(r'\b(\d+)\s*kbps', s)
2428 if mobj:
2429 return int(mobj.group(1))
2430
2431
2432 def month_by_name(name, lang='en'):
2433 """ Return the number of a month by (locale-independently) English name """
2434
2435 month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
2436
2437 try:
2438 return month_names.index(name) + 1
2439 except ValueError:
2440 return None
2441
2442
2443 def month_by_abbreviation(abbrev):
2444 """ Return the number of a month by (locale-independently) English
2445 abbreviations """
2446
2447 try:
2448 return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
2449 except ValueError:
2450 return None
2451
2452
2453 def fix_xml_ampersands(xml_str):
2454 """Replace all the '&' by '&amp;' in XML"""
2455 return re.sub(
2456 r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
2457 '&amp;',
2458 xml_str)
2459
2460
2461 def setproctitle(title):
2462 assert isinstance(title, str)
2463
2464 # Workaround for https://github.com/yt-dlp/yt-dlp/issues/4541
2465 try:
2466 import ctypes
2467 except ImportError:
2468 return
2469
2470 try:
2471 libc = ctypes.cdll.LoadLibrary('libc.so.6')
2472 except OSError:
2473 return
2474 except TypeError:
2475 # LoadLibrary in Windows Python 2.7.13 only expects
2476 # a bytestring, but since unicode_literals turns
2477 # every string into a unicode string, it fails.
2478 return
2479 title_bytes = title.encode()
2480 buf = ctypes.create_string_buffer(len(title_bytes))
2481 buf.value = title_bytes
2482 try:
2483 libc.prctl(15, buf, 0, 0, 0)
2484 except AttributeError:
2485 return # Strange libc, just skip this
2486
2487
2488 def remove_start(s, start):
2489 return s[len(start):] if s is not None and s.startswith(start) else s
2490
2491
2492 def remove_end(s, end):
2493 return s[:-len(end)] if s is not None and s.endswith(end) else s
2494
2495
2496 def remove_quotes(s):
2497 if s is None or len(s) < 2:
2498 return s
2499 for quote in ('"', "'", ):
2500 if s[0] == quote and s[-1] == quote:
2501 return s[1:-1]
2502 return s
2503
2504
2505 def get_domain(url):
2506 """
2507 This implementation is inconsistent, but is kept for compatibility.
2508 Use this only for "webpage_url_domain"
2509 """
2510 return remove_start(urllib.parse.urlparse(url).netloc, 'www.') or None
2511
2512
2513 def url_basename(url):
2514 path = urllib.parse.urlparse(url).path
2515 return path.strip('/').split('/')[-1]
2516
2517
2518 def base_url(url):
2519 return re.match(r'https?://[^?#]+/', url).group()
2520
2521
2522 def urljoin(base, path):
2523 if isinstance(path, bytes):
2524 path = path.decode()
2525 if not isinstance(path, str) or not path:
2526 return None
2527 if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
2528 return path
2529 if isinstance(base, bytes):
2530 base = base.decode()
2531 if not isinstance(base, str) or not re.match(
2532 r'^(?:https?:)?//', base):
2533 return None
2534 return urllib.parse.urljoin(base, path)
2535
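
# Usage sketch (hypothetical helper): urljoin is tolerant of bad input
def _example_urljoin():
    assert urljoin('https://example.com/a/', 'b.mp4') == 'https://example.com/a/b.mp4'
    # Protocol-relative paths are returned as-is
    assert urljoin('https://example.com/', '//cdn.example.com/x.mp4') == '//cdn.example.com/x.mp4'
    # Invalid bases yield None instead of raising
    assert urljoin('not a url', '/x.mp4') is None
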
2536
2537 class HEADRequest(urllib.request.Request):
2538 def get_method(self):
2539 return 'HEAD'
2540
2541
2542 class PUTRequest(urllib.request.Request):
2543 def get_method(self):
2544 return 'PUT'
2545
2546
2547 def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
2548 if get_attr and v is not None:
2549 v = getattr(v, get_attr, None)
2550 try:
2551 return int(v) * invscale // scale
2552 except (ValueError, TypeError, OverflowError):
2553 return default
2554
2555
2556 def str_or_none(v, default=None):
2557 return default if v is None else str(v)
2558
2559
2560 def str_to_int(int_str):
2561 """ A more relaxed version of int_or_none """
2562 if isinstance(int_str, int):
2563 return int_str
2564 elif isinstance(int_str, str):
2565 int_str = re.sub(r'[,\.\+]', '', int_str)
2566 return int_or_none(int_str)
2567
2568
2569 def float_or_none(v, scale=1, invscale=1, default=None):
2570 if v is None:
2571 return default
2572 try:
2573 return float(v) * invscale / scale
2574 except (ValueError, TypeError):
2575 return default
2576
2577
2578 def bool_or_none(v, default=None):
2579 return v if isinstance(v, bool) else default
2580
2581
2582 def strip_or_none(v, default=None):
2583 return v.strip() if isinstance(v, str) else default
2584
2585
2586 def url_or_none(url):
2587 if not url or not isinstance(url, str):
2588 return None
2589 url = url.strip()
2590 return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None
2591
2592
2593 def request_to_url(req):
2594 if isinstance(req, urllib.request.Request):
2595 return req.get_full_url()
2596 else:
2597 return req
2598
2599
2600 def strftime_or_none(timestamp, date_format, default=None):
2601 datetime_object = None
2602 try:
2603 if isinstance(timestamp, (int, float)): # unix timestamp
2604 # Using naive datetime here can break timestamp() in Windows
2605 # Ref: https://github.com/yt-dlp/yt-dlp/issues/5185, https://github.com/python/cpython/issues/94414
2606 datetime_object = datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc)
2607 elif isinstance(timestamp, str): # assume YYYYMMDD
2608 datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
2609 date_format = re.sub( # Support %s on windows
2610 r'(?<!%)(%%)*%s', rf'\g<1>{int(datetime_object.timestamp())}', date_format)
2611 return datetime_object.strftime(date_format)
2612 except (ValueError, TypeError, AttributeError):
2613 return default
2614
2615
2616 def parse_duration(s):
2617 if not isinstance(s, str):
2618 return None
2619 s = s.strip()
2620 if not s:
2621 return None
2622
2623 days, hours, mins, secs, ms = [None] * 5
2624 m = re.match(r'''(?x)
2625 (?P<before_secs>
2626 (?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?
2627 (?P<secs>(?(before_secs)[0-9]{1,2}|[0-9]+))
2628 (?P<ms>[.:][0-9]+)?Z?$
2629 ''', s)
2630 if m:
2631 days, hours, mins, secs, ms = m.group('days', 'hours', 'mins', 'secs', 'ms')
2632 else:
2633 m = re.match(
2634 r'''(?ix)(?:P?
2635 (?:
2636 [0-9]+\s*y(?:ears?)?,?\s*
2637 )?
2638 (?:
2639 [0-9]+\s*m(?:onths?)?,?\s*
2640 )?
2641 (?:
2642 [0-9]+\s*w(?:eeks?)?,?\s*
2643 )?
2644 (?:
2645 (?P<days>[0-9]+)\s*d(?:ays?)?,?\s*
2646 )?
2647 T)?
2648 (?:
2649 (?P<hours>[0-9]+)\s*h(?:ours?)?,?\s*
2650 )?
2651 (?:
2652 (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?,?\s*
2653 )?
2654 (?:
2655 (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
2656 )?Z?$''', s)
2657 if m:
2658 days, hours, mins, secs, ms = m.groups()
2659 else:
2660 m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
2661 if m:
2662 hours, mins = m.groups()
2663 else:
2664 return None
2665
2666 if ms:
2667 ms = ms.replace(':', '.')
2668 return sum(float(part or 0) * mult for part, mult in (
2669 (days, 86400), (hours, 3600), (mins, 60), (secs, 1), (ms, 1)))
2670
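
# A few accepted shapes (hypothetical helper; all return seconds as float):
def _example_parse_duration():
    assert parse_duration('1:30') == 90.0
    assert parse_duration('3h 11m 53s') == 11513.0
    assert parse_duration('PT1H30M') == 5400.0  # ISO 8601 style
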
2671
2672 def prepend_extension(filename, ext, expected_real_ext=None):
2673 name, real_ext = os.path.splitext(filename)
2674 return (
2675 f'{name}.{ext}{real_ext}'
2676 if not expected_real_ext or real_ext[1:] == expected_real_ext
2677 else f'{filename}.{ext}')
2678
2679
2680 def replace_extension(filename, ext, expected_real_ext=None):
2681 name, real_ext = os.path.splitext(filename)
2682 return '{}.{}'.format(
2683 name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
2684 ext)
2685
2686
2687 def check_executable(exe, args=[]):
2688 """ Checks if the given binary is installed somewhere in PATH, and returns its name.
2689 args can be a list of arguments for a short output (like -version) """
2690 try:
2691 Popen.run([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
2692 except OSError:
2693 return False
2694 return exe
2695
2696
2697 def _get_exe_version_output(exe, args, *, to_screen=None):
2698 if to_screen:
2699 to_screen(f'Checking exe version: {shell_quote([exe] + args)}')
2700 try:
2701 # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
2702 # SIGTTOU if yt-dlp is run in the background.
2703 # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
2704 stdout, _, _ = Popen.run([encodeArgument(exe)] + args, text=True,
2705 stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
2706 except OSError:
2707 return False
2708 return stdout
2709
2710
2711 def detect_exe_version(output, version_re=None, unrecognized='present'):
2712 assert isinstance(output, str)
2713 if version_re is None:
2714 version_re = r'version\s+([-0-9._a-zA-Z]+)'
2715 m = re.search(version_re, output)
2716 if m:
2717 return m.group(1)
2718 else:
2719 return unrecognized
2720
2721
2722 def get_exe_version(exe, args=['--version'],
2723 version_re=None, unrecognized='present'):
2724 """ Returns the version of the specified executable,
2725 or False if the executable is not present """
2726 out = _get_exe_version_output(exe, args)
2727 return detect_exe_version(out, version_re, unrecognized) if out else False
2728
2729
2730 def frange(start=0, stop=None, step=1):
2731 """Float range"""
2732 if stop is None:
2733 start, stop = 0, start
2734 sign = [-1, 1][step > 0] if step else 0
2735 while sign * start < sign * stop:
2736 yield start
2737 start += step
2738
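
# Usage sketch (hypothetical helper):
def _example_frange():
    assert list(frange(3)) == [0, 1, 2]
    assert list(frange(0, 1, 0.25)) == [0, 0.25, 0.5, 0.75]
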
2739
2740 class LazyList(collections.abc.Sequence):
2741 """Lazy immutable list from an iterable
2742 Note that slices of a LazyList are lists and not LazyList"""
2743
2744 class IndexError(IndexError):
2745 pass
2746
2747 def __init__(self, iterable, *, reverse=False, _cache=None):
2748 self._iterable = iter(iterable)
2749 self._cache = [] if _cache is None else _cache
2750 self._reversed = reverse
2751
2752 def __iter__(self):
2753 if self._reversed:
2754 # We need to consume the entire iterable to iterate in reverse
2755 yield from self.exhaust()
2756 return
2757 yield from self._cache
2758 for item in self._iterable:
2759 self._cache.append(item)
2760 yield item
2761
2762 def _exhaust(self):
2763 self._cache.extend(self._iterable)
2764 self._iterable = [] # Discard the emptied iterable to make it pickle-able
2765 return self._cache
2766
2767 def exhaust(self):
2768 """Evaluate the entire iterable"""
2769 return self._exhaust()[::-1 if self._reversed else 1]
2770
2771 @staticmethod
2772 def _reverse_index(x):
2773 return None if x is None else ~x
2774
2775 def __getitem__(self, idx):
2776 if isinstance(idx, slice):
2777 if self._reversed:
2778 idx = slice(self._reverse_index(idx.start), self._reverse_index(idx.stop), -(idx.step or 1))
2779 start, stop, step = idx.start, idx.stop, idx.step or 1
2780 elif isinstance(idx, int):
2781 if self._reversed:
2782 idx = self._reverse_index(idx)
2783 start, stop, step = idx, idx, 0
2784 else:
2785 raise TypeError('indices must be integers or slices')
2786 if ((start or 0) < 0 or (stop or 0) < 0
2787 or (start is None and step < 0)
2788 or (stop is None and step > 0)):
2789 # We need to consume the entire iterable to be able to slice from the end
2790 # Obviously, never use this with infinite iterables
2791 self._exhaust()
2792 try:
2793 return self._cache[idx]
2794 except IndexError as e:
2795 raise self.IndexError(e) from e
2796 n = max(start or 0, stop or 0) - len(self._cache) + 1
2797 if n > 0:
2798 self._cache.extend(itertools.islice(self._iterable, n))
2799 try:
2800 return self._cache[idx]
2801 except IndexError as e:
2802 raise self.IndexError(e) from e
2803
2804 def __bool__(self):
2805 try:
2806 self[-1] if self._reversed else self[0]
2807 except self.IndexError:
2808 return False
2809 return True
2810
2811 def __len__(self):
2812 self._exhaust()
2813 return len(self._cache)
2814
2815 def __reversed__(self):
2816 return type(self)(self._iterable, reverse=not self._reversed, _cache=self._cache)
2817
2818 def __copy__(self):
2819 return type(self)(self._iterable, reverse=self._reversed, _cache=self._cache)
2820
2821 def __repr__(self):
2822 # repr and str should mimic a list. So we exhaust the iterable
2823 return repr(self.exhaust())
2824
2825 def __str__(self):
2826 return repr(self.exhaust())
2827
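
# Usage sketch (hypothetical helper): indexing only consumes what it must
def _example_lazy_list():
    ll = LazyList(itertools.count())
    assert ll[5] == 5           # caches items 0..5; the iterator stays live
    assert ll[:3] == [0, 1, 2]  # slices are plain lists, not LazyList
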
2828
2829 class PagedList:
2830
2831 class IndexError(IndexError):
2832 pass
2833
2834 def __len__(self):
2835 # This is only useful for tests
2836 return len(self.getslice())
2837
2838 def __init__(self, pagefunc, pagesize, use_cache=True):
2839 self._pagefunc = pagefunc
2840 self._pagesize = pagesize
2841 self._pagecount = float('inf')
2842 self._use_cache = use_cache
2843 self._cache = {}
2844
2845 def getpage(self, pagenum):
2846 page_results = self._cache.get(pagenum)
2847 if page_results is None:
2848 page_results = [] if pagenum > self._pagecount else list(self._pagefunc(pagenum))
2849 if self._use_cache:
2850 self._cache[pagenum] = page_results
2851 return page_results
2852
2853 def getslice(self, start=0, end=None):
2854 return list(self._getslice(start, end))
2855
2856 def _getslice(self, start, end):
2857 raise NotImplementedError('This method must be implemented by subclasses')
2858
2859 def __getitem__(self, idx):
2860 assert self._use_cache, 'Indexing PagedList requires cache'
2861 if not isinstance(idx, int) or idx < 0:
2862 raise TypeError('indices must be non-negative integers')
2863 entries = self.getslice(idx, idx + 1)
2864 if not entries:
2865 raise self.IndexError()
2866 return entries[0]
2867
2868
2869 class OnDemandPagedList(PagedList):
2870 """Download pages until a page with less than maximum results"""
2871
2872 def _getslice(self, start, end):
2873 for pagenum in itertools.count(start // self._pagesize):
2874 firstid = pagenum * self._pagesize
2875 nextfirstid = pagenum * self._pagesize + self._pagesize
2876 if start >= nextfirstid:
2877 continue
2878
2879 startv = (
2880 start % self._pagesize
2881 if firstid <= start < nextfirstid
2882 else 0)
2883 endv = (
2884 ((end - 1) % self._pagesize) + 1
2885 if (end is not None and firstid <= end <= nextfirstid)
2886 else None)
2887
2888 try:
2889 page_results = self.getpage(pagenum)
2890 except Exception:
2891 self._pagecount = pagenum - 1
2892 raise
2893 if startv != 0 or endv is not None:
2894 page_results = page_results[startv:endv]
2895 yield from page_results
2896
2897 # A little optimization: if the current page is not "full", i.e. does
2898 # not contain page_size videos, then we can assume that this page
2899 # is the last one - there are no more ids on further pages,
2900 # so there is no need to query again.
2901 if len(page_results) + startv < self._pagesize:
2902 break
2903
2904 # If we got the whole page, but the next page is not interesting,
2905 # break out early as well
2906 if end == nextfirstid:
2907 break
2908
2909
2910 class InAdvancePagedList(PagedList):
2911 """PagedList with total number of pages known in advance"""
2912
2913 def __init__(self, pagefunc, pagecount, pagesize):
2914 PagedList.__init__(self, pagefunc, pagesize, True)
2915 self._pagecount = pagecount
2916
2917 def _getslice(self, start, end):
2918 start_page = start // self._pagesize
2919 end_page = self._pagecount if end is None else min(self._pagecount, end // self._pagesize + 1)
2920 skip_elems = start - start_page * self._pagesize
2921 only_more = None if end is None else end - start
2922 for pagenum in range(start_page, end_page):
2923 page_results = self.getpage(pagenum)
2924 if skip_elems:
2925 page_results = page_results[skip_elems:]
2926 skip_elems = None
2927 if only_more is not None:
2928 if len(page_results) < only_more:
2929 only_more -= len(page_results)
2930 else:
2931 yield from page_results[:only_more]
2932 break
2933 yield from page_results
2934
2935
2936 class PlaylistEntries:
2937 MissingEntry = object()
2938 is_exhausted = False
2939
2940 def __init__(self, ydl, info_dict):
2941 self.ydl = ydl
2942
2943 # _entries must be assigned now since infodict can change during iteration
2944 entries = info_dict.get('entries')
2945 if entries is None:
2946 raise EntryNotInPlaylist('There are no entries')
2947 elif isinstance(entries, list):
2948 self.is_exhausted = True
2949
2950 requested_entries = info_dict.get('requested_entries')
2951 self.is_incomplete = bool(requested_entries)
2952 if self.is_incomplete:
2953 assert self.is_exhausted
2954 self._entries = [self.MissingEntry] * max(requested_entries)
2955 for i, entry in zip(requested_entries, entries):
2956 self._entries[i - 1] = entry
2957 elif isinstance(entries, (list, PagedList, LazyList)):
2958 self._entries = entries
2959 else:
2960 self._entries = LazyList(entries)
2961
2962 PLAYLIST_ITEMS_RE = re.compile(r'''(?x)
2963 (?P<start>[+-]?\d+)?
2964 (?P<range>[:-]
2965 (?P<end>[+-]?\d+|inf(?:inite)?)?
2966 (?::(?P<step>[+-]?\d+))?
2967 )?''')
2968
2969 @classmethod
2970 def parse_playlist_items(cls, string):
2971 for segment in string.split(','):
2972 if not segment:
2973 raise ValueError('There are two or more consecutive commas')
2974 mobj = cls.PLAYLIST_ITEMS_RE.fullmatch(segment)
2975 if not mobj:
2976 raise ValueError(f'{segment!r} is not a valid specification')
2977 start, end, step, has_range = mobj.group('start', 'end', 'step', 'range')
2978 if int_or_none(step) == 0:
2979 raise ValueError(f'Step in {segment!r} cannot be zero')
2980 yield slice(int_or_none(start), float_or_none(end), int_or_none(step)) if has_range else int(start)
2981
2982 def get_requested_items(self):
2983 playlist_items = self.ydl.params.get('playlist_items')
2984 playlist_start = self.ydl.params.get('playliststart', 1)
2985 playlist_end = self.ydl.params.get('playlistend')
2986 # For backwards compatibility, interpret -1 as whole list
2987 if playlist_end in (-1, None):
2988 playlist_end = ''
2989 if not playlist_items:
2990 playlist_items = f'{playlist_start}:{playlist_end}'
2991 elif playlist_start != 1 or playlist_end:
2992 self.ydl.report_warning('Ignoring playliststart and playlistend because playlistitems was given', only_once=True)
2993
2994 for index in self.parse_playlist_items(playlist_items):
2995 for i, entry in self[index]:
2996 yield i, entry
2997 if not entry:
2998 continue
2999 try:
3000 # TODO: Add auto-generated fields
3001 self.ydl._match_entry(entry, incomplete=True, silent=True)
3002 except (ExistingVideoReached, RejectedVideoReached):
3003 return
3004
3005 def get_full_count(self):
3006 if self.is_exhausted and not self.is_incomplete:
3007 return len(self)
3008 elif isinstance(self._entries, InAdvancePagedList):
3009 if self._entries._pagesize == 1:
3010 return self._entries._pagecount
3011
3012 @functools.cached_property
3013 def _getter(self):
3014 if isinstance(self._entries, list):
3015 def get_entry(i):
3016 try:
3017 entry = self._entries[i]
3018 except IndexError:
3019 entry = self.MissingEntry
3020 if not self.is_incomplete:
3021 raise self.IndexError()
3022 if entry is self.MissingEntry:
3023 raise EntryNotInPlaylist(f'Entry {i} cannot be found')
3024 return entry
3025 else:
3026 def get_entry(i):
3027 try:
3028 return type(self.ydl)._handle_extraction_exceptions(lambda _, i: self._entries[i])(self.ydl, i)
3029 except (LazyList.IndexError, PagedList.IndexError):
3030 raise self.IndexError()
3031 return get_entry
3032
3033 def __getitem__(self, idx):
3034 if isinstance(idx, int):
3035 idx = slice(idx, idx)
3036
3037 # NB: PlaylistEntries[1:10] => (0, 1, ... 9)
3038 step = 1 if idx.step is None else idx.step
3039 if idx.start is None:
3040 start = 0 if step > 0 else len(self) - 1
3041 else:
3042 start = idx.start - 1 if idx.start >= 0 else len(self) + idx.start
3043
3044 # NB: Do not call len(self) when idx == [:]
3045 if idx.stop is None:
3046 stop = 0 if step < 0 else float('inf')
3047 else:
3048 stop = idx.stop - 1 if idx.stop >= 0 else len(self) + idx.stop
3049 stop += [-1, 1][step > 0]
3050
3051 for i in frange(start, stop, step):
3052 if i < 0:
3053 continue
3054 try:
3055 entry = self._getter(i)
3056 except self.IndexError:
3057 self.is_exhausted = True
3058 if step > 0:
3059 break
3060 continue
3061 yield i + 1, entry
3062
3063 def __len__(self):
3064 return len(tuple(self[:]))
3065
3066 class IndexError(IndexError):
3067 pass
3068
3069
3070 def uppercase_escape(s):
3071 unicode_escape = codecs.getdecoder('unicode_escape')
3072 return re.sub(
3073 r'\\U[0-9a-fA-F]{8}',
3074 lambda m: unicode_escape(m.group(0))[0],
3075 s)
3076
3077
3078 def lowercase_escape(s):
3079 unicode_escape = codecs.getdecoder('unicode_escape')
3080 return re.sub(
3081 r'\\u[0-9a-fA-F]{4}',
3082 lambda m: unicode_escape(m.group(0))[0],
3083 s)
3084
3085
3086 def escape_rfc3986(s):
3087 """Escape non-ASCII characters as suggested by RFC 3986"""
3088 return urllib.parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
3089
3090
3091 def escape_url(url):
3092 """Escape URL as suggested by RFC 3986"""
3093 url_parsed = urllib.parse.urlparse(url)
3094 return url_parsed._replace(
3095 netloc=url_parsed.netloc.encode('idna').decode('ascii'),
3096 path=escape_rfc3986(url_parsed.path),
3097 params=escape_rfc3986(url_parsed.params),
3098 query=escape_rfc3986(url_parsed.query),
3099 fragment=escape_rfc3986(url_parsed.fragment)
3100 ).geturl()
3101
3102
3103 def parse_qs(url, **kwargs):
3104 return urllib.parse.parse_qs(urllib.parse.urlparse(url).query, **kwargs)
3105
3106
3107 def read_batch_urls(batch_fd):
3108 def fixup(url):
3109 if not isinstance(url, str):
3110 url = url.decode('utf-8', 'replace')
3111 BOM_UTF8 = ('\xef\xbb\xbf', '\ufeff')
3112 for bom in BOM_UTF8:
3113 if url.startswith(bom):
3114 url = url[len(bom):]
3115 url = url.lstrip()
3116 if not url or url.startswith(('#', ';', ']')):
3117 return False
3118 # "#" cannot be stripped out since it is part of the URI
3119 # However, it can be safely stripped out if it follows whitespace
3120 return re.split(r'\s#', url, 1)[0].rstrip()
3121
3122 with contextlib.closing(batch_fd) as fd:
3123 return [url for url in map(fixup, fd) if url]
3124
3125
3126 def urlencode_postdata(*args, **kargs):
3127 return urllib.parse.urlencode(*args, **kargs).encode('ascii')
3128
3129
3130 def update_url_query(url, query):
3131 if not query:
3132 return url
3133 parsed_url = urllib.parse.urlparse(url)
3134 qs = urllib.parse.parse_qs(parsed_url.query)
3135 qs.update(query)
3136 return urllib.parse.urlunparse(parsed_url._replace(
3137 query=urllib.parse.urlencode(qs, True)))
3138
3139
3140 def update_Request(req, url=None, data=None, headers=None, query=None):
3141 req_headers = req.headers.copy()
3142 req_headers.update(headers or {})
3143 req_data = data or req.data
3144 req_url = update_url_query(url or req.get_full_url(), query)
3145 req_get_method = req.get_method()
3146 if req_get_method == 'HEAD':
3147 req_type = HEADRequest
3148 elif req_get_method == 'PUT':
3149 req_type = PUTRequest
3150 else:
3151 req_type = urllib.request.Request
3152 new_req = req_type(
3153 req_url, data=req_data, headers=req_headers,
3154 origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
3155 if hasattr(req, 'timeout'):
3156 new_req.timeout = req.timeout
3157 return new_req
3158
3159
3160 def _multipart_encode_impl(data, boundary):
3161 content_type = 'multipart/form-data; boundary=%s' % boundary
3162
3163 out = b''
3164 for k, v in data.items():
3165 out += b'--' + boundary.encode('ascii') + b'\r\n'
3166 if isinstance(k, str):
3167 k = k.encode()
3168 if isinstance(v, str):
3169 v = v.encode()
3170 # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
3171 # suggests sending UTF-8 directly. Firefox sends UTF-8, too
3172 content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
3173 if boundary.encode('ascii') in content:
3174 raise ValueError('Boundary overlaps with data')
3175 out += content
3176
3177 out += b'--' + boundary.encode('ascii') + b'--\r\n'
3178
3179 return out, content_type
3180
3181
3182 def multipart_encode(data, boundary=None):
3183 '''
3184 Encode a dict to RFC 7578-compliant form-data
3185
3186 data:
3187 A dict where keys and values can be either Unicode or bytes-like
3188 objects.
3189 boundary:
3190 If specified, it must be a Unicode object and is used as the
3191 boundary. Otherwise a random boundary is generated.
3192
3193 Reference: https://tools.ietf.org/html/rfc7578
3194 '''
3195 has_specified_boundary = boundary is not None
3196
3197 while True:
3198 if boundary is None:
3199 boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
3200
3201 try:
3202 out, content_type = _multipart_encode_impl(data, boundary)
3203 break
3204 except ValueError:
3205 if has_specified_boundary:
3206 raise
3207 boundary = None
3208
3209 return out, content_type
3210
3211
3212 def variadic(x, allowed_types=(str, bytes, dict)):
3213 return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,)
3214
3215
3216 def dict_get(d, key_or_keys, default=None, skip_false_values=True):
3217 for val in map(d.get, variadic(key_or_keys)):
3218 if val is not None and (val or not skip_false_values):
3219 return val
3220 return default
3221
3222
3223 def try_call(*funcs, expected_type=None, args=[], kwargs={}):
3224 for f in funcs:
3225 try:
3226 val = f(*args, **kwargs)
3227 except (AttributeError, KeyError, TypeError, IndexError, ValueError, ZeroDivisionError):
3228 pass
3229 else:
3230 if expected_type is None or isinstance(val, expected_type):
3231 return val
3232
3233
3234 def try_get(src, getter, expected_type=None):
3235 return try_call(*variadic(getter), args=(src,), expected_type=expected_type)
3236
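
# Usage sketch (hypothetical helper):
def _example_dict_get():
    d = {'a': '', 'b': 'x', 'c': None}
    assert dict_get(d, ('a', 'b')) == 'x'  # falsy values are skipped by default
    assert dict_get(d, ('a', 'b'), skip_false_values=False) == ''
    assert try_get(d, lambda x: x['missing'][0], str) is None  # exceptions are swallowed
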
3237
3238 def filter_dict(dct, cndn=lambda _, v: v is not None):
3239 return {k: v for k, v in dct.items() if cndn(k, v)}
3240
3241
3242 def merge_dicts(*dicts):
3243 merged = {}
3244 for a_dict in dicts:
3245 for k, v in a_dict.items():
3246 if (v is not None and k not in merged
3247 or isinstance(v, str) and merged[k] == ''):
3248 merged[k] = v
3249 return merged
3250
3251
3252 def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
3253 return string if isinstance(string, str) else str(string, encoding, errors)
3254
3255
3256 US_RATINGS = {
3257 'G': 0,
3258 'PG': 10,
3259 'PG-13': 13,
3260 'R': 16,
3261 'NC': 18,
3262 }
3263
3264
3265 TV_PARENTAL_GUIDELINES = {
3266 'TV-Y': 0,
3267 'TV-Y7': 7,
3268 'TV-G': 0,
3269 'TV-PG': 0,
3270 'TV-14': 14,
3271 'TV-MA': 17,
3272 }
3273
3274
3275 def parse_age_limit(s):
3276 # isinstance(False, int) is True. So type() must be used instead
3277 if type(s) is int: # noqa: E721
3278 return s if 0 <= s <= 21 else None
3279 elif not isinstance(s, str):
3280 return None
3281 m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
3282 if m:
3283 return int(m.group('age'))
3284 s = s.upper()
3285 if s in US_RATINGS:
3286 return US_RATINGS[s]
3287 m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
3288 if m:
3289 return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
3290 return None
3291
3292
3293 def strip_jsonp(code):
3294 return re.sub(
3295 r'''(?sx)^
3296 (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
3297 (?:\s*&&\s*(?P=func_name))?
3298 \s*\(\s*(?P<callback_data>.*)\);?
3299 \s*?(?://[^\n]*)*$''',
3300 r'\g<callback_data>', code)
3301
3302
3303 def js_to_json(code, vars={}, *, strict=False):
3304 # vars is a dict of var, val pairs to substitute
3305 STRING_QUOTES = '\'"'
3306 STRING_RE = '|'.join(rf'{q}(?:\\.|[^\\{q}])*{q}' for q in STRING_QUOTES)
3307 COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
3308 SKIP_RE = fr'\s*(?:{COMMENT_RE})?\s*'
3309 INTEGER_TABLE = (
3310 (fr'(?s)^(0[xX][0-9a-fA-F]+){SKIP_RE}:?$', 16),
3311 (fr'(?s)^(0+[0-7]+){SKIP_RE}:?$', 8),
3312 )
3313
3314 def process_escape(match):
3315 JSON_PASSTHROUGH_ESCAPES = R'"\bfnrtu'
3316 escape = match.group(1) or match.group(2)
3317
3318 return (Rf'\{escape}' if escape in JSON_PASSTHROUGH_ESCAPES
3319 else R'\u00' if escape == 'x'
3320 else '' if escape == '\n'
3321 else escape)
3322
3323 def fix_kv(m):
3324 v = m.group(0)
3325 if v in ('true', 'false', 'null'):
3326 return v
3327 elif v in ('undefined', 'void 0'):
3328 return 'null'
3329 elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
3330 return ''
3331
3332 if v[0] in STRING_QUOTES:
3333 escaped = re.sub(r'(?s)(")|\\(.)', process_escape, v[1:-1])
3334 return f'"{escaped}"'
3335
3336 for regex, base in INTEGER_TABLE:
3337 im = re.match(regex, v)
3338 if im:
3339 i = int(im.group(1), base)
3340 return f'"{i}":' if v.endswith(':') else str(i)
3341
3342 if v in vars:
3343 return json.dumps(vars[v])
3344
3345 if not strict:
3346 return f'"{v}"'
3347
3348 raise ValueError(f'Unknown value: {v}')
3349
3350 def create_map(mobj):
3351 return json.dumps(dict(json.loads(js_to_json(mobj.group(1) or '[]', vars=vars))))
3352
3353 code = re.sub(r'new Map\((\[.*?\])?\)', create_map, code)
3354 if not strict:
3355 code = re.sub(r'new Date\((".+")\)', r'\g<1>', code)
3356 code = re.sub(r'new \w+\((.*?)\)', lambda m: json.dumps(m.group(0)), code)
3357
3358 return re.sub(rf'''(?sx)
3359 {STRING_RE}|
3360 {COMMENT_RE}|,(?={SKIP_RE}[\]}}])|
3361 void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
3362 \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{SKIP_RE}:)?|
3363 [0-9]+(?={SKIP_RE}:)|
3364 !+
3365 ''', fix_kv, code)
3366
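
# Usage sketch (hypothetical helper): JS object literals to valid JSON
def _example_js_to_json():
    assert js_to_json("{abc: 'def'}") == '{"abc": "def"}'
    assert js_to_json('{"x": 0x1A}') == '{"x": 26}'
    assert json.loads(js_to_json('{key: undefined}')) == {'key': None}
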
3367
3368 def qualities(quality_ids):
3369 """ Get a numeric quality value out of a list of possible values """
3370 def q(qid):
3371 try:
3372 return quality_ids.index(qid)
3373 except ValueError:
3374 return -1
3375 return q
3376
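
# Usage sketch (hypothetical helper):
def _example_qualities():
    q = qualities(['240p', '360p', '720p'])
    assert q('720p') == 2
    assert q('4320p') == -1  # unknown IDs sort below all known ones
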
3377
3378 POSTPROCESS_WHEN = ('pre_process', 'after_filter', 'before_dl', 'post_process', 'after_move', 'after_video', 'playlist')
3379
3380
3381 DEFAULT_OUTTMPL = {
3382 'default': '%(title)s [%(id)s].%(ext)s',
3383 'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
3384 }
3385 OUTTMPL_TYPES = {
3386 'chapter': None,
3387 'subtitle': None,
3388 'thumbnail': None,
3389 'description': 'description',
3390 'annotation': 'annotations.xml',
3391 'infojson': 'info.json',
3392 'link': None,
3393 'pl_video': None,
3394 'pl_thumbnail': None,
3395 'pl_description': 'description',
3396 'pl_infojson': 'info.json',
3397 }
3398
3399 # As of [1] format syntax is:
3400 # %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
3401 # 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
3402 STR_FORMAT_RE_TMPL = r'''(?x)
3403 (?<!%)(?P<prefix>(?:%%)*)
3404 %
3405 (?P<has_key>\((?P<key>{0})\))?
3406 (?P<format>
3407 (?P<conversion>[#0\-+ ]+)?
3408 (?P<min_width>\d+)?
3409 (?P<precision>\.\d+)?
3410 (?P<len_mod>[hlL])? # unused in python
3411 {1} # conversion type
3412 )
3413 '''
3414
3415
3416 STR_FORMAT_TYPES = 'diouxXeEfFgGcrs'
3417
3418
3419 def limit_length(s, length):
3420 """ Add ellipses to overly long strings """
3421 if s is None:
3422 return None
3423 ELLIPSES = '...'
3424 if len(s) > length:
3425 return s[:length - len(ELLIPSES)] + ELLIPSES
3426 return s
3427
3428
3429 def version_tuple(v):
3430 return tuple(int(e) for e in re.split(r'[-.]', v))
3431
3432
3433 def is_outdated_version(version, limit, assume_new=True):
3434 if not version:
3435 return not assume_new
3436 try:
3437 return version_tuple(version) < version_tuple(limit)
3438 except ValueError:
3439 return not assume_new
3440
3441
3442 def ytdl_is_updateable():
3443 """ Returns if yt-dlp can be updated with -U """
3444
3445 from .update import is_non_updateable
3446
3447 return not is_non_updateable()
3448
3449
3450 def args_to_str(args):
3451 # Get a short string representation for a subprocess command
3452 return ' '.join(compat_shlex_quote(a) for a in args)
3453
3454
3455 def error_to_compat_str(err):
3456 return str(err)
3457
3458
3459 def error_to_str(err):
3460 return f'{type(err).__name__}: {err}'
3461
3462
3463 def mimetype2ext(mt):
3464 if mt is None:
3465 return None
3466
3467 mt, _, params = mt.partition(';')
3468 mt = mt.strip()
3469
3470 FULL_MAP = {
3471 'audio/mp4': 'm4a',
3472 # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. We use .mp3 here
3473 # as it's the most popular one
3474 'audio/mpeg': 'mp3',
3475 'audio/x-wav': 'wav',
3476 'audio/wav': 'wav',
3477 'audio/wave': 'wav',
3478 }
3479
3480 ext = FULL_MAP.get(mt)
3481 if ext is not None:
3482 return ext
3483
3484 SUBTYPE_MAP = {
3485 '3gpp': '3gp',
3486 'smptett+xml': 'tt',
3487 'ttaf+xml': 'dfxp',
3488 'ttml+xml': 'ttml',
3489 'x-flv': 'flv',
3490 'x-mp4-fragmented': 'mp4',
3491 'x-ms-sami': 'sami',
3492 'x-ms-wmv': 'wmv',
3493 'mpegurl': 'm3u8',
3494 'x-mpegurl': 'm3u8',
3495 'vnd.apple.mpegurl': 'm3u8',
3496 'dash+xml': 'mpd',
3497 'f4m+xml': 'f4m',
3498 'hds+xml': 'f4m',
3499 'vnd.ms-sstr+xml': 'ism',
3500 'quicktime': 'mov',
3501 'mp2t': 'ts',
3502 'x-wav': 'wav',
3503 'filmstrip+json': 'fs',
3504 'svg+xml': 'svg',
3505 }
3506
3507 _, _, subtype = mt.rpartition('/')
3508 ext = SUBTYPE_MAP.get(subtype.lower())
3509 if ext is not None:
3510 return ext
3511
3512 SUFFIX_MAP = {
3513 'json': 'json',
3514 'xml': 'xml',
3515 'zip': 'zip',
3516 'gzip': 'gz',
3517 }
3518
3519 _, _, suffix = subtype.partition('+')
3520 ext = SUFFIX_MAP.get(suffix)
3521 if ext is not None:
3522 return ext
3523
3524 return subtype.replace('+', '.')
3525
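
# Usage sketch (hypothetical helper):
def _example_mimetype2ext():
    assert mimetype2ext('audio/mpeg') == 'mp3'
    assert mimetype2ext('application/x-mpegURL') == 'm3u8'
    assert mimetype2ext('video/mp4; codecs="avc1.42E01E"') == 'mp4'  # parameters are ignored
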
3526
3527 def ext2mimetype(ext_or_url):
3528 if not ext_or_url:
3529 return None
3530 if '.' not in ext_or_url:
3531 ext_or_url = f'file.{ext_or_url}'
3532 return mimetypes.guess_type(ext_or_url)[0]
3533
3534
3535 def parse_codecs(codecs_str):
3536 # http://tools.ietf.org/html/rfc6381
3537 if not codecs_str:
3538 return {}
3539 split_codecs = list(filter(None, map(
3540 str.strip, codecs_str.strip().strip(',').split(','))))
3541 vcodec, acodec, scodec, hdr = None, None, None, None
3542 for full_codec in split_codecs:
3543 parts = re.sub(r'0+(?=\d)', '', full_codec).split('.')
3544 if parts[0] in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
3545 'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
3546 if vcodec:
3547 continue
3548 vcodec = full_codec
3549 if parts[0] in ('dvh1', 'dvhe'):
3550 hdr = 'DV'
3551 elif parts[0] == 'av1' and traverse_obj(parts, 3) == '10':
3552 hdr = 'HDR10'
3553 elif parts[:2] == ['vp9', '2']:
3554 hdr = 'HDR10'
3555 elif parts[0] in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac',
3556 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
3557 acodec = acodec or full_codec
3558 elif parts[0] in ('stpp', 'wvtt'):
3559 scodec = scodec or full_codec
3560 else:
3561 write_string(f'WARNING: Unknown codec {full_codec}\n')
3562 if vcodec or acodec or scodec:
3563 return {
3564 'vcodec': vcodec or 'none',
3565 'acodec': acodec or 'none',
3566 'dynamic_range': hdr,
3567 **({'scodec': scodec} if scodec is not None else {}),
3568 }
3569 elif len(split_codecs) == 2:
3570 return {
3571 'vcodec': split_codecs[0],
3572 'acodec': split_codecs[1],
3573 }
3574 return {}
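
# e.g. an RFC 6381 codecs string as found in HLS/DASH manifests:
# >>> parse_codecs('avc1.64001f, mp4a.40.2')
# {'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2', 'dynamic_range': None}
# >>> parse_codecs('dvh1.05.06')  # Dolby Vision implies the DV dynamic range
# {'vcodec': 'dvh1.05.06', 'acodec': 'none', 'dynamic_range': 'DV'}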
3575
3576
3577 def get_compatible_ext(*, vcodecs, acodecs, vexts, aexts, preferences=None):
3578 assert len(vcodecs) == len(vexts) and len(acodecs) == len(aexts)
3579
3580 allow_mkv = not preferences or 'mkv' in preferences
3581
3582 if allow_mkv and max(len(acodecs), len(vcodecs)) > 1:
3583 return 'mkv' # TODO: does any other format allow this?
3584
3585 # TODO: Not all codecs supported by parse_codecs are handled here
3586 COMPATIBLE_CODECS = {
3587 'mp4': {
3588 'av1', 'hevc', 'avc1', 'mp4a', # fourcc (m3u8, mpd)
3589 'h264', 'aacl', 'ec-3', # Set in ISM
3590 },
3591 'webm': {
3592 'av1', 'vp9', 'vp8', 'opus', 'vrbs',
3593 'vp9x', 'vp8x', # in the webm spec
3594 },
3595 }
3596
3597 sanitize_codec = functools.partial(try_get, getter=lambda x: x[0].split('.')[0].replace('0', ''))
3598 vcodec, acodec = sanitize_codec(vcodecs), sanitize_codec(acodecs)
3599
3600 for ext in preferences or COMPATIBLE_CODECS.keys():
3601 codec_set = COMPATIBLE_CODECS.get(ext, set())
3602 if ext == 'mkv' or codec_set.issuperset((vcodec, acodec)):
3603 return ext
3604
3605 COMPATIBLE_EXTS = (
3606 {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma', 'mov'},
3607 {'webm'},
3608 )
3609 for ext in preferences or vexts:
3610 current_exts = {ext, *vexts, *aexts}
3611 if ext == 'mkv' or current_exts == {ext} or any(
3612 ext_sets.issuperset(current_exts) for ext_sets in COMPATIBLE_EXTS):
3613 return ext
3614 return 'mkv' if allow_mkv else preferences[-1]
3615
3616
3617 def urlhandle_detect_ext(url_handle):
3618 getheader = url_handle.headers.get
3619
3620 cd = getheader('Content-Disposition')
3621 if cd:
3622 m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
3623 if m:
3624 e = determine_ext(m.group('filename'), default_ext=None)
3625 if e:
3626 return e
3627
3628 return mimetype2ext(getheader('Content-Type'))
3629
3630
3631 def encode_data_uri(data, mime_type):
3632 return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
3633
3634
3635 def age_restricted(content_limit, age_limit):
3636 """ Returns True iff the content should be blocked """
3637
3638 if age_limit is None: # No limit set
3639 return False
3640 if content_limit is None:
3641 return False # Content available for everyone
3642 return age_limit < content_limit
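
# e.g. a viewer capped at 16 is blocked from 18-rated content:
# >>> age_restricted(content_limit=18, age_limit=16)
# True
# >>> age_restricted(None, 18)  # content carries no limit
# False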
3643
3644
3645 # List of known byte-order-marks (BOM); the UTF-32 BOMs are listed before the UTF-16 BOMs that prefix them
3646 BOMS = [
3647 (b'\xef\xbb\xbf', 'utf-8'),
3648 (b'\x00\x00\xfe\xff', 'utf-32-be'),
3649 (b'\xff\xfe\x00\x00', 'utf-32-le'),
3650 (b'\xff\xfe', 'utf-16-le'),
3651 (b'\xfe\xff', 'utf-16-be'),
3652 ]
3653
3654
3655 def is_html(first_bytes):
3656 """ Detect whether a file contains HTML by examining its first bytes. """
3657
3658 encoding = 'utf-8'
3659 for bom, enc in BOMS:
3660 while first_bytes.startswith(bom):
3661 encoding, first_bytes = enc, first_bytes[len(bom):]
3662
3663 return re.match(r'^\s*<', first_bytes.decode(encoding, 'replace'))
3664
3665
3666 def determine_protocol(info_dict):
3667 protocol = info_dict.get('protocol')
3668 if protocol is not None:
3669 return protocol
3670
3671 url = sanitize_url(info_dict['url'])
3672 if url.startswith('rtmp'):
3673 return 'rtmp'
3674 elif url.startswith('mms'):
3675 return 'mms'
3676 elif url.startswith('rtsp'):
3677 return 'rtsp'
3678
3679 ext = determine_ext(url)
3680 if ext == 'm3u8':
3681 return 'm3u8' if info_dict.get('is_live') else 'm3u8_native'
3682 elif ext == 'f4m':
3683 return 'f4m'
3684
3685 return urllib.parse.urlparse(url).scheme
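
# e.g. (the URL is illustrative):
# >>> determine_protocol({'url': 'https://example.com/master.m3u8'})
# 'm3u8_native'
# >>> determine_protocol({'url': 'https://example.com/master.m3u8', 'is_live': True})
# 'm3u8'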
3686
3687
3688 def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False):
3689 """ Render a list of rows, each as a list of values.
3690 Text after a \t will be right aligned """
3691 def width(string):
3692 return len(remove_terminal_sequences(string).replace('\t', ''))
3693
3694 def get_max_lens(table):
3695 return [max(width(str(v)) for v in col) for col in zip(*table)]
3696
3697 def filter_using_list(row, filterArray):
3698 return [col for take, col in itertools.zip_longest(filterArray, row, fillvalue=True) if take]
3699
3700 max_lens = get_max_lens(data) if hide_empty else []
3701 header_row = filter_using_list(header_row, max_lens)
3702 data = [filter_using_list(row, max_lens) for row in data]
3703
3704 table = [header_row] + data
3705 max_lens = get_max_lens(table)
3706 extra_gap += 1
3707 if delim:
3708 table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data
3709 table[1][-1] = table[1][-1][:-extra_gap * len(delim)] # Remove extra_gap from end of delimiter
3710 for row in table:
3711 for pos, text in enumerate(map(str, row)):
3712 if '\t' in text:
3713 row[pos] = text.replace('\t', ' ' * (max_lens[pos] - width(text))) + ' ' * extra_gap
3714 else:
3715 row[pos] = text + ' ' * (max_lens[pos] - width(text) + extra_gap)
3716 ret = '\n'.join(''.join(row).rstrip() for row in table)
3717 return ret
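
# A small sketch of the output shape, using the defaults (no delimiter row):
# >>> print(render_table(['ID', 'NAME'], [['1', 'foo'], ['23', 'bar']]))
# ID NAME
# 1  foo
# 23 bar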
3718
3719
3720 def _match_one(filter_part, dct, incomplete):
3721 # TODO: Generalize code with YoutubeDL._build_format_filter
3722 STRING_OPERATORS = {
3723 '*=': operator.contains,
3724 '^=': lambda attr, value: attr.startswith(value),
3725 '$=': lambda attr, value: attr.endswith(value),
3726 '~=': lambda attr, value: re.search(value, attr),
3727 }
3728 COMPARISON_OPERATORS = {
3729 **STRING_OPERATORS,
3730 '<=': operator.le, # "<=" must be defined above "<"
3731 '<': operator.lt,
3732 '>=': operator.ge,
3733 '>': operator.gt,
3734 '=': operator.eq,
3735 }
3736
3737 if isinstance(incomplete, bool):
3738 is_incomplete = lambda _: incomplete
3739 else:
3740 is_incomplete = lambda k: k in incomplete
3741
3742 operator_rex = re.compile(r'''(?x)
3743 (?P<key>[a-z_]+)
3744 \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
3745 (?:
3746 (?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
3747 (?P<strval>.+?)
3748 )
3749 ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
3750 m = operator_rex.fullmatch(filter_part.strip())
3751 if m:
3752 m = m.groupdict()
3753 unnegated_op = COMPARISON_OPERATORS[m['op']]
3754 if m['negation']:
3755 op = lambda attr, value: not unnegated_op(attr, value)
3756 else:
3757 op = unnegated_op
3758 comparison_value = m['quotedstrval'] or m['strval'] # the regex defines no 'intval' group
3759 if m['quote']:
3760 comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote'])
3761 actual_value = dct.get(m['key'])
3762 numeric_comparison = None
3763 if isinstance(actual_value, (int, float)):
3764 # If the original field is a string and the matching comparison value is
3765 # a number, we should respect the origin of the original field and
3766 # process the comparison value as a string (see
3767 # https://github.com/ytdl-org/youtube-dl/issues/11082)
3768 try:
3769 numeric_comparison = int(comparison_value)
3770 except ValueError:
3771 numeric_comparison = parse_filesize(comparison_value)
3772 if numeric_comparison is None:
3773 numeric_comparison = parse_filesize(f'{comparison_value}B')
3774 if numeric_comparison is None:
3775 numeric_comparison = parse_duration(comparison_value)
3776 if numeric_comparison is not None and m['op'] in STRING_OPERATORS:
3777 raise ValueError('Operator %s only supports string values!' % m['op'])
3778 if actual_value is None:
3779 return is_incomplete(m['key']) or m['none_inclusive']
3780 return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)
3781
3782 UNARY_OPERATORS = {
3783 '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
3784 '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
3785 }
3786 operator_rex = re.compile(r'''(?x)
3787 (?P<op>%s)\s*(?P<key>[a-z_]+)
3788 ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
3789 m = operator_rex.fullmatch(filter_part.strip())
3790 if m:
3791 op = UNARY_OPERATORS[m.group('op')]
3792 actual_value = dct.get(m.group('key'))
3793 if is_incomplete(m.group('key')) and actual_value is None:
3794 return True
3795 return op(actual_value)
3796
3797 raise ValueError('Invalid filter part %r' % filter_part)
3798
3799
3800 def match_str(filter_str, dct, incomplete=False):
3801 """ Filter a dictionary with a simple string syntax.
3802 @returns Whether the filter passes
3803 @param incomplete Set of keys that is expected to be missing from dct.
3804 Can be True/False to indicate all/none of the keys may be missing.
3805 All conditions on incomplete keys pass if the key is missing
3806 """
3807 return all(
3808 _match_one(filter_part.replace(r'\&', '&'), dct, incomplete)
3809 for filter_part in re.split(r'(?<!\\)&', filter_str))
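
# Illustrative filters in the --match-filter mini-language (values are examples):
# >>> match_str('like_count>100 & !is_live', {'like_count': 150, 'is_live': False})
# True
# >>> match_str('duration<600', {'duration': 900})
# False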
3810
3811
3812 def match_filter_func(filters):
3813 if not filters:
3814 return None
3815 filters = set(variadic(filters))
3816
3817 interactive = '-' in filters
3818 if interactive:
3819 filters.remove('-')
3820
3821 def _match_func(info_dict, incomplete=False):
3822 if not filters or any(match_str(f, info_dict, incomplete) for f in filters):
3823 return NO_DEFAULT if interactive and not incomplete else None
3824 else:
3825 video_title = info_dict.get('title') or info_dict.get('id') or 'entry'
3826 filter_str = ') | ('.join(map(str.strip, filters))
3827 return f'{video_title} does not pass filter ({filter_str}), skipping ..'
3828 return _match_func
3829
3830
3831 class download_range_func:
3832 def __init__(self, chapters, ranges):
3833 self.chapters, self.ranges = chapters, ranges
3834
3835 def __call__(self, info_dict, ydl):
3836 if not self.ranges and not self.chapters:
3837 yield {}
3838
3839 warning = ('There are no chapters matching the regex' if info_dict.get('chapters')
3840 else 'Cannot match chapters since chapter information is unavailable')
3841 for regex in self.chapters or []:
3842 for i, chapter in enumerate(info_dict.get('chapters') or []):
3843 if re.search(regex, chapter['title']):
3844 warning = None
3845 yield {**chapter, 'index': i}
3846 if self.chapters and warning:
3847 ydl.to_screen(f'[info] {info_dict["id"]}: {warning}')
3848
3849 yield from ({'start_time': start, 'end_time': end} for start, end in self.ranges or [])
3850
3851 def __eq__(self, other):
3852 return (isinstance(other, download_range_func)
3853 and self.chapters == other.chapters and self.ranges == other.ranges)
3854
3855
3856 def parse_dfxp_time_expr(time_expr):
3857 if not time_expr:
3858 return
3859
3860 mobj = re.match(rf'^(?P<time_offset>{NUMBER_RE})s?$', time_expr)
3861 if mobj:
3862 return float(mobj.group('time_offset'))
3863
3864 mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
3865 if mobj:
3866 return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
3867
3868
3869 def srt_subtitles_timecode(seconds):
3870 return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)
3871
3872
3873 def ass_subtitles_timecode(seconds):
3874 time = timetuple_from_msec(seconds * 1000)
3875 return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)
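
# A sketch tying the three helpers above together on one clock value:
# >>> parse_dfxp_time_expr('00:01:02.500')
# 62.5
# >>> srt_subtitles_timecode(62.5)
# '00:01:02,500'
# >>> ass_subtitles_timecode(62.5)
# '0:01:02.50'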
3876
3877
3878 def dfxp2srt(dfxp_data):
3879 '''
3880 @param dfxp_data A bytes-like object containing DFXP data
3881 @returns A unicode object containing converted SRT data
3882 '''
3883 LEGACY_NAMESPACES = (
3884 (b'http://www.w3.org/ns/ttml', [
3885 b'http://www.w3.org/2004/11/ttaf1',
3886 b'http://www.w3.org/2006/04/ttaf1',
3887 b'http://www.w3.org/2006/10/ttaf1',
3888 ]),
3889 (b'http://www.w3.org/ns/ttml#styling', [
3890 b'http://www.w3.org/ns/ttml#style',
3891 ]),
3892 )
3893
3894 SUPPORTED_STYLING = [
3895 'color',
3896 'fontFamily',
3897 'fontSize',
3898 'fontStyle',
3899 'fontWeight',
3900 'textDecoration'
3901 ]
3902
3903 _x = functools.partial(xpath_with_ns, ns_map={
3904 'xml': 'http://www.w3.org/XML/1998/namespace',
3905 'ttml': 'http://www.w3.org/ns/ttml',
3906 'tts': 'http://www.w3.org/ns/ttml#styling',
3907 })
3908
3909 styles = {}
3910 default_style = {}
3911
3912 class TTMLPElementParser:
3913 _out = ''
3914 _unclosed_elements = []
3915 _applied_styles = []
3916
3917 def start(self, tag, attrib):
3918 if tag in (_x('ttml:br'), 'br'):
3919 self._out += '\n'
3920 else:
3921 unclosed_elements = []
3922 style = {}
3923 element_style_id = attrib.get('style')
3924 if default_style:
3925 style.update(default_style)
3926 if element_style_id:
3927 style.update(styles.get(element_style_id, {}))
3928 for prop in SUPPORTED_STYLING:
3929 prop_val = attrib.get(_x('tts:' + prop))
3930 if prop_val:
3931 style[prop] = prop_val
3932 if style:
3933 font = ''
3934 for k, v in sorted(style.items()):
3935 if self._applied_styles and self._applied_styles[-1].get(k) == v:
3936 continue
3937 if k == 'color':
3938 font += ' color="%s"' % v
3939 elif k == 'fontSize':
3940 font += ' size="%s"' % v
3941 elif k == 'fontFamily':
3942 font += ' face="%s"' % v
3943 elif k == 'fontWeight' and v == 'bold':
3944 self._out += '<b>'
3945 unclosed_elements.append('b')
3946 elif k == 'fontStyle' and v == 'italic':
3947 self._out += '<i>'
3948 unclosed_elements.append('i')
3949 elif k == 'textDecoration' and v == 'underline':
3950 self._out += '<u>'
3951 unclosed_elements.append('u')
3952 if font:
3953 self._out += '<font' + font + '>'
3954 unclosed_elements.append('font')
3955 applied_style = {}
3956 if self._applied_styles:
3957 applied_style.update(self._applied_styles[-1])
3958 applied_style.update(style)
3959 self._applied_styles.append(applied_style)
3960 self._unclosed_elements.append(unclosed_elements)
3961
3962 def end(self, tag):
3963 if tag not in (_x('ttml:br'), 'br'):
3964 unclosed_elements = self._unclosed_elements.pop()
3965 for element in reversed(unclosed_elements):
3966 self._out += '</%s>' % element
3967 if unclosed_elements and self._applied_styles:
3968 self._applied_styles.pop()
3969
3970 def data(self, data):
3971 self._out += data
3972
3973 def close(self):
3974 return self._out.strip()
3975
3976 def parse_node(node):
3977 target = TTMLPElementParser()
3978 parser = xml.etree.ElementTree.XMLParser(target=target)
3979 parser.feed(xml.etree.ElementTree.tostring(node))
3980 return parser.close()
3981
3982 for k, v in LEGACY_NAMESPACES:
3983 for ns in v:
3984 dfxp_data = dfxp_data.replace(ns, k)
3985
3986 dfxp = compat_etree_fromstring(dfxp_data)
3987 out = []
3988 paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
3989
3990 if not paras:
3991 raise ValueError('Invalid dfxp/TTML subtitle')
3992
3993 repeat = False
3994 while True:
3995 for style in dfxp.findall(_x('.//ttml:style')):
3996 style_id = style.get('id') or style.get(_x('xml:id'))
3997 if not style_id:
3998 continue
3999 parent_style_id = style.get('style')
4000 if parent_style_id:
4001 if parent_style_id not in styles:
4002 repeat = True
4003 continue
4004 styles[style_id] = styles[parent_style_id].copy()
4005 for prop in SUPPORTED_STYLING:
4006 prop_val = style.get(_x('tts:' + prop))
4007 if prop_val:
4008 styles.setdefault(style_id, {})[prop] = prop_val
4009 if repeat:
4010 repeat = False
4011 else:
4012 break
4013
4014 for p in ('body', 'div'):
4015 ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
4016 if ele is None:
4017 continue
4018 style = styles.get(ele.get('style'))
4019 if not style:
4020 continue
4021 default_style.update(style)
4022
4023 for para, index in zip(paras, itertools.count(1)):
4024 begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
4025 end_time = parse_dfxp_time_expr(para.attrib.get('end'))
4026 dur = parse_dfxp_time_expr(para.attrib.get('dur'))
4027 if begin_time is None:
4028 continue
4029 if not end_time:
4030 if not dur:
4031 continue
4032 end_time = begin_time + dur
4033 out.append('%d\n%s --> %s\n%s\n\n' % (
4034 index,
4035 srt_subtitles_timecode(begin_time),
4036 srt_subtitles_timecode(end_time),
4037 parse_node(para)))
4038
4039 return ''.join(out)
4040
4041
4042 def cli_option(params, command_option, param, separator=None):
4043 param = params.get(param)
4044 return ([] if param is None
4045 else [command_option, str(param)] if separator is None
4046 else [f'{command_option}{separator}{param}'])
4047
4048
4049 def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
4050 param = params.get(param)
4051 assert param in (True, False, None)
4052 return cli_option({True: true_value, False: false_value}, command_option, param, separator)
4053
4054
4055 def cli_valueless_option(params, command_option, param, expected_value=True):
4056 return [command_option] if params.get(param) == expected_value else []
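
# e.g. building external-tool arguments from a params dict (keys are illustrative):
# >>> cli_option({'proxy': 'http://localhost:3128'}, '--proxy', 'proxy')
# ['--proxy', 'http://localhost:3128']
# >>> cli_option({'proxy': 'http://localhost:3128'}, '--proxy', 'proxy', separator='=')
# ['--proxy=http://localhost:3128']
# >>> cli_bool_option({'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate')
# ['--no-check-certificate', 'true']
# >>> cli_valueless_option({'quiet': True}, '--quiet', 'quiet')
# ['--quiet']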
4057
4058
4059 def cli_configuration_args(argdict, keys, default=[], use_compat=True):
4060 if isinstance(argdict, (list, tuple)): # for backward compatibility
4061 if use_compat:
4062 return argdict
4063 else:
4064 argdict = None
4065 if argdict is None:
4066 return default
4067 assert isinstance(argdict, dict)
4068
4069 assert isinstance(keys, (list, tuple))
4070 for key_list in keys:
4071 arg_list = list(filter(
4072 lambda x: x is not None,
4073 [argdict.get(key.lower()) for key in variadic(key_list)]))
4074 if arg_list:
4075 return [arg for args in arg_list for arg in args]
4076 return default
4077
4078
4079 def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compat=True):
4080 main_key, exe = main_key.lower(), exe.lower()
4081 root_key = exe if main_key == exe else f'{main_key}+{exe}'
4082 keys = [f'{root_key}{k}' for k in (keys or [''])]
4083 if root_key in keys:
4084 if main_key != exe:
4085 keys.append((main_key, exe))
4086 keys.append('default')
4087 else:
4088 use_compat = False
4089 return cli_configuration_args(argdict, keys, default, use_compat)
4090
4091
4092 class ISO639Utils:
4093 # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
4094 _lang_map = {
4095 'aa': 'aar',
4096 'ab': 'abk',
4097 'ae': 'ave',
4098 'af': 'afr',
4099 'ak': 'aka',
4100 'am': 'amh',
4101 'an': 'arg',
4102 'ar': 'ara',
4103 'as': 'asm',
4104 'av': 'ava',
4105 'ay': 'aym',
4106 'az': 'aze',
4107 'ba': 'bak',
4108 'be': 'bel',
4109 'bg': 'bul',
4110 'bh': 'bih',
4111 'bi': 'bis',
4112 'bm': 'bam',
4113 'bn': 'ben',
4114 'bo': 'bod',
4115 'br': 'bre',
4116 'bs': 'bos',
4117 'ca': 'cat',
4118 'ce': 'che',
4119 'ch': 'cha',
4120 'co': 'cos',
4121 'cr': 'cre',
4122 'cs': 'ces',
4123 'cu': 'chu',
4124 'cv': 'chv',
4125 'cy': 'cym',
4126 'da': 'dan',
4127 'de': 'deu',
4128 'dv': 'div',
4129 'dz': 'dzo',
4130 'ee': 'ewe',
4131 'el': 'ell',
4132 'en': 'eng',
4133 'eo': 'epo',
4134 'es': 'spa',
4135 'et': 'est',
4136 'eu': 'eus',
4137 'fa': 'fas',
4138 'ff': 'ful',
4139 'fi': 'fin',
4140 'fj': 'fij',
4141 'fo': 'fao',
4142 'fr': 'fra',
4143 'fy': 'fry',
4144 'ga': 'gle',
4145 'gd': 'gla',
4146 'gl': 'glg',
4147 'gn': 'grn',
4148 'gu': 'guj',
4149 'gv': 'glv',
4150 'ha': 'hau',
4151 'he': 'heb',
4152 'iw': 'heb', # Replaced by he in 1989 revision
4153 'hi': 'hin',
4154 'ho': 'hmo',
4155 'hr': 'hrv',
4156 'ht': 'hat',
4157 'hu': 'hun',
4158 'hy': 'hye',
4159 'hz': 'her',
4160 'ia': 'ina',
4161 'id': 'ind',
4162 'in': 'ind', # Replaced by id in 1989 revision
4163 'ie': 'ile',
4164 'ig': 'ibo',
4165 'ii': 'iii',
4166 'ik': 'ipk',
4167 'io': 'ido',
4168 'is': 'isl',
4169 'it': 'ita',
4170 'iu': 'iku',
4171 'ja': 'jpn',
4172 'jv': 'jav',
4173 'ka': 'kat',
4174 'kg': 'kon',
4175 'ki': 'kik',
4176 'kj': 'kua',
4177 'kk': 'kaz',
4178 'kl': 'kal',
4179 'km': 'khm',
4180 'kn': 'kan',
4181 'ko': 'kor',
4182 'kr': 'kau',
4183 'ks': 'kas',
4184 'ku': 'kur',
4185 'kv': 'kom',
4186 'kw': 'cor',
4187 'ky': 'kir',
4188 'la': 'lat',
4189 'lb': 'ltz',
4190 'lg': 'lug',
4191 'li': 'lim',
4192 'ln': 'lin',
4193 'lo': 'lao',
4194 'lt': 'lit',
4195 'lu': 'lub',
4196 'lv': 'lav',
4197 'mg': 'mlg',
4198 'mh': 'mah',
4199 'mi': 'mri',
4200 'mk': 'mkd',
4201 'ml': 'mal',
4202 'mn': 'mon',
4203 'mr': 'mar',
4204 'ms': 'msa',
4205 'mt': 'mlt',
4206 'my': 'mya',
4207 'na': 'nau',
4208 'nb': 'nob',
4209 'nd': 'nde',
4210 'ne': 'nep',
4211 'ng': 'ndo',
4212 'nl': 'nld',
4213 'nn': 'nno',
4214 'no': 'nor',
4215 'nr': 'nbl',
4216 'nv': 'nav',
4217 'ny': 'nya',
4218 'oc': 'oci',
4219 'oj': 'oji',
4220 'om': 'orm',
4221 'or': 'ori',
4222 'os': 'oss',
4223 'pa': 'pan',
4224 'pi': 'pli',
4225 'pl': 'pol',
4226 'ps': 'pus',
4227 'pt': 'por',
4228 'qu': 'que',
4229 'rm': 'roh',
4230 'rn': 'run',
4231 'ro': 'ron',
4232 'ru': 'rus',
4233 'rw': 'kin',
4234 'sa': 'san',
4235 'sc': 'srd',
4236 'sd': 'snd',
4237 'se': 'sme',
4238 'sg': 'sag',
4239 'si': 'sin',
4240 'sk': 'slk',
4241 'sl': 'slv',
4242 'sm': 'smo',
4243 'sn': 'sna',
4244 'so': 'som',
4245 'sq': 'sqi',
4246 'sr': 'srp',
4247 'ss': 'ssw',
4248 'st': 'sot',
4249 'su': 'sun',
4250 'sv': 'swe',
4251 'sw': 'swa',
4252 'ta': 'tam',
4253 'te': 'tel',
4254 'tg': 'tgk',
4255 'th': 'tha',
4256 'ti': 'tir',
4257 'tk': 'tuk',
4258 'tl': 'tgl',
4259 'tn': 'tsn',
4260 'to': 'ton',
4261 'tr': 'tur',
4262 'ts': 'tso',
4263 'tt': 'tat',
4264 'tw': 'twi',
4265 'ty': 'tah',
4266 'ug': 'uig',
4267 'uk': 'ukr',
4268 'ur': 'urd',
4269 'uz': 'uzb',
4270 've': 'ven',
4271 'vi': 'vie',
4272 'vo': 'vol',
4273 'wa': 'wln',
4274 'wo': 'wol',
4275 'xh': 'xho',
4276 'yi': 'yid',
4277 'ji': 'yid', # Replaced by yi in 1989 revision
4278 'yo': 'yor',
4279 'za': 'zha',
4280 'zh': 'zho',
4281 'zu': 'zul',
4282 }
4283
4284 @classmethod
4285 def short2long(cls, code):
4286 """Convert language code from ISO 639-1 to ISO 639-2/T"""
4287 return cls._lang_map.get(code[:2])
4288
4289 @classmethod
4290 def long2short(cls, code):
4291 """Convert language code from ISO 639-2/T to ISO 639-1"""
4292 for short_name, long_name in cls._lang_map.items():
4293 if long_name == code:
4294 return short_name
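
# e.g. converting between the two code forms:
# >>> ISO639Utils.short2long('en')
# 'eng'
# >>> ISO639Utils.long2short('deu')
# 'de'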
4295
4296
4297 class ISO3166Utils:
4298 # From http://data.okfn.org/data/core/country-list
4299 _country_map = {
4300 'AF': 'Afghanistan',
4301 'AX': 'Åland Islands',
4302 'AL': 'Albania',
4303 'DZ': 'Algeria',
4304 'AS': 'American Samoa',
4305 'AD': 'Andorra',
4306 'AO': 'Angola',
4307 'AI': 'Anguilla',
4308 'AQ': 'Antarctica',
4309 'AG': 'Antigua and Barbuda',
4310 'AR': 'Argentina',
4311 'AM': 'Armenia',
4312 'AW': 'Aruba',
4313 'AU': 'Australia',
4314 'AT': 'Austria',
4315 'AZ': 'Azerbaijan',
4316 'BS': 'Bahamas',
4317 'BH': 'Bahrain',
4318 'BD': 'Bangladesh',
4319 'BB': 'Barbados',
4320 'BY': 'Belarus',
4321 'BE': 'Belgium',
4322 'BZ': 'Belize',
4323 'BJ': 'Benin',
4324 'BM': 'Bermuda',
4325 'BT': 'Bhutan',
4326 'BO': 'Bolivia, Plurinational State of',
4327 'BQ': 'Bonaire, Sint Eustatius and Saba',
4328 'BA': 'Bosnia and Herzegovina',
4329 'BW': 'Botswana',
4330 'BV': 'Bouvet Island',
4331 'BR': 'Brazil',
4332 'IO': 'British Indian Ocean Territory',
4333 'BN': 'Brunei Darussalam',
4334 'BG': 'Bulgaria',
4335 'BF': 'Burkina Faso',
4336 'BI': 'Burundi',
4337 'KH': 'Cambodia',
4338 'CM': 'Cameroon',
4339 'CA': 'Canada',
4340 'CV': 'Cape Verde',
4341 'KY': 'Cayman Islands',
4342 'CF': 'Central African Republic',
4343 'TD': 'Chad',
4344 'CL': 'Chile',
4345 'CN': 'China',
4346 'CX': 'Christmas Island',
4347 'CC': 'Cocos (Keeling) Islands',
4348 'CO': 'Colombia',
4349 'KM': 'Comoros',
4350 'CG': 'Congo',
4351 'CD': 'Congo, the Democratic Republic of the',
4352 'CK': 'Cook Islands',
4353 'CR': 'Costa Rica',
4354 'CI': 'Côte d\'Ivoire',
4355 'HR': 'Croatia',
4356 'CU': 'Cuba',
4357 'CW': 'Curaçao',
4358 'CY': 'Cyprus',
4359 'CZ': 'Czech Republic',
4360 'DK': 'Denmark',
4361 'DJ': 'Djibouti',
4362 'DM': 'Dominica',
4363 'DO': 'Dominican Republic',
4364 'EC': 'Ecuador',
4365 'EG': 'Egypt',
4366 'SV': 'El Salvador',
4367 'GQ': 'Equatorial Guinea',
4368 'ER': 'Eritrea',
4369 'EE': 'Estonia',
4370 'ET': 'Ethiopia',
4371 'FK': 'Falkland Islands (Malvinas)',
4372 'FO': 'Faroe Islands',
4373 'FJ': 'Fiji',
4374 'FI': 'Finland',
4375 'FR': 'France',
4376 'GF': 'French Guiana',
4377 'PF': 'French Polynesia',
4378 'TF': 'French Southern Territories',
4379 'GA': 'Gabon',
4380 'GM': 'Gambia',
4381 'GE': 'Georgia',
4382 'DE': 'Germany',
4383 'GH': 'Ghana',
4384 'GI': 'Gibraltar',
4385 'GR': 'Greece',
4386 'GL': 'Greenland',
4387 'GD': 'Grenada',
4388 'GP': 'Guadeloupe',
4389 'GU': 'Guam',
4390 'GT': 'Guatemala',
4391 'GG': 'Guernsey',
4392 'GN': 'Guinea',
4393 'GW': 'Guinea-Bissau',
4394 'GY': 'Guyana',
4395 'HT': 'Haiti',
4396 'HM': 'Heard Island and McDonald Islands',
4397 'VA': 'Holy See (Vatican City State)',
4398 'HN': 'Honduras',
4399 'HK': 'Hong Kong',
4400 'HU': 'Hungary',
4401 'IS': 'Iceland',
4402 'IN': 'India',
4403 'ID': 'Indonesia',
4404 'IR': 'Iran, Islamic Republic of',
4405 'IQ': 'Iraq',
4406 'IE': 'Ireland',
4407 'IM': 'Isle of Man',
4408 'IL': 'Israel',
4409 'IT': 'Italy',
4410 'JM': 'Jamaica',
4411 'JP': 'Japan',
4412 'JE': 'Jersey',
4413 'JO': 'Jordan',
4414 'KZ': 'Kazakhstan',
4415 'KE': 'Kenya',
4416 'KI': 'Kiribati',
4417 'KP': 'Korea, Democratic People\'s Republic of',
4418 'KR': 'Korea, Republic of',
4419 'KW': 'Kuwait',
4420 'KG': 'Kyrgyzstan',
4421 'LA': 'Lao People\'s Democratic Republic',
4422 'LV': 'Latvia',
4423 'LB': 'Lebanon',
4424 'LS': 'Lesotho',
4425 'LR': 'Liberia',
4426 'LY': 'Libya',
4427 'LI': 'Liechtenstein',
4428 'LT': 'Lithuania',
4429 'LU': 'Luxembourg',
4430 'MO': 'Macao',
4431 'MK': 'Macedonia, the Former Yugoslav Republic of',
4432 'MG': 'Madagascar',
4433 'MW': 'Malawi',
4434 'MY': 'Malaysia',
4435 'MV': 'Maldives',
4436 'ML': 'Mali',
4437 'MT': 'Malta',
4438 'MH': 'Marshall Islands',
4439 'MQ': 'Martinique',
4440 'MR': 'Mauritania',
4441 'MU': 'Mauritius',
4442 'YT': 'Mayotte',
4443 'MX': 'Mexico',
4444 'FM': 'Micronesia, Federated States of',
4445 'MD': 'Moldova, Republic of',
4446 'MC': 'Monaco',
4447 'MN': 'Mongolia',
4448 'ME': 'Montenegro',
4449 'MS': 'Montserrat',
4450 'MA': 'Morocco',
4451 'MZ': 'Mozambique',
4452 'MM': 'Myanmar',
4453 'NA': 'Namibia',
4454 'NR': 'Nauru',
4455 'NP': 'Nepal',
4456 'NL': 'Netherlands',
4457 'NC': 'New Caledonia',
4458 'NZ': 'New Zealand',
4459 'NI': 'Nicaragua',
4460 'NE': 'Niger',
4461 'NG': 'Nigeria',
4462 'NU': 'Niue',
4463 'NF': 'Norfolk Island',
4464 'MP': 'Northern Mariana Islands',
4465 'NO': 'Norway',
4466 'OM': 'Oman',
4467 'PK': 'Pakistan',
4468 'PW': 'Palau',
4469 'PS': 'Palestine, State of',
4470 'PA': 'Panama',
4471 'PG': 'Papua New Guinea',
4472 'PY': 'Paraguay',
4473 'PE': 'Peru',
4474 'PH': 'Philippines',
4475 'PN': 'Pitcairn',
4476 'PL': 'Poland',
4477 'PT': 'Portugal',
4478 'PR': 'Puerto Rico',
4479 'QA': 'Qatar',
4480 'RE': 'Réunion',
4481 'RO': 'Romania',
4482 'RU': 'Russian Federation',
4483 'RW': 'Rwanda',
4484 'BL': 'Saint Barthélemy',
4485 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
4486 'KN': 'Saint Kitts and Nevis',
4487 'LC': 'Saint Lucia',
4488 'MF': 'Saint Martin (French part)',
4489 'PM': 'Saint Pierre and Miquelon',
4490 'VC': 'Saint Vincent and the Grenadines',
4491 'WS': 'Samoa',
4492 'SM': 'San Marino',
4493 'ST': 'Sao Tome and Principe',
4494 'SA': 'Saudi Arabia',
4495 'SN': 'Senegal',
4496 'RS': 'Serbia',
4497 'SC': 'Seychelles',
4498 'SL': 'Sierra Leone',
4499 'SG': 'Singapore',
4500 'SX': 'Sint Maarten (Dutch part)',
4501 'SK': 'Slovakia',
4502 'SI': 'Slovenia',
4503 'SB': 'Solomon Islands',
4504 'SO': 'Somalia',
4505 'ZA': 'South Africa',
4506 'GS': 'South Georgia and the South Sandwich Islands',
4507 'SS': 'South Sudan',
4508 'ES': 'Spain',
4509 'LK': 'Sri Lanka',
4510 'SD': 'Sudan',
4511 'SR': 'Suriname',
4512 'SJ': 'Svalbard and Jan Mayen',
4513 'SZ': 'Swaziland',
4514 'SE': 'Sweden',
4515 'CH': 'Switzerland',
4516 'SY': 'Syrian Arab Republic',
4517 'TW': 'Taiwan, Province of China',
4518 'TJ': 'Tajikistan',
4519 'TZ': 'Tanzania, United Republic of',
4520 'TH': 'Thailand',
4521 'TL': 'Timor-Leste',
4522 'TG': 'Togo',
4523 'TK': 'Tokelau',
4524 'TO': 'Tonga',
4525 'TT': 'Trinidad and Tobago',
4526 'TN': 'Tunisia',
4527 'TR': 'Turkey',
4528 'TM': 'Turkmenistan',
4529 'TC': 'Turks and Caicos Islands',
4530 'TV': 'Tuvalu',
4531 'UG': 'Uganda',
4532 'UA': 'Ukraine',
4533 'AE': 'United Arab Emirates',
4534 'GB': 'United Kingdom',
4535 'US': 'United States',
4536 'UM': 'United States Minor Outlying Islands',
4537 'UY': 'Uruguay',
4538 'UZ': 'Uzbekistan',
4539 'VU': 'Vanuatu',
4540 'VE': 'Venezuela, Bolivarian Republic of',
4541 'VN': 'Viet Nam',
4542 'VG': 'Virgin Islands, British',
4543 'VI': 'Virgin Islands, U.S.',
4544 'WF': 'Wallis and Futuna',
4545 'EH': 'Western Sahara',
4546 'YE': 'Yemen',
4547 'ZM': 'Zambia',
4548 'ZW': 'Zimbabwe',
4549 # Not ISO 3166 codes, but used for IP blocks
4550 'AP': 'Asia/Pacific Region',
4551 'EU': 'Europe',
4552 }
4553
4554 @classmethod
4555 def short2full(cls, code):
4556 """Convert an ISO 3166-2 country code to the corresponding full name"""
4557 return cls._country_map.get(code.upper())
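
# e.g. lookups are case-insensitive:
# >>> ISO3166Utils.short2full('fr')
# 'France'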
4558
4559
4560 class GeoUtils:
4561 # Major IPv4 address blocks per country
4562 _country_ip_map = {
4563 'AD': '46.172.224.0/19',
4564 'AE': '94.200.0.0/13',
4565 'AF': '149.54.0.0/17',
4566 'AG': '209.59.64.0/18',
4567 'AI': '204.14.248.0/21',
4568 'AL': '46.99.0.0/16',
4569 'AM': '46.70.0.0/15',
4570 'AO': '105.168.0.0/13',
4571 'AP': '182.50.184.0/21',
4572 'AQ': '23.154.160.0/24',
4573 'AR': '181.0.0.0/12',
4574 'AS': '202.70.112.0/20',
4575 'AT': '77.116.0.0/14',
4576 'AU': '1.128.0.0/11',
4577 'AW': '181.41.0.0/18',
4578 'AX': '185.217.4.0/22',
4579 'AZ': '5.197.0.0/16',
4580 'BA': '31.176.128.0/17',
4581 'BB': '65.48.128.0/17',
4582 'BD': '114.130.0.0/16',
4583 'BE': '57.0.0.0/8',
4584 'BF': '102.178.0.0/15',
4585 'BG': '95.42.0.0/15',
4586 'BH': '37.131.0.0/17',
4587 'BI': '154.117.192.0/18',
4588 'BJ': '137.255.0.0/16',
4589 'BL': '185.212.72.0/23',
4590 'BM': '196.12.64.0/18',
4591 'BN': '156.31.0.0/16',
4592 'BO': '161.56.0.0/16',
4593 'BQ': '161.0.80.0/20',
4594 'BR': '191.128.0.0/12',
4595 'BS': '24.51.64.0/18',
4596 'BT': '119.2.96.0/19',
4597 'BW': '168.167.0.0/16',
4598 'BY': '178.120.0.0/13',
4599 'BZ': '179.42.192.0/18',
4600 'CA': '99.224.0.0/11',
4601 'CD': '41.243.0.0/16',
4602 'CF': '197.242.176.0/21',
4603 'CG': '160.113.0.0/16',
4604 'CH': '85.0.0.0/13',
4605 'CI': '102.136.0.0/14',
4606 'CK': '202.65.32.0/19',
4607 'CL': '152.172.0.0/14',
4608 'CM': '102.244.0.0/14',
4609 'CN': '36.128.0.0/10',
4610 'CO': '181.240.0.0/12',
4611 'CR': '201.192.0.0/12',
4612 'CU': '152.206.0.0/15',
4613 'CV': '165.90.96.0/19',
4614 'CW': '190.88.128.0/17',
4615 'CY': '31.153.0.0/16',
4616 'CZ': '88.100.0.0/14',
4617 'DE': '53.0.0.0/8',
4618 'DJ': '197.241.0.0/17',
4619 'DK': '87.48.0.0/12',
4620 'DM': '192.243.48.0/20',
4621 'DO': '152.166.0.0/15',
4622 'DZ': '41.96.0.0/12',
4623 'EC': '186.68.0.0/15',
4624 'EE': '90.190.0.0/15',
4625 'EG': '156.160.0.0/11',
4626 'ER': '196.200.96.0/20',
4627 'ES': '88.0.0.0/11',
4628 'ET': '196.188.0.0/14',
4629 'EU': '2.16.0.0/13',
4630 'FI': '91.152.0.0/13',
4631 'FJ': '144.120.0.0/16',
4632 'FK': '80.73.208.0/21',
4633 'FM': '119.252.112.0/20',
4634 'FO': '88.85.32.0/19',
4635 'FR': '90.0.0.0/9',
4636 'GA': '41.158.0.0/15',
4637 'GB': '25.0.0.0/8',
4638 'GD': '74.122.88.0/21',
4639 'GE': '31.146.0.0/16',
4640 'GF': '161.22.64.0/18',
4641 'GG': '62.68.160.0/19',
4642 'GH': '154.160.0.0/12',
4643 'GI': '95.164.0.0/16',
4644 'GL': '88.83.0.0/19',
4645 'GM': '160.182.0.0/15',
4646 'GN': '197.149.192.0/18',
4647 'GP': '104.250.0.0/19',
4648 'GQ': '105.235.224.0/20',
4649 'GR': '94.64.0.0/13',
4650 'GT': '168.234.0.0/16',
4651 'GU': '168.123.0.0/16',
4652 'GW': '197.214.80.0/20',
4653 'GY': '181.41.64.0/18',
4654 'HK': '113.252.0.0/14',
4655 'HN': '181.210.0.0/16',
4656 'HR': '93.136.0.0/13',
4657 'HT': '148.102.128.0/17',
4658 'HU': '84.0.0.0/14',
4659 'ID': '39.192.0.0/10',
4660 'IE': '87.32.0.0/12',
4661 'IL': '79.176.0.0/13',
4662 'IM': '5.62.80.0/20',
4663 'IN': '117.192.0.0/10',
4664 'IO': '203.83.48.0/21',
4665 'IQ': '37.236.0.0/14',
4666 'IR': '2.176.0.0/12',
4667 'IS': '82.221.0.0/16',
4668 'IT': '79.0.0.0/10',
4669 'JE': '87.244.64.0/18',
4670 'JM': '72.27.0.0/17',
4671 'JO': '176.29.0.0/16',
4672 'JP': '133.0.0.0/8',
4673 'KE': '105.48.0.0/12',
4674 'KG': '158.181.128.0/17',
4675 'KH': '36.37.128.0/17',
4676 'KI': '103.25.140.0/22',
4677 'KM': '197.255.224.0/20',
4678 'KN': '198.167.192.0/19',
4679 'KP': '175.45.176.0/22',
4680 'KR': '175.192.0.0/10',
4681 'KW': '37.36.0.0/14',
4682 'KY': '64.96.0.0/15',
4683 'KZ': '2.72.0.0/13',
4684 'LA': '115.84.64.0/18',
4685 'LB': '178.135.0.0/16',
4686 'LC': '24.92.144.0/20',
4687 'LI': '82.117.0.0/19',
4688 'LK': '112.134.0.0/15',
4689 'LR': '102.183.0.0/16',
4690 'LS': '129.232.0.0/17',
4691 'LT': '78.56.0.0/13',
4692 'LU': '188.42.0.0/16',
4693 'LV': '46.109.0.0/16',
4694 'LY': '41.252.0.0/14',
4695 'MA': '105.128.0.0/11',
4696 'MC': '88.209.64.0/18',
4697 'MD': '37.246.0.0/16',
4698 'ME': '178.175.0.0/17',
4699 'MF': '74.112.232.0/21',
4700 'MG': '154.126.0.0/17',
4701 'MH': '117.103.88.0/21',
4702 'MK': '77.28.0.0/15',
4703 'ML': '154.118.128.0/18',
4704 'MM': '37.111.0.0/17',
4705 'MN': '49.0.128.0/17',
4706 'MO': '60.246.0.0/16',
4707 'MP': '202.88.64.0/20',
4708 'MQ': '109.203.224.0/19',
4709 'MR': '41.188.64.0/18',
4710 'MS': '208.90.112.0/22',
4711 'MT': '46.11.0.0/16',
4712 'MU': '105.16.0.0/12',
4713 'MV': '27.114.128.0/18',
4714 'MW': '102.70.0.0/15',
4715 'MX': '187.192.0.0/11',
4716 'MY': '175.136.0.0/13',
4717 'MZ': '197.218.0.0/15',
4718 'NA': '41.182.0.0/16',
4719 'NC': '101.101.0.0/18',
4720 'NE': '197.214.0.0/18',
4721 'NF': '203.17.240.0/22',
4722 'NG': '105.112.0.0/12',
4723 'NI': '186.76.0.0/15',
4724 'NL': '145.96.0.0/11',
4725 'NO': '84.208.0.0/13',
4726 'NP': '36.252.0.0/15',
4727 'NR': '203.98.224.0/19',
4728 'NU': '49.156.48.0/22',
4729 'NZ': '49.224.0.0/14',
4730 'OM': '5.36.0.0/15',
4731 'PA': '186.72.0.0/15',
4732 'PE': '186.160.0.0/14',
4733 'PF': '123.50.64.0/18',
4734 'PG': '124.240.192.0/19',
4735 'PH': '49.144.0.0/13',
4736 'PK': '39.32.0.0/11',
4737 'PL': '83.0.0.0/11',
4738 'PM': '70.36.0.0/20',
4739 'PR': '66.50.0.0/16',
4740 'PS': '188.161.0.0/16',
4741 'PT': '85.240.0.0/13',
4742 'PW': '202.124.224.0/20',
4743 'PY': '181.120.0.0/14',
4744 'QA': '37.210.0.0/15',
4745 'RE': '102.35.0.0/16',
4746 'RO': '79.112.0.0/13',
4747 'RS': '93.86.0.0/15',
4748 'RU': '5.136.0.0/13',
4749 'RW': '41.186.0.0/16',
4750 'SA': '188.48.0.0/13',
4751 'SB': '202.1.160.0/19',
4752 'SC': '154.192.0.0/11',
4753 'SD': '102.120.0.0/13',
4754 'SE': '78.64.0.0/12',
4755 'SG': '8.128.0.0/10',
4756 'SI': '188.196.0.0/14',
4757 'SK': '78.98.0.0/15',
4758 'SL': '102.143.0.0/17',
4759 'SM': '89.186.32.0/19',
4760 'SN': '41.82.0.0/15',
4761 'SO': '154.115.192.0/18',
4762 'SR': '186.179.128.0/17',
4763 'SS': '105.235.208.0/21',
4764 'ST': '197.159.160.0/19',
4765 'SV': '168.243.0.0/16',
4766 'SX': '190.102.0.0/20',
4767 'SY': '5.0.0.0/16',
4768 'SZ': '41.84.224.0/19',
4769 'TC': '65.255.48.0/20',
4770 'TD': '154.68.128.0/19',
4771 'TG': '196.168.0.0/14',
4772 'TH': '171.96.0.0/13',
4773 'TJ': '85.9.128.0/18',
4774 'TK': '27.96.24.0/21',
4775 'TL': '180.189.160.0/20',
4776 'TM': '95.85.96.0/19',
4777 'TN': '197.0.0.0/11',
4778 'TO': '175.176.144.0/21',
4779 'TR': '78.160.0.0/11',
4780 'TT': '186.44.0.0/15',
4781 'TV': '202.2.96.0/19',
4782 'TW': '120.96.0.0/11',
4783 'TZ': '156.156.0.0/14',
4784 'UA': '37.52.0.0/14',
4785 'UG': '102.80.0.0/13',
4786 'US': '6.0.0.0/8',
4787 'UY': '167.56.0.0/13',
4788 'UZ': '84.54.64.0/18',
4789 'VA': '212.77.0.0/19',
4790 'VC': '207.191.240.0/21',
4791 'VE': '186.88.0.0/13',
4792 'VG': '66.81.192.0/20',
4793 'VI': '146.226.0.0/16',
4794 'VN': '14.160.0.0/11',
4795 'VU': '202.80.32.0/20',
4796 'WF': '117.20.32.0/21',
4797 'WS': '202.4.32.0/19',
4798 'YE': '134.35.0.0/16',
4799 'YT': '41.242.116.0/22',
4800 'ZA': '41.0.0.0/11',
4801 'ZM': '102.144.0.0/13',
4802 'ZW': '102.177.192.0/18',
4803 }
4804
4805 @classmethod
4806 def random_ipv4(cls, code_or_block):
4807 if len(code_or_block) == 2:
4808 block = cls._country_ip_map.get(code_or_block.upper())
4809 if not block:
4810 return None
4811 else:
4812 block = code_or_block
4813 addr, preflen = block.split('/')
4814 addr_min = struct.unpack('!L', socket.inet_aton(addr))[0]
4815 addr_max = addr_min | (0xffffffff >> int(preflen))
4816 return str(socket.inet_ntoa(
4817 struct.pack('!L', random.randint(addr_min, addr_max))))
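
# Usage sketch: either a two-letter country code (resolved via the table above)
# or an explicit CIDR block may be passed; the result is random within the block:
# >>> GeoUtils.random_ipv4('DE')            # some address within 53.0.0.0/8
# >>> GeoUtils.random_ipv4('203.0.113.0/24')  # some address within this block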
4818
4819
4820 class PerRequestProxyHandler(urllib.request.ProxyHandler):
4821 def __init__(self, proxies=None):
4822 # Set default handlers
4823 for type in ('http', 'https'):
4824 setattr(self, '%s_open' % type,
4825 lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
4826 meth(r, proxy, type))
4827 urllib.request.ProxyHandler.__init__(self, proxies)
4828
4829 def proxy_open(self, req, proxy, type):
4830 req_proxy = req.headers.get('Ytdl-request-proxy')
4831 if req_proxy is not None:
4832 proxy = req_proxy
4833 del req.headers['Ytdl-request-proxy']
4834
4835 if proxy == '__noproxy__':
4836 return None # No Proxy
4837 if urllib.parse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
4838 req.add_header('Ytdl-socks-proxy', proxy)
4839 # yt-dlp's http/https handlers do the actual wrapping of the socket with socks
4840 return None
4841 return urllib.request.ProxyHandler.proxy_open(
4842 self, req, proxy, type)
4843
4844
4845 # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
4846 # released into Public Domain
4847 # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
4848
4849 def long_to_bytes(n, blocksize=0):
4850 """long_to_bytes(n:long, blocksize:int) : string
4851 Convert a long integer to a byte string.
4852
4853 If optional blocksize is given and greater than zero, pad the front of the
4854 byte string with binary zeros so that the length is a multiple of
4855 blocksize.
4856 """
4857 # after much testing, this algorithm was deemed to be the fastest
4858 s = b''
4859 n = int(n)
4860 while n > 0:
4861 s = struct.pack('>I', n & 0xffffffff) + s
4862 n = n >> 32
4863 # strip off leading zeros
4864 for i in range(len(s)):
4865 if s[i] != b'\000'[0]:
4866 break
4867 else:
4868 # only happens when n == 0
4869 s = b'\000'
4870 i = 0
4871 s = s[i:]
4872 # add back some pad bytes. this could be done more efficiently w.r.t. the
4873 # de-padding being done above, but sigh...
4874 if blocksize > 0 and len(s) % blocksize:
4875 s = (blocksize - len(s) % blocksize) * b'\000' + s
4876 return s
4877
4878
4879 def bytes_to_long(s):
4880 """bytes_to_long(string) : long
4881 Convert a byte string to a long integer.
4882
4883 This is (essentially) the inverse of long_to_bytes().
4884 """
4885 acc = 0
4886 length = len(s)
4887 if length % 4:
4888 extra = (4 - length % 4)
4889 s = b'\000' * extra + s
4890 length = length + extra
4891 for i in range(0, length, 4):
4892 acc = (acc << 32) + struct.unpack('>I', s[i:i + 4])[0]
4893 return acc
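
# Sanity-check sketch of the inverse relationship (blocksize pads the front):
# >>> long_to_bytes(65537, 4)
# b'\x00\x01\x00\x01'
# >>> bytes_to_long(b'\x00\x01\x00\x01')
# 65537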
4894
4895
4896 def ohdave_rsa_encrypt(data, exponent, modulus):
4897 '''
4898 Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
4899
4900 Input:
4901 data: data to encrypt, bytes-like object
4902 exponent, modulus: parameter e and N of RSA algorithm, both integer
4903 Output: hex string of encrypted data
4904
4905 Limitation: supports one block encryption only
4906 '''
4907
4908 payload = int(binascii.hexlify(data[::-1]), 16)
4909 encrypted = pow(payload, exponent, modulus)
4910 return '%x' % encrypted
4911
4912
4913 def pkcs1pad(data, length):
4914 """
4915 Padding input data with PKCS#1 scheme
4916
4917 @param {int[]} data input data
4918 @param {int} length target length
4919 @returns {int[]} padded data
4920 """
4921 if len(data) > length - 11:
4922 raise ValueError('Input data too long for PKCS#1 padding')
4923
4924 pseudo_random = [random.randint(1, 255) for _ in range(length - len(data) - 3)] # padding bytes must be nonzero per PKCS#1 v1.5
4925 return [0, 2] + pseudo_random + [0] + data
4926
4927
4928 def _base_n_table(n, table):
4929 if not table and not n:
4930 raise ValueError('Either table or n must be specified')
4931 table = (table or '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')[:n]
4932
4933 if n and n != len(table):
4934 raise ValueError(f'base {n} exceeds table length {len(table)}')
4935 return table
4936
4937
4938 def encode_base_n(num, n=None, table=None):
4939 """Convert given int to a base-n string"""
4940 table = _base_n_table(n, table)
4941 if not num:
4942 return table[0]
4943
4944 result, base = '', len(table)
4945 while num:
4946 result = table[num % base] + result
4947 num = num // base
4948 return result
4949
4950
4951 def decode_base_n(string, n=None, table=None):
4952 """Convert given base-n string to int"""
4953 table = {char: index for index, char in enumerate(_base_n_table(n, table))}
4954 result, base = 0, len(table)
4955 for char in string:
4956 result = result * base + table[char]
4957 return result
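
# e.g. base-16 with the default table:
# >>> encode_base_n(255, 16)
# 'ff'
# >>> decode_base_n('ff', 16)
# 255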
4958
4959
4960 def decode_base(value, digits):
4961 deprecation_warning(f'{__name__}.decode_base is deprecated and may be removed '
4962 f'in a future version. Use {__name__}.decode_base_n instead')
4963 return decode_base_n(value, table=digits)
4964
4965
4966 def decode_packed_codes(code):
4967 mobj = re.search(PACKED_CODES_RE, code)
4968 obfuscated_code, base, count, symbols = mobj.groups()
4969 base = int(base)
4970 count = int(count)
4971 symbols = symbols.split('|')
4972 symbol_table = {}
4973
4974 while count:
4975 count -= 1
4976 base_n_count = encode_base_n(count, base)
4977 symbol_table[base_n_count] = symbols[count] or base_n_count
4978
4979 return re.sub(
4980 r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
4981 obfuscated_code)
4982
4983
4984 def caesar(s, alphabet, shift):
4985 if shift == 0:
4986 return s
4987 l = len(alphabet)
4988 return ''.join(
4989 alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
4990 for c in s)
4991
4992
4993 def rot47(s):
4994 return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)
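
# Characters outside the alphabet pass through unchanged:
# >>> caesar('cab-', 'abc', 1)
# 'abc-'
# >>> rot47('abc')
# '234'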
4995
4996
4997 def parse_m3u8_attributes(attrib):
4998 info = {}
4999 for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
5000 if val.startswith('"'):
5001 val = val[1:-1]
5002 info[key] = val
5003 return info
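
# e.g. an EXT-X-STREAM-INF attribute list; quoted values may contain commas:
# >>> parse_m3u8_attributes('BANDWIDTH=1280000,CODECS="mp4a.40.2,avc1.4d401f"')
# {'BANDWIDTH': '1280000', 'CODECS': 'mp4a.40.2,avc1.4d401f'}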
5004
5005
5006 def urshift(val, n):
5007 return val >> n if val >= 0 else (val + 0x100000000) >> n
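
# Logical (unsigned) 32-bit right shift, like JavaScript's `>>>`:
# >>> urshift(-4, 1)
# 2147483646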
5008
5009
5010 # Based on png2str() written by @gdkchan and improved by @yokrysty
5011 # Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
5012 def decode_png(png_data):
5013 # Reference: https://www.w3.org/TR/PNG/
5014 header = png_data[8:]
5015
5016 if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
5017 raise OSError('Not a valid PNG file.')
5018
5019 int_map = {1: '>B', 2: '>H', 4: '>I'}
5020 unpack_integer = lambda x: struct.unpack(int_map[len(x)], x)[0]
5021
5022 chunks = []
5023
5024 while header:
5025 length = unpack_integer(header[:4])
5026 header = header[4:]
5027
5028 chunk_type = header[:4]
5029 header = header[4:]
5030
5031 chunk_data = header[:length]
5032 header = header[length:]
5033
5034 header = header[4:] # Skip CRC
5035
5036 chunks.append({
5037 'type': chunk_type,
5038 'length': length,
5039 'data': chunk_data
5040 })
5041
5042 ihdr = chunks[0]['data']
5043
5044 width = unpack_integer(ihdr[:4])
5045 height = unpack_integer(ihdr[4:8])
5046
5047 idat = b''
5048
5049 for chunk in chunks:
5050 if chunk['type'] == b'IDAT':
5051 idat += chunk['data']
5052
5053 if not idat:
5054 raise OSError('Unable to read PNG data.')
5055
5056 decompressed_data = bytearray(zlib.decompress(idat))
5057
5058 stride = width * 3 # assumes 8-bit RGB pixels (PNG colour type 2)
5059 pixels = []
5060
5061 def _get_pixel(idx):
5062 x = idx % stride
5063 y = idx // stride
5064 return pixels[y][x]
5065
5066 for y in range(height):
5067 basePos = y * (1 + stride)
5068 filter_type = decompressed_data[basePos]
5069
5070 current_row = []
5071
5072 pixels.append(current_row)
5073
5074 for x in range(stride):
5075 color = decompressed_data[1 + basePos + x]
5076 basex = y * stride + x
5077 left = 0
5078 up = 0
5079
5080 if x > 2:
5081 left = _get_pixel(basex - 3)
5082 if y > 0:
5083 up = _get_pixel(basex - stride)
5084
5085 if filter_type == 1: # Sub
5086 color = (color + left) & 0xff
5087 elif filter_type == 2: # Up
5088 color = (color + up) & 0xff
5089 elif filter_type == 3: # Average
5090 color = (color + ((left + up) >> 1)) & 0xff
5091 elif filter_type == 4: # Paeth
5092 a = left
5093 b = up
5094 c = 0
5095
5096 if x > 2 and y > 0:
5097 c = _get_pixel(basex - stride - 3)
5098
5099 p = a + b - c
5100
5101 pa = abs(p - a)
5102 pb = abs(p - b)
5103 pc = abs(p - c)
5104
5105 if pa <= pb and pa <= pc:
5106 color = (color + a) & 0xff
5107 elif pb <= pc:
5108 color = (color + b) & 0xff
5109 else:
5110 color = (color + c) & 0xff
5111
5112 current_row.append(color)
5113
5114 return width, height, pixels
5115
5116
5117 def write_xattr(path, key, value):
5118 # Windows: Write xattrs to NTFS Alternate Data Streams:
5119 # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
5120 if compat_os_name == 'nt':
5121 assert ':' not in key
5122 assert os.path.exists(path)
5123
5124 try:
5125 with open(f'{path}:{key}', 'wb') as f:
5126 f.write(value)
5127 except OSError as e:
5128 raise XAttrMetadataError(e.errno, e.strerror)
5129 return
5130
5131 # UNIX Method 1. Use xattrs/pyxattrs modules
5132
5133 setxattr = None
5134 if getattr(xattr, '_yt_dlp__identifier', None) == 'pyxattr':
5135 # Unicode arguments are not supported in pyxattr until version 0.5.0
5136 # See https://github.com/ytdl-org/youtube-dl/issues/5498
5137 if version_tuple(xattr.__version__) >= (0, 5, 0):
5138 setxattr = xattr.set
5139 elif xattr:
5140 setxattr = xattr.setxattr
5141
5142 if setxattr:
5143 try:
5144 setxattr(path, key, value)
5145 except OSError as e:
5146 raise XAttrMetadataError(e.errno, e.strerror)
5147 return
5148
5149 # UNIX Method 2. Use setfattr/xattr executables
5150 exe = ('setfattr' if check_executable('setfattr', ['--version'])
5151 else 'xattr' if check_executable('xattr', ['-h']) else None)
5152 if not exe:
5153 raise XAttrUnavailableError(
5154 'Couldn\'t find a tool to set the xattrs. Install either the python "xattr" or "pyxattr" modules or the '
5155 + ('"xattr" binary' if sys.platform != 'linux' else 'GNU "attr" package (which contains the "setfattr" tool)'))
5156
5157 value = value.decode()
5158 try:
5159 _, stderr, returncode = Popen.run(
5160 [exe, '-w', key, value, path] if exe == 'xattr' else [exe, '-n', key, '-v', value, path],
5161 text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
5162 except OSError as e:
5163 raise XAttrMetadataError(e.errno, e.strerror)
5164 if returncode:
5165 raise XAttrMetadataError(returncode, stderr)
5166
5167
5168 def random_birthday(year_field, month_field, day_field):
5169 start_date = datetime.date(1950, 1, 1)
5170 end_date = datetime.date(1995, 12, 31)
5171 offset = random.randint(0, (end_date - start_date).days)
5172 random_date = start_date + datetime.timedelta(offset)
5173 return {
5174 year_field: str(random_date.year),
5175 month_field: str(random_date.month),
5176 day_field: str(random_date.day),
5177 }
5178
5179
5180 # Templates for internet shortcut files, which are plain text files.
5181 DOT_URL_LINK_TEMPLATE = '''\
5182 [InternetShortcut]
5183 URL=%(url)s
5184 '''
5185
5186 DOT_WEBLOC_LINK_TEMPLATE = '''\
5187 <?xml version="1.0" encoding="UTF-8"?>
5188 <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
5189 <plist version="1.0">
5190 <dict>
5191 \t<key>URL</key>
5192 \t<string>%(url)s</string>
5193 </dict>
5194 </plist>
5195 '''
5196
5197 DOT_DESKTOP_LINK_TEMPLATE = '''\
5198 [Desktop Entry]
5199 Encoding=UTF-8
5200 Name=%(filename)s
5201 Type=Link
5202 URL=%(url)s
5203 Icon=text-html
5204 '''
5205
5206 LINK_TEMPLATES = {
5207 'url': DOT_URL_LINK_TEMPLATE,
5208 'desktop': DOT_DESKTOP_LINK_TEMPLATE,
5209 'webloc': DOT_WEBLOC_LINK_TEMPLATE,
5210 }
5211
5212
5213 def iri_to_uri(iri):
5214 """
5215 Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).
5216
5217 The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the already-escaped parts of the URI intact.
5218 """
5219
5220 iri_parts = urllib.parse.urlparse(iri)
5221
5222 if '[' in iri_parts.netloc:
5223 raise ValueError('IPv6 URIs are not yet supported.')
5224 # Querying `.netloc`, when there's only one bracket, also raises a ValueError.
5225
5226 # The `safe` argument values, that the following code uses, contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.
5227
5228 net_location = ''
5229 if iri_parts.username:
5230 net_location += urllib.parse.quote(iri_parts.username, safe=r"!$%&'()*+,~")
5231 if iri_parts.password is not None:
5232 net_location += ':' + urllib.parse.quote(iri_parts.password, safe=r"!$%&'()*+,~")
5233 net_location += '@'
5234
5235 net_location += iri_parts.hostname.encode('idna').decode() # Punycode for Unicode hostnames.
5236 # The 'idna' encoding produces ASCII text.
5237 if iri_parts.port is not None and iri_parts.port != 80:
5238 net_location += ':' + str(iri_parts.port)
5239
5240 return urllib.parse.urlunparse(
5241 (iri_parts.scheme,
5242 net_location,
5243
5244 urllib.parse.quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),
5245
5246 # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
5247 urllib.parse.quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),
5248
5249 # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
5250 urllib.parse.quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),
5251
5252 urllib.parse.quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))
5253
5254 # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
5255
5256
5257 def to_high_limit_path(path):
5258 if sys.platform in ['win32', 'cygwin']:
5259 # Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited.
5260 return '\\\\?\\' + os.path.abspath(path)
5261
5262 return path
5263
5264
5265 def format_field(obj, field=None, template='%s', ignore=NO_DEFAULT, default='', func=IDENTITY):
5266 val = traverse_obj(obj, *variadic(field))
5267 if (not val and val != 0) if ignore is NO_DEFAULT else val in variadic(ignore):
5268 return default
5269 return template % func(val)
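
# e.g. formatting an optional field with a fallback:
# >>> format_field({'width': 1280}, 'width', '%dpx')
# '1280px'
# >>> format_field({}, 'width', '%dpx', default='unknown')
# 'unknown'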
5270
5271
5272 def clean_podcast_url(url):
5273 return re.sub(r'''(?x)
5274 (?:
5275 (?:
5276 chtbl\.com/track|
5277 media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
5278 play\.podtrac\.com
5279 )/[^/]+|
5280 (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
5281 flex\.acast\.com|
5282 pd(?:
5283 cn\.co| # https://podcorn.com/analytics-prefix/
5284 st\.fm # https://podsights.com/docs/
5285 )/e
5286 )/''', '', url)
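
# e.g. stripping a podtrac redirect prefix (the wrapped URL is illustrative):
# >>> clean_podcast_url('https://www.podtrac.com/pts/redirect.mp3/example.com/ep.mp3')
# 'https://example.com/ep.mp3'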
5287
5288
5289 _HEX_TABLE = '0123456789abcdef'
5290
5291
5292 def random_uuidv4():
5293 return re.sub(r'[xy]', lambda x: _HEX_TABLE[random.randint(0, 15)], 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')
5294
5295
5296 def make_dir(path, to_screen=None):
5297 try:
5298 dn = os.path.dirname(path)
5299 if dn and not os.path.exists(dn):
5300 os.makedirs(dn)
5301 return True
5302 except OSError as err:
5303 if callable(to_screen):
5304 to_screen('unable to create directory ' + error_to_compat_str(err))
5305 return False
5306
5307
5308 def get_executable_path():
5309 from .update import _get_variant_and_executable_path
5310
5311 return os.path.dirname(os.path.abspath(_get_variant_and_executable_path()[1]))
5312
5313
5314 def load_plugins(name, suffix, namespace):
5315 classes = {}
5316 with contextlib.suppress(FileNotFoundError):
5317 plugins_spec = importlib.util.spec_from_file_location(
5318 name, os.path.join(get_executable_path(), 'ytdlp_plugins', name, '__init__.py'))
5319 plugins = importlib.util.module_from_spec(plugins_spec)
5320 sys.modules[plugins_spec.name] = plugins
5321 plugins_spec.loader.exec_module(plugins)
5322 for name in dir(plugins):
5323 if name in namespace:
5324 continue
5325 if not name.endswith(suffix):
5326 continue
5327 klass = getattr(plugins, name)
5328 classes[name] = namespace[name] = klass
5329 return classes
5330
5331
5332 def traverse_obj(
5333 obj, *paths, default=NO_DEFAULT, expected_type=None, get_all=True,
5334 casesense=True, is_user_input=False, traverse_string=False):
5335 """
5336 Safely traverse nested `dict`s and `Sequence`s
5337
5338 >>> obj = [{}, {"key": "value"}]
5339 >>> traverse_obj(obj, (1, "key"))
5340 "value"
5341
5342 Each of the provided `paths` is tested and the first producing a valid result will be returned.
5343 The next path will also be tested if the path branched but no results could be found.
5344 Supported values for traversal are `Mapping`, `Sequence` and `re.Match`.
5345 A value of None is treated as the absence of a value.
5346
5347 The paths will be wrapped in `variadic`, so that `'key'` is conveniently the same as `('key', )`.
5348
5349 The keys in the path can be one of:
5350 - `None`: Return the current object.
5351 - `str`/`int`: Return `obj[key]`. For `re.Match`, return `obj.group(key)`.
5352 - `slice`: Branch out and return all values in `obj[key]`.
5353 - `Ellipsis`: Branch out and return a list of all values.
5354 - `tuple`/`list`: Branch out and return a list of all matching values.
5355 Read as: `[traverse_obj(obj, branch) for branch in branches]`.
5356 - `function`: Branch out and return values filtered by the function.
5357 Read as: `[value for key, value in obj if function(key, value)]`.
5358 For `Sequence`s, `key` is the index of the value.
5359 - `dict`: Transform the current object and return a matching dict.
5360 Read as: `{key: traverse_obj(obj, path) for key, path in dct.items()}`.
5361
5362 `tuple`, `list`, and `dict` all support nested paths and branches.
5363
5364 @param paths Paths to traverse by.
5365 @param default Value to return if the paths do not match.
5366 @param expected_type If a `type`, only accept final values of this type.
5367 If any other callable, try to call the function on each result.
5368 @param get_all If `False`, return the first matching result, otherwise all matching ones.
5369 @param casesense If `False`, consider string dictionary keys as case insensitive.
5370
5371 The following are only meant to be used by YoutubeDL.prepare_outtmpl and are not part of the API
5372
5373 @param is_user_input Whether the keys are generated from user input.
5374 If `True` strings get converted to `int`/`slice` if needed.
5375 @param traverse_string Whether to traverse into objects as strings.
5376 If `True`, any non-compatible object will first be
5377 converted into a string and then traversed into.
5378
5379
5380 @returns The result of the object traversal.
5381 If successful, `get_all=True`, and the path branches at least once,
5382 then a list of results is returned instead.
5383 A list is always returned if the last path branches and no `default` is given.
5384 """
5385 is_sequence = lambda x: isinstance(x, collections.abc.Sequence) and not isinstance(x, (str, bytes))
5386 casefold = lambda k: k.casefold() if isinstance(k, str) else k
5387
5388 if isinstance(expected_type, type):
5389 type_test = lambda val: val if isinstance(val, expected_type) else None
5390 else:
5391 type_test = lambda val: try_call(expected_type or IDENTITY, args=(val,))
5392
5393 def apply_key(key, obj):
5394 if obj is None:
5395 return
5396
5397 elif key is None:
5398 yield obj
5399
5400 elif isinstance(key, (list, tuple)):
5401 for branch in key:
5402 _, result = apply_path(obj, branch)
5403 yield from result
5404
5405 elif key is ...:
5406 if isinstance(obj, collections.abc.Mapping):
5407 yield from obj.values()
5408 elif is_sequence(obj):
5409 yield from obj
5410 elif isinstance(obj, re.Match):
5411 yield from obj.groups()
5412 elif traverse_string:
5413 yield from str(obj)
5414
5415 elif callable(key):
5416 if is_sequence(obj):
5417 iter_obj = enumerate(obj)
5418 elif isinstance(obj, collections.abc.Mapping):
5419 iter_obj = obj.items()
5420 elif isinstance(obj, re.Match):
5421 iter_obj = enumerate((obj.group(), *obj.groups()))
5422 elif traverse_string:
5423 iter_obj = enumerate(str(obj))
5424 else:
5425 return
5426 yield from (v for k, v in iter_obj if try_call(key, args=(k, v)))
5427
5428 elif isinstance(key, dict):
5429 iter_obj = ((k, _traverse_obj(obj, v)) for k, v in key.items())
5430 yield {k: v if v is not None else default for k, v in iter_obj
5431 if v is not None or default is not NO_DEFAULT}
5432
5433 elif isinstance(obj, collections.abc.Mapping):
5434 yield (obj.get(key) if casesense or (key in obj)
5435 else next((v for k, v in obj.items() if casefold(k) == key), None))
5436
5437 elif isinstance(obj, re.Match):
5438 if isinstance(key, int) or casesense:
5439 with contextlib.suppress(IndexError):
5440 yield obj.group(key)
5441 return
5442
5443 if not isinstance(key, str):
5444 return
5445
5446 yield next((v for k, v in obj.groupdict().items() if casefold(k) == key), None)
5447
5448 else:
5449 if is_user_input:
5450 key = (int_or_none(key) if ':' not in key
5451 else slice(*map(int_or_none, key.split(':'))))
5452
5453 if not isinstance(key, (int, slice)):
5454 return
5455
5456 if not is_sequence(obj):
5457 if not traverse_string:
5458 return
5459 obj = str(obj)
5460
5461 with contextlib.suppress(IndexError):
5462 yield obj[key]
5463
5464 def apply_path(start_obj, path):
5465 objs = (start_obj,)
5466 has_branched = False
5467
5468 for key in variadic(path):
5469 if is_user_input and key == ':':
5470 key = ...
5471
5472 if not casesense and isinstance(key, str):
5473 key = key.casefold()
5474
5475 if key is ... or isinstance(key, (list, tuple)) or callable(key):
5476 has_branched = True
5477
5478 key_func = functools.partial(apply_key, key)
5479 objs = itertools.chain.from_iterable(map(key_func, objs))
5480
5481 return has_branched, objs
5482
5483 def _traverse_obj(obj, path, use_list=True):
5484 has_branched, results = apply_path(obj, path)
5485 results = LazyList(x for x in map(type_test, results) if x is not None)
5486
5487 if get_all and has_branched:
5488 return results.exhaust() if results or use_list else None
5489
5490 return results[0] if results else None
5491
5492 for index, path in enumerate(paths, 1):
5493 use_list = default is NO_DEFAULT and index == len(paths)
5494 result = _traverse_obj(obj, path, use_list)
5495 if result is not None:
5496 return result
5497
5498 return None if default is NO_DEFAULT else default
5499
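# Illustrative usage sketch (hedged; `obj` below is hypothetical):
#   obj = {'formats': [{'height': 720}, {'height': None}, {}]}
#   traverse_obj(obj, ('formats', 0, 'height'))                # 720
#   traverse_obj(obj, ('formats', ..., 'height'))              # [720] (branching; Nones dropped)
#   traverse_obj(obj, ('formats', 2, 'height'), ('formats', 0, 'height'))   # 720 (falls back to the second path)
#   traverse_obj(obj, {'h': ('formats', 0, 'height')})         # {'h': 720}
#   traverse_obj(obj, ('formats', 0, 'height'), expected_type=str)          # None (type mismatch)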
5500
5501 def traverse_dict(dictn, keys, casesense=True):
5502 deprecation_warning(f'"{__name__}.traverse_dict" is deprecated and may be removed '
5503 f'in a future version. Use "{__name__}.traverse_obj" instead')
5504 return traverse_obj(dictn, keys, casesense=casesense, is_user_input=True, traverse_string=True)
5505
5506
5507 def get_first(obj, keys, **kwargs):
5508 return traverse_obj(obj, (..., *variadic(keys)), **kwargs, get_all=False)
5509
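# Hedged example: first non-None value across a branch (the list is hypothetical):
#   get_first([{'id': None}, {'id': 'abc'}], 'id')   # 'abc'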
5510
5511 def time_seconds(**kwargs):
5512 t = datetime.datetime.now(datetime.timezone(datetime.timedelta(**kwargs)))
5513 return t.timestamp()
5514
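# Note: an aware datetime's .timestamp() does not depend on its tzinfo, so this
# always returns the current Unix time; the kwargs (e.g. time_seconds(hours=9))
# only select the timezone of the intermediate datetime object.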
5515
5516 # Create a JSON Web Signature (JWS) with the HS256 algorithm;
5517 # the resulting format is JWS Compact Serialization,
5518 # implemented following JWT (https://www.rfc-editor.org/rfc/rfc7519.html)
5519 # and JWS (https://www.rfc-editor.org/rfc/rfc7515.html)
5520 def jwt_encode_hs256(payload_data, key, headers=None):
5521 header_data = {
5522 'alg': 'HS256',
5523 'typ': 'JWT',
5524 }
5525 if headers:
5526 header_data.update(headers)
5527 header_b64 = base64.b64encode(json.dumps(header_data).encode())
5528 payload_b64 = base64.b64encode(json.dumps(payload_data).encode())
5529 h = hmac.new(key.encode(), header_b64 + b'.' + payload_b64, hashlib.sha256)
5530 signature_b64 = base64.b64encode(h.digest())
5531 token = header_b64 + b'.' + payload_b64 + b'.' + signature_b64
5532 return token
5533
5534
5535 # Can be extended in the future to verify the signature, parse the header, and return the algorithm used if it's not HS256
5536 def jwt_decode_hs256(jwt):
5537 header_b64, payload_b64, signature_b64 = jwt.split('.')
5538 # Add trailing '='s that may have been stripped; superfluous '='s are ignored
5539 payload_data = json.loads(base64.urlsafe_b64decode(f'{payload_b64}==='))
5540 return payload_data
5541
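# Hedged roundtrip sketch (payload and key are hypothetical):
#   token = jwt_encode_hs256({'sub': 'user'}, 'secret')   # b'<header>.<payload>.<signature>'
#   jwt_decode_hs256(token.decode())                      # {'sub': 'user'}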
5542
5543 WINDOWS_VT_MODE = False if compat_os_name == 'nt' else None
5544
5545
5546 @functools.cache
5547 def supports_terminal_sequences(stream):
5548 if compat_os_name == 'nt':
5549 if not WINDOWS_VT_MODE:
5550 return False
5551 elif not os.getenv('TERM'):
5552 return False
5553 try:
5554 return stream.isatty()
5555 except BaseException:
5556 return False
5557
5558
5559 def windows_enable_vt_mode(): # TODO: Do this the proper way https://bugs.python.org/issue30075
5560 if get_windows_version() < (10, 0, 10586):
5561 return
5562 global WINDOWS_VT_MODE
5563 try:
5564 Popen.run('', shell=True)
5565 except Exception:
5566 return
5567
5568 WINDOWS_VT_MODE = True
5569 supports_terminal_sequences.cache_clear()
5570
5571
5572 _terminal_sequences_re = re.compile('\033\\[[^m]+m')
5573
5574
5575 def remove_terminal_sequences(string):
5576 return _terminal_sequences_re.sub('', string)
5577
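# e.g. remove_terminal_sequences('\033[0;31mred\033[0m')   # 'red'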
5578
5579 def number_of_digits(number):
5580 return len('%d' % number)
5581
5582
5583 def join_nonempty(*values, delim='-', from_dict=None):
5584 if from_dict is not None:
5585 values = (traverse_obj(from_dict, variadic(v)) for v in values)
5586 return delim.join(map(str, filter(None, values)))
5587
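# Hedged examples (values are hypothetical):
#   join_nonempty('1080p', None, 'dash', '')                                  # '1080p-dash'
#   join_nonempty('height', 'ext', from_dict={'height': 720, 'ext': 'mp4'})   # '720-mp4'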
5588
5589 def scale_thumbnails_to_max_format_width(formats, thumbnails, url_width_re):
5590 """
5591 Find the largest format dimensions in terms of video width and, for each thumbnail:
5592 * Modify the URL: Match the width with the provided regex and replace it with the largest format width
5593 * Update dimensions
5594
5595 This function is useful with video services that scale the provided thumbnails on demand
5596 """
5597 _keys = ('width', 'height')
5598 max_dimensions = max(
5599 (tuple(format.get(k) or 0 for k in _keys) for format in formats),
5600 default=(0, 0))
5601 if not max_dimensions[0]:
5602 return thumbnails
5603 return [
5604 merge_dicts(
5605 {'url': re.sub(url_width_re, str(max_dimensions[0]), thumbnail['url'])},
5606 dict(zip(_keys, max_dimensions)), thumbnail)
5607 for thumbnail in thumbnails
5608 ]
5609
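# Hedged sketch (the formats, thumbnails and regex below are hypothetical):
#   formats = [{'width': 1280, 'height': 720}]
#   thumbs = [{'url': 'https://cdn.example/img_640.jpg', 'width': 640, 'height': 360}]
#   scale_thumbnails_to_max_format_width(formats, thumbs, r'\d+(?=\.jpg)')
#   # -> [{'url': 'https://cdn.example/img_1280.jpg', 'width': 1280, 'height': 720}]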
5610
5611 def parse_http_range(range):
5612 """ Parse value of "Range" or "Content-Range" HTTP header into tuple. """
5613 if not range:
5614 return None, None, None
5615 crg = re.search(r'bytes[ =](\d+)-(\d+)?(?:/(\d+))?', range)
5616 if not crg:
5617 return None, None, None
5618 return int(crg.group(1)), int_or_none(crg.group(2)), int_or_none(crg.group(3))
5619
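# e.g. parse_http_range('bytes=0-499')       # (0, 499, None)
#      parse_http_range('bytes 0-499/1234')  # (0, 499, 1234)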
5620
5621 def read_stdin(what):
5622 eof = 'Ctrl+Z' if compat_os_name == 'nt' else 'Ctrl+D'
5623 write_string(f'Reading {what} from STDIN - EOF ({eof}) to end:\n')
5624 return sys.stdin
5625
5626
5627 def determine_file_encoding(data):
5628 """
5629 Detect the text encoding used
5630 @returns (encoding, bytes to skip)
5631 """
5632
5633 # BOM marks are given priority over declarations
5634 for bom, enc in BOMS:
5635 if data.startswith(bom):
5636 return enc, len(bom)
5637
5638 # Strip off all null bytes to match even when UTF-16 or UTF-32 is used.
5639 # We ignore the endianness to get a good enough match
5640 data = data.replace(b'\0', b'')
5641 mobj = re.match(rb'(?m)^#\s*coding\s*:\s*(\S+)\s*$', data)
5642 return mobj.group(1).decode() if mobj else None, 0
5643
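# Hedged examples (byte strings are hypothetical):
#   determine_file_encoding(b'\xef\xbb\xbfspam')      # ('utf-8', 3) -- BOM wins
#   determine_file_encoding(b'# coding: utf-8\n-v')   # ('utf-8', 0) -- declaration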
5644
5645 class Config:
5646 own_args = None
5647 parsed_args = None
5648 filename = None
5649 __initialized = False
5650
5651 def __init__(self, parser, label=None):
5652 self.parser, self.label = parser, label
5653 self._loaded_paths, self.configs = set(), []
5654
5655 def init(self, args=None, filename=None):
5656 assert not self.__initialized
5657 self.own_args, self.filename = args, filename
5658 return self.load_configs()
5659
5660 def load_configs(self):
5661 directory = ''
5662 if self.filename:
5663 location = os.path.realpath(self.filename)
5664 directory = os.path.dirname(location)
5665 if location in self._loaded_paths:
5666 return False
5667 self._loaded_paths.add(location)
5668
5669 self.__initialized = True
5670 opts, _ = self.parser.parse_known_args(self.own_args)
5671 self.parsed_args = self.own_args
5672 for location in opts.config_locations or []:
5673 if location == '-':
5674 if location in self._loaded_paths:
5675 continue
5676 self._loaded_paths.add(location)
5677 self.append_config(shlex.split(read_stdin('options'), comments=True), label='stdin')
5678 continue
5679 location = os.path.join(directory, expand_path(location))
5680 if os.path.isdir(location):
5681 location = os.path.join(location, 'yt-dlp.conf')
5682 if not os.path.exists(location):
5683 self.parser.error(f'config location {location} does not exist')
5684 self.append_config(self.read_file(location), location)
5685 return True
5686
5687 def __str__(self):
5688 label = join_nonempty(
5689 self.label, 'config', f'"{self.filename}"' if self.filename else '',
5690 delim=' ')
5691 return join_nonempty(
5692 self.own_args is not None and f'{label[0].upper()}{label[1:]}: {self.hide_login_info(self.own_args)}',
5693 *(f'\n{c}'.replace('\n', '\n| ')[1:] for c in self.configs),
5694 delim='\n')
5695
5696 @staticmethod
5697 def read_file(filename, default=[]):
5698 try:
5699 optionf = open(filename, 'rb')
5700 except OSError:
5701 return default # silently skip if file is not present
5702 try:
5703 enc, skip = determine_file_encoding(optionf.read(512))
5704 optionf.seek(skip, io.SEEK_SET)
5705 except OSError:
5706 enc = None # silently skip read errors
5707 try:
5708 # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
5709 contents = optionf.read().decode(enc or preferredencoding())
5710 res = shlex.split(contents, comments=True)
5711 except Exception as err:
5712 raise ValueError(f'Unable to parse "{filename}": {err}')
5713 finally:
5714 optionf.close()
5715 return res
5716
5717 @staticmethod
5718 def hide_login_info(opts):
5719 PRIVATE_OPTS = {'-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'}
5720 eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')
5721
5722 def _scrub_eq(o):
5723 m = eqre.match(o)
5724 if m:
5725 return m.group('key') + '=PRIVATE'
5726 else:
5727 return o
5728
5729 opts = list(map(_scrub_eq, opts))
5730 for idx, opt in enumerate(opts):
5731 if opt in PRIVATE_OPTS and idx + 1 < len(opts):
5732 opts[idx + 1] = 'PRIVATE'
5733 return opts
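
    # Hedged example:
    #   Config.hide_login_info(['-u', 'me', '--password=hunter2'])
    #   # -> ['-u', 'PRIVATE', '--password=PRIVATE']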
5734
5735 def append_config(self, *args, label=None):
5736 config = type(self)(self.parser, label)
5737 config._loaded_paths = self._loaded_paths
5738 if config.init(*args):
5739 self.configs.append(config)
5740
5741 @property
5742 def all_args(self):
5743 for config in reversed(self.configs):
5744 yield from config.all_args
5745 yield from self.parsed_args or []
5746
5747 def parse_known_args(self, **kwargs):
5748 return self.parser.parse_known_args(self.all_args, **kwargs)
5749
5750 def parse_args(self):
5751 return self.parser.parse_args(self.all_args)
5752
5753
5754 class WebSocketsWrapper:
5755 """Wraps websockets module to use in non-async scopes"""
5756 pool = None
5757
5758 def __init__(self, url, headers=None, connect=True):
5759 self.loop = asyncio.new_event_loop()
5760 # XXX: "loop" is deprecated
5761 self.conn = websockets.connect(
5762 url, extra_headers=headers, ping_interval=None,
5763 close_timeout=float('inf'), loop=self.loop, ping_timeout=float('inf'))
5764 if connect:
5765 self.__enter__()
5766 atexit.register(self.__exit__, None, None, None)
5767
5768 def __enter__(self):
5769 if not self.pool:
5770 self.pool = self.run_with_loop(self.conn.__aenter__(), self.loop)
5771 return self
5772
5773 def send(self, *args):
5774 self.run_with_loop(self.pool.send(*args), self.loop)
5775
5776 def recv(self, *args):
5777 return self.run_with_loop(self.pool.recv(*args), self.loop)
5778
5779 def __exit__(self, type, value, traceback):
5780 try:
5781 return self.run_with_loop(self.conn.__aexit__(type, value, traceback), self.loop)
5782 finally:
5783 self._cancel_all_tasks(self.loop)
5784 self.loop.close()
5785
5786 # taken from https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py with modifications
5787 # for contributors: if any new library that uses asyncio needs to run in a non-async scope, move these functions out of this class
5788 @staticmethod
5789 def run_with_loop(main, loop):
5790 if not asyncio.iscoroutine(main):
5791 raise ValueError(f'a coroutine was expected, got {main!r}')
5792
5793 try:
5794 return loop.run_until_complete(main)
5795 finally:
5796 loop.run_until_complete(loop.shutdown_asyncgens())
5797 if hasattr(loop, 'shutdown_default_executor'):
5798 loop.run_until_complete(loop.shutdown_default_executor())
5799
5800 @staticmethod
5801 def _cancel_all_tasks(loop):
5802 to_cancel = asyncio.all_tasks(loop)
5803
5804 if not to_cancel:
5805 return
5806
5807 for task in to_cancel:
5808 task.cancel()
5809
5810 # XXX: "loop" is removed in Python 3.10+
5811 loop.run_until_complete(
5812 asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))
5813
5814 for task in to_cancel:
5815 if task.cancelled():
5816 continue
5817 if task.exception() is not None:
5818 loop.call_exception_handler({
5819 'message': 'unhandled exception during asyncio.run() shutdown',
5820 'exception': task.exception(),
5821 'task': task,
5822 })
5823
5824
5825 def merge_headers(*dicts):
5826 """Merge dicts of http headers case insensitively, prioritizing the latter ones"""
5827 return {k.title(): v for k, v in itertools.chain.from_iterable(map(dict.items, dicts))}
5828
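# e.g. merge_headers({'user-agent': 'A'}, {'User-Agent': 'B'})   # {'User-Agent': 'B'}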
5829
5830 def cached_method(f):
5831 """Cache a method"""
5832 signature = inspect.signature(f)
5833
5834 @functools.wraps(f)
5835 def wrapper(self, *args, **kwargs):
5836 bound_args = signature.bind(self, *args, **kwargs)
5837 bound_args.apply_defaults()
5838 key = tuple(bound_args.arguments.values())[1:]
5839
5840 cache = vars(self).setdefault('__cached_method__cache', {}).setdefault(f.__name__, {})
5841 if key not in cache:
5842 cache[key] = f(self, *args, **kwargs)
5843 return cache[key]
5844 return wrapper
5845
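# Hedged sketch (the class and helper below are hypothetical):
#   class Extractor:
#       @cached_method
#       def fetch(self, url):
#           return expensive_request(url)   # hypothetical helper
# Repeated .fetch(url) calls on the same instance run the body only once per key.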
5846
5847 class classproperty:
5848 """property access for class methods"""
5849
5850 def __init__(self, func):
5851 functools.update_wrapper(self, func)
5852 self.func = func
5853
5854 def __get__(self, _, cls):
5855 return self.func(cls)
5856
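# Hedged sketch:
#   class Foo:
#       @classproperty
#       def name(cls):
#           return cls.__name__
#   Foo.name   # 'Foo', without instantiating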
5857
5858 class Namespace(types.SimpleNamespace):
5859 """Immutable namespace"""
5860
5861 def __iter__(self):
5862 return iter(self.__dict__.values())
5863
5864 @property
5865 def items_(self):
5866 return self.__dict__.items()
5867
5868
5869 MEDIA_EXTENSIONS = Namespace(
5870 common_video=('avi', 'flv', 'mkv', 'mov', 'mp4', 'webm'),
5871 video=('3g2', '3gp', 'f4v', 'mk3d', 'divx', 'mpg', 'ogv', 'm4v', 'wmv'),
5872 common_audio=('aiff', 'alac', 'flac', 'm4a', 'mka', 'mp3', 'ogg', 'opus', 'wav'),
5873 audio=('aac', 'ape', 'asf', 'f4a', 'f4b', 'm4b', 'm4p', 'm4r', 'oga', 'ogx', 'spx', 'vorbis', 'wma'),
5874 thumbnails=('jpg', 'png', 'webp'),
5875 storyboards=('mhtml', ),
5876 subtitles=('srt', 'vtt', 'ass', 'lrc'),
5877 manifests=('f4f', 'f4m', 'm3u8', 'smil', 'mpd'),
5878 )
5879 MEDIA_EXTENSIONS.video += MEDIA_EXTENSIONS.common_video
5880 MEDIA_EXTENSIONS.audio += MEDIA_EXTENSIONS.common_audio
5881
5882 KNOWN_EXTENSIONS = (*MEDIA_EXTENSIONS.video, *MEDIA_EXTENSIONS.audio, *MEDIA_EXTENSIONS.manifests)
5883
5884
5885 class RetryManager:
5886 """Usage:
5887 for retry in RetryManager(...):
5888 try:
5889 ...
5890 except SomeException as err:
5891 retry.error = err
5892 continue
5893 """
5894 attempt, _error = 0, None
5895
5896 def __init__(self, _retries, _error_callback, **kwargs):
5897 self.retries = _retries or 0
5898 self.error_callback = functools.partial(_error_callback, **kwargs)
5899
5900 def _should_retry(self):
5901 return self._error is not NO_DEFAULT and self.attempt <= self.retries
5902
5903 @property
5904 def error(self):
5905 if self._error is NO_DEFAULT:
5906 return None
5907 return self._error
5908
5909 @error.setter
5910 def error(self, value):
5911 self._error = value
5912
5913 def __iter__(self):
5914 while self._should_retry():
5915 self.error = NO_DEFAULT
5916 self.attempt += 1
5917 yield self
5918 if self.error:
5919 self.error_callback(self.error, self.attempt, self.retries)
5920
5921 @staticmethod
5922 def report_retry(e, count, retries, *, sleep_func, info, warn, error=None, suffix=None):
5923 """Utility function for reporting retries"""
5924 if count > retries:
5925 if error:
5926 return error(f'{e}. Giving up after {count - 1} retries') if count > 1 else error(str(e))
5927 raise e
5928
5929 if not count:
5930 return warn(e)
5931 elif isinstance(e, ExtractorError):
5932 e = remove_end(str_or_none(e.cause) or e.orig_msg, '.')
5933 warn(f'{e}. Retrying{format_field(suffix, None, " %s")} ({count}/{retries})...')
5934
5935 delay = float_or_none(sleep_func(n=count - 1)) if callable(sleep_func) else sleep_func
5936 if delay:
5937 info(f'Sleeping {delay:.2f} seconds ...')
5938 time.sleep(delay)
5939
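# Hedged wiring sketch (do_request is hypothetical):
#   for retry in RetryManager(3, RetryManager.report_retry,
#                             sleep_func=1, info=print, warn=print):
#       try:
#           do_request()
#       except OSError as err:
#           retry.error = err   # mark the attempt as failed and retry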
5940
5941 def make_archive_id(ie, video_id):
5942 ie_key = ie if isinstance(ie, str) else ie.ie_key()
5943 return f'{ie_key.lower()} {video_id}'
5944
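# e.g. make_archive_id('Youtube', 'dQw4w9WgXcQ')   # 'youtube dQw4w9WgXcQ'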
5945
5946 def truncate_string(s, left, right=0):
5947 assert left > 3 and right >= 0
5948 if s is None or len(s) <= left + right:
5949 return s
5950 return f'{s[:left-3]}...{s[-right:] if right else ""}'  # NB: s[-0:] is the whole string, hence the guard
5951
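# With the guard above:
#   truncate_string('abcdefghij', 6)      # 'abc...'
#   truncate_string('abcdefghij', 6, 2)   # 'abc...ij'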
5952
5953 def orderedSet_from_options(options, alias_dict, *, use_regex=False, start=None):
5954 assert 'all' in alias_dict, '"all" alias is required'
5955 requested = list(start or [])
5956 for val in options:
5957 discard = val.startswith('-')
5958 if discard:
5959 val = val[1:]
5960
5961 if val in alias_dict:
5962 val = alias_dict[val] if not discard else [
5963 i[1:] if i.startswith('-') else f'-{i}' for i in alias_dict[val]]
5964 # NB: Do not allow regex in aliases for performance
5965 requested = orderedSet_from_options(val, alias_dict, start=requested)
5966 continue
5967
5968 current = (filter(re.compile(val, re.I).fullmatch, alias_dict['all']) if use_regex
5969 else [val] if val in alias_dict['all'] else None)
5970 if current is None:
5971 raise ValueError(val)
5972
5973 if discard:
5974 for item in current:
5975 while item in requested:
5976 requested.remove(item)
5977 else:
5978 requested.extend(current)
5979
5980 return orderedSet(requested)
5981
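# Hedged example (alias_dict below is hypothetical):
#   aliases = {'all': ['info', 'comments', 'thumbnails']}
#   orderedSet_from_options(['all', '-comments'], aliases)   # ['info', 'thumbnails']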
5982
5983 # Deprecated
5984 has_certifi = bool(certifi)
5985 has_websockets = bool(websockets)