jfr.im git - yt-dlp.git/blame - yt_dlp/utils.py
[extractor] Framework for embed detection (#4307)
[yt-dlp.git] / yt_dlp / utils.py
6929b41a 1import asyncio
15dfb392 2import atexit
1e399778 3import base64
5bc880b9 4import binascii
912b38b4 5import calendar
676eb3f2 6import codecs
c380cc28 7import collections
62e609ab 8import contextlib
e3946f98 9import ctypes
c496ca96 10import datetime
0c265486 11import email.header
f8271158 12import email.utils
f45c185f 13import errno
d77c3dfd 14import gzip
49fa4d9a 15import hashlib
16import hmac
ac668111 17import html.entities
18import html.parser
54007a45 19import http.client
20import http.cookiejar
019a94f7 21import importlib.util
b1f94422 22import inspect
03f9daab 23import io
79a2e94e 24import itertools
f4bfd65f 25import json
d77c3dfd 26import locale
02dbf93f 27import math
f8271158 28import mimetypes
347de493 29import operator
d77c3dfd 30import os
c496ca96 31import platform
773f291d 32import random
d77c3dfd 33import re
f8271158 34import shlex
c496ca96 35import socket
79a2e94e 36import ssl
ac668111 37import struct
1c088fa8 38import subprocess
d77c3dfd 39import sys
181c8655 40import tempfile
c380cc28 41import time
01951dda 42import traceback
64fa820c 43import types
14f25df2 44import urllib.error
f8271158 45import urllib.parse
ac668111 46import urllib.request
bcf89ce6 47import xml.etree.ElementTree
d77c3dfd 48import zlib
d77c3dfd 49
6929b41a 50from .compat import functools # isort: split
8c25f81b 51from .compat import (
36e6f62c 52 compat_etree_fromstring,
51098426 53 compat_expanduser,
f8271158 54 compat_HTMLParseError,
efa97bdc 55 compat_os_name,
702ccf2d 56 compat_shlex_quote,
8c25f81b 57)
ac668111 58from .dependencies import brotli, certifi, websockets, xattr
f8271158 59from .socks import ProxyType, sockssocket
71aff188 60
4644ac55 61
51fb4995 62def register_socks_protocols():
63 # "Register" SOCKS protocols
d5ae6bb5 64 # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
65 # URLs with protocols not in urlparse.uses_netloc are not handled correctly
51fb4995 66 for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
14f25df2 67 if scheme not in urllib.parse.uses_netloc:
68 urllib.parse.uses_netloc.append(scheme)
51fb4995 69
70
468e2e92 71# This is not clearly defined otherwise
72compiled_regex_type = type(re.compile(''))
73
f7a147e3 74
75def random_user_agent():
76 _USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
77 _CHROME_VERSIONS = (
19b4c74d 78 '90.0.4430.212',
79 '90.0.4430.24',
80 '90.0.4430.70',
81 '90.0.4430.72',
82 '90.0.4430.85',
83 '90.0.4430.93',
84 '91.0.4472.101',
85 '91.0.4472.106',
86 '91.0.4472.114',
87 '91.0.4472.124',
88 '91.0.4472.164',
89 '91.0.4472.19',
90 '91.0.4472.77',
91 '92.0.4515.107',
92 '92.0.4515.115',
93 '92.0.4515.131',
94 '92.0.4515.159',
95 '92.0.4515.43',
96 '93.0.4556.0',
97 '93.0.4577.15',
98 '93.0.4577.63',
99 '93.0.4577.82',
100 '94.0.4606.41',
101 '94.0.4606.54',
102 '94.0.4606.61',
103 '94.0.4606.71',
104 '94.0.4606.81',
105 '94.0.4606.85',
106 '95.0.4638.17',
107 '95.0.4638.50',
108 '95.0.4638.54',
109 '95.0.4638.69',
110 '95.0.4638.74',
111 '96.0.4664.18',
112 '96.0.4664.45',
113 '96.0.4664.55',
114 '96.0.4664.93',
115 '97.0.4692.20',
f7a147e3 116 )
117 return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)
118
119
4390d5ec 120SUPPORTED_ENCODINGS = [
121 'gzip', 'deflate'
122]
9b8ee23b 123if brotli:
4390d5ec 124 SUPPORTED_ENCODINGS.append('br')
125
3e669f36 126std_headers = {
f7a147e3 127 'User-Agent': random_user_agent(),
59ae15a5 128 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
59ae15a5 129 'Accept-Language': 'en-us,en;q=0.5',
b1156c1e 130 'Sec-Fetch-Mode': 'navigate',
3e669f36 131}
f427df17 132
5f6a1245 133
fb37eb25 134USER_AGENTS = {
135 'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
136}
137
138
bf42a990 139NO_DEFAULT = object()
7b2c3f47 140IDENTITY = lambda x: x
bf42a990 141
7105440c 142ENGLISH_MONTH_NAMES = [
143 'January', 'February', 'March', 'April', 'May', 'June',
144 'July', 'August', 'September', 'October', 'November', 'December']
145
f6717dec 146MONTH_NAMES = {
147 'en': ENGLISH_MONTH_NAMES,
148 'fr': [
3e4185c3 149 'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
150 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
f6717dec 151}
a942d6cb 152
c587cbb7 153# needed for sanitizing filenames in restricted mode
c8827027 154ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
fd35d8cd 155 itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
156 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))
c587cbb7 157
46f59e89 158DATE_FORMATS = (
159 '%d %B %Y',
160 '%d %b %Y',
161 '%B %d %Y',
cb655f34 162 '%B %dst %Y',
163 '%B %dnd %Y',
9d30c213 164 '%B %drd %Y',
cb655f34 165 '%B %dth %Y',
46f59e89 166 '%b %d %Y',
cb655f34 167 '%b %dst %Y',
168 '%b %dnd %Y',
9d30c213 169 '%b %drd %Y',
cb655f34 170 '%b %dth %Y',
46f59e89 171 '%b %dst %Y %I:%M',
172 '%b %dnd %Y %I:%M',
9d30c213 173 '%b %drd %Y %I:%M',
46f59e89 174 '%b %dth %Y %I:%M',
175 '%Y %m %d',
176 '%Y-%m-%d',
bccdbd22 177 '%Y.%m.%d.',
46f59e89 178 '%Y/%m/%d',
81c13222 179 '%Y/%m/%d %H:%M',
46f59e89 180 '%Y/%m/%d %H:%M:%S',
1931a55e 181 '%Y%m%d%H%M',
182 '%Y%m%d%H%M%S',
4f3fa23e 183 '%Y%m%d',
0c1c6f4b 184 '%Y-%m-%d %H:%M',
46f59e89 185 '%Y-%m-%d %H:%M:%S',
186 '%Y-%m-%d %H:%M:%S.%f',
5014558a 187 '%Y-%m-%d %H:%M:%S:%f',
46f59e89 188 '%d.%m.%Y %H:%M',
189 '%d.%m.%Y %H.%M',
190 '%Y-%m-%dT%H:%M:%SZ',
191 '%Y-%m-%dT%H:%M:%S.%fZ',
192 '%Y-%m-%dT%H:%M:%S.%f0Z',
193 '%Y-%m-%dT%H:%M:%S',
194 '%Y-%m-%dT%H:%M:%S.%f',
195 '%Y-%m-%dT%H:%M',
c6eed6b8 196 '%b %d %Y at %H:%M',
197 '%b %d %Y at %H:%M:%S',
b555ae9b 198 '%B %d %Y at %H:%M',
199 '%B %d %Y at %H:%M:%S',
a63d9bd0 200 '%H:%M %d-%b-%Y',
46f59e89 201)
202
203DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
204DATE_FORMATS_DAY_FIRST.extend([
205 '%d-%m-%Y',
206 '%d.%m.%Y',
207 '%d.%m.%y',
208 '%d/%m/%Y',
209 '%d/%m/%y',
210 '%d/%m/%Y %H:%M:%S',
47304e07 211 '%d-%m-%Y %H:%M',
46f59e89 212])
213
214DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
215DATE_FORMATS_MONTH_FIRST.extend([
216 '%m-%d-%Y',
217 '%m.%d.%Y',
218 '%m/%d/%Y',
219 '%m/%d/%y',
220 '%m/%d/%Y %H:%M:%S',
221])
222
06b3fe29 223PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
ae61d108 224JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>\s*(?P<json_ld>{.+?})\s*</script>'
06b3fe29 225
1d485a1a 226NUMBER_RE = r'\d+(?:\.\d+)?'
227
7105440c 228
0b9c08b4 229@functools.cache
d77c3dfd 230def preferredencoding():
59ae15a5 231 """Get preferred encoding.
d77c3dfd 232
59ae15a5 233 Returns the best encoding scheme for the system, based on
234 locale.getpreferredencoding() and some further tweaks.
235 """
236 try:
237 pref = locale.getpreferredencoding()
28e614de 238 'TEST'.encode(pref)
70a1165b 239 except Exception:
59ae15a5 240 pref = 'UTF-8'
bae611f2 241
59ae15a5 242 return pref
d77c3dfd 243
f4bfd65f 244
181c8655 245def write_json_file(obj, fn):
1394646a 246 """ Encode obj as JSON and write it to fn, atomically if possible """
181c8655 247
cfb0511d 248 tf = tempfile.NamedTemporaryFile(
249 prefix=f'{os.path.basename(fn)}.', dir=os.path.dirname(fn),
250 suffix='.tmp', delete=False, mode='w', encoding='utf-8')
181c8655 251
252 try:
253 with tf:
45d86abe 254 json.dump(obj, tf, ensure_ascii=False)
1394646a 255 if sys.platform == 'win32':
256 # Need to remove existing file on Windows, else os.rename raises
257 # WindowsError or FileExistsError.
19a03940 258 with contextlib.suppress(OSError):
1394646a 259 os.unlink(fn)
19a03940 260 with contextlib.suppress(OSError):
9cd5f54e 261 mask = os.umask(0)
262 os.umask(mask)
263 os.chmod(tf.name, 0o666 & ~mask)
181c8655 264 os.rename(tf.name, fn)
70a1165b 265 except Exception:
19a03940 266 with contextlib.suppress(OSError):
181c8655 267 os.remove(tf.name)
181c8655 268 raise
269
270
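# Minimal usage sketch for write_json_file() (illustrative; 'info.json' and the
# dict contents are made-up example values). The data is serialized into a
# temporary file in the target directory and then renamed over the destination,
# so readers never see a partially written JSON document.
#
#   write_json_file({'id': 'abc123', 'title': 'Example video'}, 'info.json')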
cfb0511d 271def find_xpath_attr(node, xpath, key, val=None):
272 """ Find the xpath xpath[@key=val] """
273 assert re.match(r'^[a-zA-Z_-]+$', key)
86e5f3ed 274 expr = xpath + ('[@%s]' % key if val is None else f"[@{key}='{val}']")
cfb0511d 275 return node.find(expr)
59ae56fa 276
d7e66d39 277# On python2.6 the xml.etree.ElementTree.Element methods don't support
278# the namespace parameter
5f6a1245 279
280
d7e66d39 281def xpath_with_ns(path, ns_map):
282 components = [c.split(':') for c in path.split('/')]
283 replaced = []
284 for c in components:
285 if len(c) == 1:
286 replaced.append(c[0])
287 else:
288 ns, tag = c
289 replaced.append('{%s}%s' % (ns_map[ns], tag))
290 return '/'.join(replaced)
291
d77c3dfd 292
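# Minimal usage sketch for xpath_with_ns() (illustrative): prefixed path
# components are expanded into the Clark notation that ElementTree expects.
#
#   >>> xpath_with_ns('media:group/media:title', {'media': 'http://search.yahoo.com/mrss/'})
#   '{http://search.yahoo.com/mrss/}group/{http://search.yahoo.com/mrss/}title'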
a41fb80c 293def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
578c0745 294 def _find_xpath(xpath):
f9934b96 295 return node.find(xpath)
578c0745 296
14f25df2 297 if isinstance(xpath, str):
578c0745 298 n = _find_xpath(xpath)
299 else:
300 for xp in xpath:
301 n = _find_xpath(xp)
302 if n is not None:
303 break
d74bebd5 304
8e636da4 305 if n is None:
bf42a990 306 if default is not NO_DEFAULT:
307 return default
308 elif fatal:
bf0ff932 309 name = xpath if name is None else name
310 raise ExtractorError('Could not find XML element %s' % name)
311 else:
312 return None
a41fb80c 313 return n
314
315
316def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
8e636da4 317 n = xpath_element(node, xpath, name, fatal=fatal, default=default)
318 if n is None or n == default:
319 return n
320 if n.text is None:
321 if default is not NO_DEFAULT:
322 return default
323 elif fatal:
324 name = xpath if name is None else name
325 raise ExtractorError('Could not find XML element\'s text %s' % name)
326 else:
327 return None
328 return n.text
a41fb80c 329
330
331def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
332 n = find_xpath_attr(node, xpath, key)
333 if n is None:
334 if default is not NO_DEFAULT:
335 return default
336 elif fatal:
86e5f3ed 337 name = f'{xpath}[@{key}]' if name is None else name
a41fb80c 338 raise ExtractorError('Could not find XML attribute %s' % name)
339 else:
340 return None
341 return n.attrib[key]
bf0ff932 342
343
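# Minimal usage sketch for the xpath_* helpers (illustrative): they add
# defaults and fatal/non-fatal error handling on top of plain ElementTree
# lookups.
#
#   >>> doc = xml.etree.ElementTree.fromstring('<root><title>Hello</title></root>')
#   >>> xpath_text(doc, './title')
#   'Hello'
#   >>> xpath_text(doc, './missing', default=None) is None
#   True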
c487cf00 344def get_element_by_id(id, html, **kwargs):
43e8fafd 345 """Return the content of the tag with the specified ID in the passed HTML document"""
c487cf00 346 return get_element_by_attribute('id', id, html, **kwargs)
43e8fafd 347
12ea2f30 348
c487cf00 349def get_element_html_by_id(id, html, **kwargs):
6f32a0b5 350 """Return the html of the tag with the specified ID in the passed HTML document"""
c487cf00 351 return get_element_html_by_attribute('id', id, html, **kwargs)
6f32a0b5 352
353
84c237fb 354def get_element_by_class(class_name, html):
2af12ad9 355 """Return the content of the first tag with the specified class in the passed HTML document"""
356 retval = get_elements_by_class(class_name, html)
357 return retval[0] if retval else None
358
359
6f32a0b5 360def get_element_html_by_class(class_name, html):
361 """Return the html of the first tag with the specified class in the passed HTML document"""
362 retval = get_elements_html_by_class(class_name, html)
363 return retval[0] if retval else None
364
365
c487cf00 366def get_element_by_attribute(attribute, value, html, **kwargs):
367 retval = get_elements_by_attribute(attribute, value, html, **kwargs)
2af12ad9 368 return retval[0] if retval else None
369
370
c487cf00 371def get_element_html_by_attribute(attribute, value, html, **kargs):
372 retval = get_elements_html_by_attribute(attribute, value, html, **kargs)
6f32a0b5 373 return retval[0] if retval else None
374
375
c487cf00 376def get_elements_by_class(class_name, html, **kargs):
2af12ad9 377 """Return the content of all tags with the specified class in the passed HTML document as a list"""
378 return get_elements_by_attribute(
64fa820c 379 'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name),
84c237fb 380 html, escape_value=False)
381
382
6f32a0b5 383def get_elements_html_by_class(class_name, html):
384 """Return the html of all tags with the specified class in the passed HTML document as a list"""
385 return get_elements_html_by_attribute(
64fa820c 386 'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name),
6f32a0b5 387 html, escape_value=False)
388
389
390def get_elements_by_attribute(*args, **kwargs):
43e8fafd 391 """Return the content of the tag with the specified attribute in the passed HTML document"""
6f32a0b5 392 return [content for content, _ in get_elements_text_and_html_by_attribute(*args, **kwargs)]
393
394
395def get_elements_html_by_attribute(*args, **kwargs):
396 """Return the html of the tag with the specified attribute in the passed HTML document"""
397 return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]
398
399
400def get_elements_text_and_html_by_attribute(attribute, value, html, escape_value=True):
401 """
402 Return the text (content) and the html (whole) of the tag with the specified
403 attribute in the passed HTML document
404 """
9e6dd238 405
86e5f3ed 406 quote = '' if re.match(r'''[\s"'`=<>]''', value) else '?'
0254f162 407
84c237fb 408 value = re.escape(value) if escape_value else value
409
86e5f3ed 410 partial_element_re = rf'''(?x)
6f32a0b5 411 <(?P<tag>[a-zA-Z0-9:._-]+)
0254f162 412 (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
86e5f3ed 413 \s{re.escape(attribute)}\s*=\s*(?P<_q>['"]{quote})(?-x:{value})(?P=_q)
414 '''
38285056 415
0254f162 416 for m in re.finditer(partial_element_re, html):
417 content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():])
a921f407 418
0254f162 419 yield (
420 unescapeHTML(re.sub(r'^(?P<q>["\'])(?P<content>.*)(?P=q)$', r'\g<content>', content, flags=re.DOTALL)),
421 whole
422 )
a921f407 423
c5229f39 424
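# Minimal usage sketch for the get_element_by_* helpers (illustrative; the
# HTML snippets are made-up examples).
#
#   >>> get_element_by_class('title', '<span class="title">Some video</span>')
#   'Some video'
#   >>> get_element_html_by_attribute('data-id', '123', '<div data-id="123">x</div>')
#   '<div data-id="123">x</div>'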
ac668111 425class HTMLBreakOnClosingTagParser(html.parser.HTMLParser):
6f32a0b5 426 """
427 HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
428 closing tag for the first opening tag it has encountered, and can be used
429 as a context manager
430 """
431
432 class HTMLBreakOnClosingTagException(Exception):
433 pass
434
435 def __init__(self):
436 self.tagstack = collections.deque()
ac668111 437 html.parser.HTMLParser.__init__(self)
6f32a0b5 438
439 def __enter__(self):
440 return self
441
442 def __exit__(self, *_):
443 self.close()
444
445 def close(self):
446 # handle_endtag does not return upon raising HTMLBreakOnClosingTagException,
447 # so data remains buffered; we no longer have any interest in it, thus
448 # override this method to discard it
449 pass
450
451 def handle_starttag(self, tag, _):
452 self.tagstack.append(tag)
453
454 def handle_endtag(self, tag):
455 if not self.tagstack:
456 raise compat_HTMLParseError('no tags in the stack')
457 while self.tagstack:
458 inner_tag = self.tagstack.pop()
459 if inner_tag == tag:
460 break
461 else:
462 raise compat_HTMLParseError(f'matching opening tag for closing {tag} tag not found')
463 if not self.tagstack:
464 raise self.HTMLBreakOnClosingTagException()
465
466
467def get_element_text_and_html_by_tag(tag, html):
468 """
469 For the first element with the specified tag in the passed HTML document
470 return its content (text) and the whole element (html)
471 """
472 def find_or_raise(haystack, needle, exc):
473 try:
474 return haystack.index(needle)
475 except ValueError:
476 raise exc
477 closing_tag = f'</{tag}>'
478 whole_start = find_or_raise(
479 html, f'<{tag}', compat_HTMLParseError(f'opening {tag} tag not found'))
480 content_start = find_or_raise(
481 html[whole_start:], '>', compat_HTMLParseError(f'malformed opening {tag} tag'))
482 content_start += whole_start + 1
483 with HTMLBreakOnClosingTagParser() as parser:
484 parser.feed(html[whole_start:content_start])
485 if not parser.tagstack or parser.tagstack[0] != tag:
486 raise compat_HTMLParseError(f'parser did not match opening {tag} tag')
487 offset = content_start
488 while offset < len(html):
489 next_closing_tag_start = find_or_raise(
490 html[offset:], closing_tag,
491 compat_HTMLParseError(f'closing {tag} tag not found'))
492 next_closing_tag_end = next_closing_tag_start + len(closing_tag)
493 try:
494 parser.feed(html[offset:offset + next_closing_tag_end])
495 offset += next_closing_tag_end
496 except HTMLBreakOnClosingTagParser.HTMLBreakOnClosingTagException:
497 return html[content_start:offset + next_closing_tag_start], \
498 html[whole_start:offset + next_closing_tag_end]
499 raise compat_HTMLParseError('unexpected end of html')
500
501
ac668111 502class HTMLAttributeParser(html.parser.HTMLParser):
8bb56eee 503 """Trivial HTML parser to gather the attributes for a single element"""
b6e0c7d2 504
8bb56eee 505 def __init__(self):
c5229f39 506 self.attrs = {}
ac668111 507 html.parser.HTMLParser.__init__(self)
8bb56eee 508
509 def handle_starttag(self, tag, attrs):
510 self.attrs = dict(attrs)
511
c5229f39 512
ac668111 513class HTMLListAttrsParser(html.parser.HTMLParser):
73673ccf 514 """HTML parser to gather the attributes for the elements of a list"""
515
516 def __init__(self):
ac668111 517 html.parser.HTMLParser.__init__(self)
73673ccf 518 self.items = []
519 self._level = 0
520
521 def handle_starttag(self, tag, attrs):
522 if tag == 'li' and self._level == 0:
523 self.items.append(dict(attrs))
524 self._level += 1
525
526 def handle_endtag(self, tag):
527 self._level -= 1
528
529
8bb56eee 530def extract_attributes(html_element):
531 """Given a string for an HTML element such as
532 <el
533 a="foo" B="bar" c="&98;az" d=boz
534 empty= noval entity="&amp;"
535 sq='"' dq="'"
536 >
537 Decode and return a dictionary of attributes.
538 {
539 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
540 'empty': '', 'noval': None, 'entity': '&',
541 'sq': '"', 'dq': '\''
542 }.
8bb56eee 543 """
544 parser = HTMLAttributeParser()
19a03940 545 with contextlib.suppress(compat_HTMLParseError):
b4a3d461 546 parser.feed(html_element)
547 parser.close()
8bb56eee 548 return parser.attrs
9e6dd238 549
c5229f39 550
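# Minimal usage sketch for extract_attributes() (illustrative): entity
# references inside attribute values are decoded, unquoted values are kept.
#
#   >>> extract_attributes('<a href="https://example.com/?a=1&amp;b=2" class=plain>')
#   {'href': 'https://example.com/?a=1&b=2', 'class': 'plain'}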
73673ccf 551def parse_list(webpage):
552 """Given a string for an series of HTML <li> elements,
553 return a dictionary of their attributes"""
554 parser = HTMLListAttrsParser()
555 parser.feed(webpage)
556 parser.close()
557 return parser.items
558
559
9e6dd238 560def clean_html(html):
59ae15a5 561 """Clean an HTML snippet into a readable string"""
dd622d7c 562
563 if html is None: # Convenience for sanitizing descriptions etc.
564 return html
565
49185227 566 html = re.sub(r'\s+', ' ', html)
567 html = re.sub(r'(?u)\s?<\s?br\s?/?\s?>\s?', '\n', html)
568 html = re.sub(r'(?u)<\s?/\s?p\s?>\s?<\s?p[^>]*>', '\n', html)
59ae15a5 569 # Strip html tags
570 html = re.sub('<.*?>', '', html)
571 # Replace html entities
572 html = unescapeHTML(html)
7decf895 573 return html.strip()
9e6dd238 574
575
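# Minimal usage sketch for clean_html() (illustrative): whitespace is
# collapsed, <br> and paragraph boundaries become newlines, remaining tags are
# stripped and entities decoded.
#
#   >>> clean_html('<p>First line<br/>second &amp; last</p>')
#   'First line\nsecond & last'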
b7c47b74 576class LenientJSONDecoder(json.JSONDecoder):
577 def __init__(self, *args, transform_source=None, ignore_extra=False, **kwargs):
578 self.transform_source, self.ignore_extra = transform_source, ignore_extra
579 super().__init__(*args, **kwargs)
580
581 def decode(self, s):
582 if self.transform_source:
583 s = self.transform_source(s)
584 if self.ignore_extra:
585 return self.raw_decode(s.lstrip())[0]
586 return super().decode(s)
587
588
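# Minimal usage sketch for LenientJSONDecoder (illustrative; the inputs are
# made-up examples): ignore_extra tolerates trailing junk after the JSON
# value, and transform_source can pre-process the raw string.
#
#   >>> LenientJSONDecoder(ignore_extra=True).decode('{"a": 1}; trailing junk')
#   {'a': 1}
#   >>> LenientJSONDecoder(transform_source=lambda s: s.replace("'", '"')).decode("{'a': 1}")
#   {'a': 1}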
d77c3dfd 589def sanitize_open(filename, open_mode):
59ae15a5 590 """Try to open the given filename, and slightly tweak it if this fails.
591
592 Attempts to open the given filename. If this fails, it tries to change
593 the filename slightly, step by step, until it's either able to open it
594 or it fails and raises a final exception, like the standard open()
595 function.
596
597 It returns the tuple (stream, definitive_file_name).
598 """
0edb3e33 599 if filename == '-':
600 if sys.platform == 'win32':
601 import msvcrt
daef7911 602 # stdout may be any IO stream. Eg, when using contextlib.redirect_stdout
603 with contextlib.suppress(io.UnsupportedOperation):
604 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
0edb3e33 605 return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
59ae15a5 606
0edb3e33 607 for attempt in range(2):
608 try:
609 try:
89737671 610 if sys.platform == 'win32':
b506289f 611 # FIXME: An exclusive lock also locks the file from being read.
612 # Since windows locks are mandatory, don't lock the file on windows (for now).
613 # Ref: https://github.com/yt-dlp/yt-dlp/issues/3124
89737671 614 raise LockingUnsupportedError()
0edb3e33 615 stream = locked_file(filename, open_mode, block=False).__enter__()
8a82af35 616 except OSError:
0edb3e33 617 stream = open(filename, open_mode)
8a82af35 618 return stream, filename
86e5f3ed 619 except OSError as err:
0edb3e33 620 if attempt or err.errno in (errno.EACCES,):
621 raise
622 old_filename, filename = filename, sanitize_path(filename)
623 if old_filename == filename:
624 raise
d77c3dfd 625
626
627def timeconvert(timestr):
59ae15a5 628 """Convert RFC 2822 defined time string into system timestamp"""
629 timestamp = None
630 timetuple = email.utils.parsedate_tz(timestr)
631 if timetuple is not None:
632 timestamp = email.utils.mktime_tz(timetuple)
633 return timestamp
1c469a94 634
5f6a1245 635
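# Minimal usage sketch for timeconvert() (illustrative): an RFC 2822 date, as
# found in HTTP or e-mail headers, becomes a Unix timestamp; unparsable input
# yields None.
#
#   >>> timeconvert('Sun, 06 Nov 1994 08:49:37 GMT')
#   784111777
#   >>> timeconvert('not a date') is None
#   True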
5c3895ff 636def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
59ae15a5 637 """Sanitizes a string so it could be used as part of a filename.
5c3895ff 638 @param restricted Use a stricter subset of allowed characters
639 @param is_id Whether this is an ID that should be kept unchanged if possible.
640 If unset, yt-dlp's new sanitization rules are in effect
59ae15a5 641 """
5c3895ff 642 if s == '':
643 return ''
644
59ae15a5 645 def replace_insane(char):
c587cbb7 646 if restricted and char in ACCENT_CHARS:
647 return ACCENT_CHARS[char]
91dd88b9 648 elif not restricted and char == '\n':
5c3895ff 649 return '\0 '
91dd88b9 650 elif char == '?' or ord(char) < 32 or ord(char) == 127:
59ae15a5 651 return ''
652 elif char == '"':
653 return '' if restricted else '\''
654 elif char == ':':
5c3895ff 655 return '\0_\0-' if restricted else '\0 \0-'
59ae15a5 656 elif char in '\\/|*<>':
5c3895ff 657 return '\0_'
658 if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace() or ord(char) > 127):
659 return '\0_'
59ae15a5 660 return char
661
5c3895ff 662 s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) # Handle timestamps
28e614de 663 result = ''.join(map(replace_insane, s))
5c3895ff 664 if is_id is NO_DEFAULT:
ae61d108 665 result = re.sub(r'(\0.)(?:(?=\1)..)+', r'\1', result) # Remove repeated substitute chars
666 STRIP_RE = r'(?:\0.|[ _-])*'
5c3895ff 667 result = re.sub(f'^\0.{STRIP_RE}|{STRIP_RE}\0.$', '', result) # Remove substitute chars from start/end
668 result = result.replace('\0', '') or '_'
669
796173d0 670 if not is_id:
671 while '__' in result:
672 result = result.replace('__', '_')
673 result = result.strip('_')
674 # Common case of "Foreign band name - English song title"
675 if restricted and result.startswith('-_'):
676 result = result[2:]
5a42414b 677 if result.startswith('-'):
678 result = '_' + result[len('-'):]
a7440261 679 result = result.lstrip('.')
796173d0 680 if not result:
681 result = '_'
59ae15a5 682 return result
d77c3dfd 683
5f6a1245 684
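# Minimal usage sketch for sanitize_filename() (illustrative): unsafe
# characters are replaced, and restricted mode falls back to a conservative
# ASCII subset.
#
#   >>> sanitize_filename('foo/bar|baz', restricted=True)
#   'foo_bar_baz'
#   >>> sanitize_filename('Artist: Song')
#   'Artist - Song'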
c2934512 685def sanitize_path(s, force=False):
a2aaf4db 686 """Sanitizes and normalizes path on Windows"""
c2934512 687 if sys.platform == 'win32':
c4218ac3 688 force = False
c2934512 689 drive_or_unc, _ = os.path.splitdrive(s)
c2934512 690 elif force:
691 drive_or_unc = ''
692 else:
a2aaf4db 693 return s
c2934512 694
be531ef1 695 norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
696 if drive_or_unc:
a2aaf4db 697 norm_path.pop(0)
698 sanitized_path = [
ec85ded8 699 path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
a2aaf4db 700 for path_part in norm_path]
be531ef1 701 if drive_or_unc:
702 sanitized_path.insert(0, drive_or_unc + os.path.sep)
4abea8ca 703 elif force and s and s[0] == os.path.sep:
c4218ac3 704 sanitized_path.insert(0, os.path.sep)
a2aaf4db 705 return os.path.join(*sanitized_path)
706
707
8f97a15d 708def sanitize_url(url, *, scheme='http'):
befa4708 709 # Prepend protocol-less URLs with `http:` scheme in order to mitigate
710 # the number of unwanted failures due to missing protocol
21633673 711 if url is None:
712 return
713 elif url.startswith('//'):
8f97a15d 714 return f'{scheme}:{url}'
befa4708 715 # Fix some common typos seen so far
716 COMMON_TYPOS = (
067aa17e 717 # https://github.com/ytdl-org/youtube-dl/issues/15649
befa4708 718 (r'^httpss://', r'https://'),
719 # https://bx1.be/lives/direct-tv/
720 (r'^rmtp([es]?)://', r'rtmp\1://'),
721 )
722 for mistake, fixup in COMMON_TYPOS:
723 if re.match(mistake, url):
724 return re.sub(mistake, fixup, url)
bc6b9bcd 725 return url
17bcc626 726
727
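# Minimal usage sketch for sanitize_url() (illustrative): protocol-relative
# URLs get a scheme and a few known scheme typos are repaired.
#
#   >>> sanitize_url('//example.com/video')
#   'http://example.com/video'
#   >>> sanitize_url('rmtp://example.com/live')
#   'rtmp://example.com/live'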
5435dcf9 728def extract_basic_auth(url):
14f25df2 729 parts = urllib.parse.urlsplit(url)
5435dcf9 730 if parts.username is None:
731 return url, None
14f25df2 732 url = urllib.parse.urlunsplit(parts._replace(netloc=(
5435dcf9 733 parts.hostname if parts.port is None
734 else '%s:%d' % (parts.hostname, parts.port))))
735 auth_payload = base64.b64encode(
0f06bcd7 736 ('%s:%s' % (parts.username, parts.password or '')).encode())
737 return url, f'Basic {auth_payload.decode()}'
5435dcf9 738
739
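# Minimal usage sketch for extract_basic_auth() (illustrative; the credentials
# are made-up): userinfo is stripped from the URL and returned as a ready-made
# Authorization header value.
#
#   >>> extract_basic_auth('https://user:pass@example.com/feed')
#   ('https://example.com/feed', 'Basic dXNlcjpwYXNz')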
67dda517 740def sanitized_Request(url, *args, **kwargs):
bc6b9bcd 741 url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
5435dcf9 742 if auth_header is not None:
743 headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
744 headers['Authorization'] = auth_header
ac668111 745 return urllib.request.Request(url, *args, **kwargs)
67dda517 746
747
51098426 748def expand_path(s):
749 """Expand shell variables and ~"""
750 return os.path.expandvars(compat_expanduser(s))
751
752
7e9a6125 753def orderedSet(iterable, *, lazy=False):
754 """Remove all duplicates from the input iterable"""
755 def _iter():
756 seen = [] # Do not use set since the items can be unhashable
757 for x in iterable:
758 if x not in seen:
759 seen.append(x)
760 yield x
761
762 return _iter() if lazy else list(_iter())
d77c3dfd 763
912b38b4 764
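# Minimal usage sketch for orderedSet() (illustrative): duplicates are removed
# while preserving order, and unlike set() it also works on unhashable items.
#
#   >>> orderedSet([1, 2, 1, 3, 2])
#   [1, 2, 3]
#   >>> list(orderedSet(['a', 'a', 'b'], lazy=True))
#   ['a', 'b']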
55b2f099 765def _htmlentity_transform(entity_with_semicolon):
4e408e47 766 """Transforms an HTML entity to a character."""
55b2f099 767 entity = entity_with_semicolon[:-1]
768
4e408e47 769 # Known non-numeric HTML entity
ac668111 770 if entity in html.entities.name2codepoint:
771 return chr(html.entities.name2codepoint[entity])
4e408e47 772
55b2f099 773 # TODO: HTML5 allows entities without a semicolon. For example,
774 # '&Eacuteric' should be decoded as 'Éric'.
ac668111 775 if entity_with_semicolon in html.entities.html5:
776 return html.entities.html5[entity_with_semicolon]
55b2f099 777
91757b0f 778 mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
4e408e47 779 if mobj is not None:
780 numstr = mobj.group(1)
28e614de 781 if numstr.startswith('x'):
4e408e47 782 base = 16
28e614de 783 numstr = '0%s' % numstr
4e408e47 784 else:
785 base = 10
067aa17e 786 # See https://github.com/ytdl-org/youtube-dl/issues/7518
19a03940 787 with contextlib.suppress(ValueError):
ac668111 788 return chr(int(numstr, base))
4e408e47 789
790 # Unknown entity in name, return its literal representation
7a3f0c00 791 return '&%s;' % entity
4e408e47 792
793
d77c3dfd 794def unescapeHTML(s):
912b38b4 795 if s is None:
796 return None
19a03940 797 assert isinstance(s, str)
d77c3dfd 798
4e408e47 799 return re.sub(
95f3f7c2 800 r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
d77c3dfd 801
8bf48f23 802
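# Minimal usage sketch for unescapeHTML() (illustrative): named, HTML5 and
# numeric character references are all resolved.
#
#   >>> unescapeHTML('Caf&eacute; &amp; th&#xe9;')
#   'Café & thé'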
cdb19aa4 803def escapeHTML(text):
804 return (
805 text
806 .replace('&', '&amp;')
807 .replace('<', '&lt;')
808 .replace('>', '&gt;')
809 .replace('"', '&quot;')
810 .replace("'", '&#39;')
811 )
812
813
f5b1bca9 814def process_communicate_or_kill(p, *args, **kwargs):
8a82af35 815 write_string('DeprecationWarning: yt_dlp.utils.process_communicate_or_kill is deprecated '
816 'and may be removed in a future version. Use yt_dlp.utils.Popen.communicate_or_kill instead')
817 return Popen.communicate_or_kill(p, *args, **kwargs)
f5b1bca9 818
819
d3c93ec2 820class Popen(subprocess.Popen):
821 if sys.platform == 'win32':
822 _startupinfo = subprocess.STARTUPINFO()
823 _startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
824 else:
825 _startupinfo = None
826
f0c9fb96 827 def __init__(self, *args, text=False, **kwargs):
828 if text is True:
829 kwargs['universal_newlines'] = True # For 3.6 compatibility
830 kwargs.setdefault('encoding', 'utf-8')
831 kwargs.setdefault('errors', 'replace')
86e5f3ed 832 super().__init__(*args, **kwargs, startupinfo=self._startupinfo)
d3c93ec2 833
834 def communicate_or_kill(self, *args, **kwargs):
8a82af35 835 try:
836 return self.communicate(*args, **kwargs)
837 except BaseException: # Including KeyboardInterrupt
f0c9fb96 838 self.kill(timeout=None)
8a82af35 839 raise
d3c93ec2 840
f0c9fb96 841 def kill(self, *, timeout=0):
842 super().kill()
843 if timeout != 0:
844 self.wait(timeout=timeout)
845
846 @classmethod
847 def run(cls, *args, **kwargs):
848 with cls(*args, **kwargs) as proc:
849 stdout, stderr = proc.communicate_or_kill()
850 return stdout or '', stderr or '', proc.returncode
851
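# Minimal usage sketch for Popen.run() (illustrative): it returns
# (stdout, stderr, returncode) and kills the child if communication is
# interrupted.
#
#   stdout, stderr, retcode = Popen.run(
#       [sys.executable, '-c', 'print("hello")'],
#       text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#   assert retcode == 0 and stdout.strip() == 'hello'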
d3c93ec2 852
aa49acd1
S
853def get_subprocess_encoding():
854 if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
855 # For subprocess calls, encode with locale encoding
856 # Refer to http://stackoverflow.com/a/9951851/35070
857 encoding = preferredencoding()
858 else:
859 encoding = sys.getfilesystemencoding()
860 if encoding is None:
861 encoding = 'utf-8'
862 return encoding
863
864
8bf48f23 865def encodeFilename(s, for_subprocess=False):
19a03940 866 assert isinstance(s, str)
cfb0511d 867 return s
aa49acd1
S
868
869
870def decodeFilename(b, for_subprocess=False):
cfb0511d 871 return b
8bf48f23 872
f07b74fc 873
874def encodeArgument(s):
cfb0511d 875 # Legacy code that uses byte strings
876 # Uncomment the following line after fixing all post processors
14f25df2 877 # assert isinstance(s, str), 'Internal error: %r should be of type %r, is %r' % (s, str, type(s))
cfb0511d 878 return s if isinstance(s, str) else s.decode('ascii')
f07b74fc 879
880
aa49acd1 881def decodeArgument(b):
cfb0511d 882 return b
aa49acd1 883
884
8271226a 885def decodeOption(optval):
886 if optval is None:
887 return optval
888 if isinstance(optval, bytes):
889 optval = optval.decode(preferredencoding())
890
14f25df2 891 assert isinstance(optval, str)
8271226a 892 return optval
1c256f70 893
5f6a1245 894
aa7785f8 895_timetuple = collections.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))
896
897
898def timetuple_from_msec(msec):
899 secs, msec = divmod(msec, 1000)
900 mins, secs = divmod(secs, 60)
901 hrs, mins = divmod(mins, 60)
902 return _timetuple(hrs, mins, secs, msec)
903
904
cdb19aa4 905def formatSeconds(secs, delim=':', msec=False):
aa7785f8 906 time = timetuple_from_msec(secs * 1000)
907 if time.hours:
908 ret = '%d%s%02d%s%02d' % (time.hours, delim, time.minutes, delim, time.seconds)
909 elif time.minutes:
910 ret = '%d%s%02d' % (time.minutes, delim, time.seconds)
4539dd30 911 else:
aa7785f8 912 ret = '%d' % time.seconds
913 return '%s.%03d' % (ret, time.milliseconds) if msec else ret
4539dd30 914
a0ddb8a2 915
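# Minimal usage sketch for timetuple_from_msec() and formatSeconds()
# (illustrative):
#
#   >>> timetuple_from_msec(345067)
#   Time(hours=0, minutes=5, seconds=45, milliseconds=67)
#   >>> formatSeconds(3905)
#   '1:05:05'
#   >>> formatSeconds(5.5, msec=True)
#   '5.500'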
77562778 916def _ssl_load_windows_store_certs(ssl_context, storename):
917 # Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
918 try:
919 certs = [cert for cert, encoding, trust in ssl.enum_certificates(storename)
920 if encoding == 'x509_asn' and (
921 trust is True or ssl.Purpose.SERVER_AUTH.oid in trust)]
922 except PermissionError:
923 return
924 for cert in certs:
19a03940 925 with contextlib.suppress(ssl.SSLError):
77562778 926 ssl_context.load_verify_locations(cadata=cert)
a2366922 927
77562778 928
929def make_HTTPS_handler(params, **kwargs):
930 opts_check_certificate = not params.get('nocheckcertificate')
931 context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
932 context.check_hostname = opts_check_certificate
f81c62a6 933 if params.get('legacyserverconnect'):
934 context.options |= 4 # SSL_OP_LEGACY_SERVER_CONNECT
4f28b537 935 # Allow use of weaker ciphers in Python 3.10+. See https://bugs.python.org/issue43998
936 context.set_ciphers('DEFAULT')
8a82af35 937
77562778 938 context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
939 if opts_check_certificate:
d5820461 940 if has_certifi and 'no-certifi' not in params.get('compat_opts', []):
941 context.load_verify_locations(cafile=certifi.where())
168bbc4f 942 else:
943 try:
944 context.load_default_certs()
945 # Work around the issue in load_default_certs when there are bad certificates. See:
946 # https://github.com/yt-dlp/yt-dlp/issues/1060,
947 # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
948 except ssl.SSLError:
949 # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
950 if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
951 for storename in ('CA', 'ROOT'):
952 _ssl_load_windows_store_certs(context, storename)
953 context.set_default_verify_paths()
8a82af35 954
bb58c9ed 955 client_certfile = params.get('client_certificate')
956 if client_certfile:
957 try:
958 context.load_cert_chain(
959 client_certfile, keyfile=params.get('client_certificate_key'),
960 password=params.get('client_certificate_password'))
961 except ssl.SSLError:
962 raise YoutubeDLError('Unable to load client certificate')
2c6dcb65 963
964 # Some servers may reject requests if ALPN extension is not sent. See:
965 # https://github.com/python/cpython/issues/85140
966 # https://github.com/yt-dlp/yt-dlp/issues/3878
967 with contextlib.suppress(NotImplementedError):
968 context.set_alpn_protocols(['http/1.1'])
969
77562778 970 return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
ea6d901e 971
732ea2f0 972
5873d4cc 973def bug_reports_message(before=';'):
57e0f077 974 from .update import REPOSITORY
975
976 msg = (f'please report this issue on https://github.com/{REPOSITORY}/issues?q= , '
977 'filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U')
5873d4cc 978
979 before = before.rstrip()
980 if not before or before.endswith(('.', '!', '?')):
981 msg = msg[0].title() + msg[1:]
982
983 return (before + ' ' if before else '') + msg
08f2a92c 984
985
bf5b9d85 986class YoutubeDLError(Exception):
987 """Base exception for YoutubeDL errors."""
aa9369a2 988 msg = None
989
990 def __init__(self, msg=None):
991 if msg is not None:
992 self.msg = msg
993 elif self.msg is None:
994 self.msg = type(self).__name__
995 super().__init__(self.msg)
bf5b9d85 996
997
ac668111 998network_exceptions = [urllib.error.URLError, http.client.HTTPException, socket.error]
3158150c 999if hasattr(ssl, 'CertificateError'):
1000 network_exceptions.append(ssl.CertificateError)
1001network_exceptions = tuple(network_exceptions)
1002
1003
bf5b9d85 1004class ExtractorError(YoutubeDLError):
1c256f70 1005 """Error during info extraction."""
5f6a1245 1006
1151c407 1007 def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=None):
9a82b238 1008 """ tb, if given, is the original traceback (so that it can be printed out).
7a5c1cfe 1009 If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
9a82b238 1010 """
3158150c 1011 if sys.exc_info()[0] in network_exceptions:
9a82b238 1012 expected = True
d5979c5d 1013
7265a219 1014 self.orig_msg = str(msg)
1c256f70 1015 self.traceback = tb
1151c407 1016 self.expected = expected
2eabb802 1017 self.cause = cause
d11271dd 1018 self.video_id = video_id
1151c407 1019 self.ie = ie
1020 self.exc_info = sys.exc_info() # preserve original exception
5df14442 1021 if isinstance(self.exc_info[1], ExtractorError):
1022 self.exc_info = self.exc_info[1].exc_info
1151c407 1023
86e5f3ed 1024 super().__init__(''.join((
a70635b8 1025 format_field(ie, None, '[%s] '),
1026 format_field(video_id, None, '%s: '),
7265a219 1027 msg,
a70635b8 1028 format_field(cause, None, ' (caused by %r)'),
1151c407 1029 '' if expected else bug_reports_message())))
1c256f70 1030
01951dda 1031 def format_traceback(self):
497d2fab 1032 return join_nonempty(
1033 self.traceback and ''.join(traceback.format_tb(self.traceback)),
e491d06d 1034 self.cause and ''.join(traceback.format_exception(None, self.cause, self.cause.__traceback__)[1:]),
497d2fab 1035 delim='\n') or None
01951dda 1036
1c256f70 1037
416c7fcb 1038class UnsupportedError(ExtractorError):
1039 def __init__(self, url):
86e5f3ed 1040 super().__init__(
416c7fcb 1041 'Unsupported URL: %s' % url, expected=True)
1042 self.url = url
1043
1044
55b3e45b 1045class RegexNotFoundError(ExtractorError):
1046 """Error when a regex didn't match"""
1047 pass
1048
1049
773f291d 1050class GeoRestrictedError(ExtractorError):
1051 """Geographic restriction Error exception.
1052
1053 This exception may be thrown when a video is not available from your
1054 geographic location due to geographic restrictions imposed by a website.
1055 """
b6e0c7d2 1056
0db3bae8 1057 def __init__(self, msg, countries=None, **kwargs):
1058 kwargs['expected'] = True
86e5f3ed 1059 super().__init__(msg, **kwargs)
773f291d 1060 self.countries = countries
1061
1062
693f0600 1063class UserNotLive(ExtractorError):
1064 """Error when a channel/user is not live"""
1065
1066 def __init__(self, msg=None, **kwargs):
1067 kwargs['expected'] = True
1068 super().__init__(msg or 'The channel is not currently live', **kwargs)
1069
1070
bf5b9d85 1071class DownloadError(YoutubeDLError):
59ae15a5 1072 """Download Error exception.
d77c3dfd 1073
59ae15a5 1074 This exception may be thrown by FileDownloader objects if they are not
1075 configured to continue on errors. They will contain the appropriate
1076 error message.
1077 """
5f6a1245 1078
8cc83b8d 1079 def __init__(self, msg, exc_info=None):
1080 """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
86e5f3ed 1081 super().__init__(msg)
8cc83b8d 1082 self.exc_info = exc_info
d77c3dfd 1083
1084
498f5606 1085class EntryNotInPlaylist(YoutubeDLError):
1086 """Entry not in playlist exception.
1087
1088 This exception will be thrown by YoutubeDL when a requested entry
1089 is not found in the playlist info_dict
1090 """
aa9369a2 1091 msg = 'Entry not found in info'
498f5606 1092
1093
bf5b9d85 1094class SameFileError(YoutubeDLError):
59ae15a5 1095 """Same File exception.
d77c3dfd 1096
59ae15a5 1097 This exception will be thrown by FileDownloader objects if they detect
1098 multiple files would have to be downloaded to the same file on disk.
1099 """
aa9369a2 1100 msg = 'Fixed output name but more than one file to download'
1101
1102 def __init__(self, filename=None):
1103 if filename is not None:
1104 self.msg += f': {filename}'
1105 super().__init__(self.msg)
d77c3dfd 1106
1107
bf5b9d85 1108class PostProcessingError(YoutubeDLError):
59ae15a5 1109 """Post Processing exception.
d77c3dfd 1110
59ae15a5 1111 This exception may be raised by PostProcessor's .run() method to
1112 indicate an error in the postprocessing task.
1113 """
5f6a1245 1114
5f6a1245 1115
48f79687 1116class DownloadCancelled(YoutubeDLError):
1117 """ Exception raised when the download queue should be interrupted """
1118 msg = 'The download was cancelled'
8b0d7497 1119
8b0d7497 1120
48f79687 1121class ExistingVideoReached(DownloadCancelled):
1122 """ --break-on-existing triggered """
1123 msg = 'Encountered a video that is already in the archive, stopping due to --break-on-existing'
8b0d7497 1124
48f79687 1125
1126class RejectedVideoReached(DownloadCancelled):
1127 """ --break-on-reject triggered """
1128 msg = 'Encountered a video that did not match filter, stopping due to --break-on-reject'
51d9739f 1129
1130
48f79687 1131class MaxDownloadsReached(DownloadCancelled):
59ae15a5 1132 """ --max-downloads limit has been reached. """
48f79687 1133 msg = 'Maximum number of downloads reached, stopping due to --max-downloads'
1134
1135
f2ebc5c7 1136class ReExtractInfo(YoutubeDLError):
1137 """ Video info needs to be re-extracted. """
1138
1139 def __init__(self, msg, expected=False):
1140 super().__init__(msg)
1141 self.expected = expected
1142
1143
1144class ThrottledDownload(ReExtractInfo):
48f79687 1145 """ Download speed below --throttled-rate. """
aa9369a2 1146 msg = 'The download speed is below throttle limit'
d77c3dfd 1147
43b22906 1148 def __init__(self):
1149 super().__init__(self.msg, expected=False)
f2ebc5c7 1150
d77c3dfd 1151
bf5b9d85 1152class UnavailableVideoError(YoutubeDLError):
59ae15a5 1153 """Unavailable Format exception.
d77c3dfd 1154
59ae15a5 1155 This exception will be thrown when a video is requested
1156 in a format that is not available for that video.
1157 """
aa9369a2 1158 msg = 'Unable to download video'
1159
1160 def __init__(self, err=None):
1161 if err is not None:
1162 self.msg += f': {err}'
1163 super().__init__(self.msg)
d77c3dfd 1164
1165
bf5b9d85 1166class ContentTooShortError(YoutubeDLError):
59ae15a5 1167 """Content Too Short exception.
d77c3dfd 1168
59ae15a5 1169 This exception may be raised by FileDownloader objects when a file they
1170 download is too small for what the server announced first, indicating
1171 the connection was probably interrupted.
1172 """
d77c3dfd 1173
59ae15a5 1174 def __init__(self, downloaded, expected):
86e5f3ed 1175 super().__init__(f'Downloaded {downloaded} bytes, expected {expected} bytes')
2c7ed247 1176 # Both in bytes
59ae15a5 1177 self.downloaded = downloaded
1178 self.expected = expected
d77c3dfd 1179
5f6a1245 1180
bf5b9d85 1181class XAttrMetadataError(YoutubeDLError):
efa97bdc 1182 def __init__(self, code=None, msg='Unknown error'):
86e5f3ed 1183 super().__init__(msg)
efa97bdc 1184 self.code = code
bd264412 1185 self.msg = msg
efa97bdc 1186
1187 # Parsing code and msg
3089bc74 1188 if (self.code in (errno.ENOSPC, errno.EDQUOT)
a0566bbf 1189 or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
efa97bdc 1190 self.reason = 'NO_SPACE'
1191 elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
1192 self.reason = 'VALUE_TOO_LONG'
1193 else:
1194 self.reason = 'NOT_SUPPORTED'
1195
1196
bf5b9d85 1197class XAttrUnavailableError(YoutubeDLError):
efa97bdc 1198 pass
1199
1200
c5a59d93 1201def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
f9934b96 1202 hc = http_class(*args, **kwargs)
be4a824d 1203 source_address = ydl_handler._params.get('source_address')
8959018a 1204
be4a824d 1205 if source_address is not None:
8959018a 1206 # This is to workaround _create_connection() from socket where it will try all
1207 # address data from getaddrinfo() including IPv6. This filters the result from
1208 # getaddrinfo() based on the source_address value.
1209 # This is based on the cpython socket.create_connection() function.
1210 # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
1211 def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
1212 host, port = address
1213 err = None
1214 addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
9e21e6d9 1215 af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
1216 ip_addrs = [addr for addr in addrs if addr[0] == af]
1217 if addrs and not ip_addrs:
1218 ip_version = 'v4' if af == socket.AF_INET else 'v6'
86e5f3ed 1219 raise OSError(
9e21e6d9 1220 "No remote IP%s addresses available for connect, can't use '%s' as source address"
1221 % (ip_version, source_address[0]))
8959018a 1222 for res in ip_addrs:
1223 af, socktype, proto, canonname, sa = res
1224 sock = None
1225 try:
1226 sock = socket.socket(af, socktype, proto)
1227 if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
1228 sock.settimeout(timeout)
1229 sock.bind(source_address)
1230 sock.connect(sa)
1231 err = None # Explicitly break reference cycle
1232 return sock
86e5f3ed 1233 except OSError as _:
8959018a 1234 err = _
1235 if sock is not None:
1236 sock.close()
1237 if err is not None:
1238 raise err
1239 else:
86e5f3ed 1240 raise OSError('getaddrinfo returns an empty list')
9e21e6d9 1241 if hasattr(hc, '_create_connection'):
1242 hc._create_connection = _create_connection
cfb0511d 1243 hc.source_address = (source_address, 0)
be4a824d 1244
1245 return hc
1246
1247
87f0e62d 1248def handle_youtubedl_headers(headers):
992fc9d6 1249 filtered_headers = headers
1250
1251 if 'Youtubedl-no-compression' in filtered_headers:
86e5f3ed 1252 filtered_headers = {k: v for k, v in filtered_headers.items() if k.lower() != 'accept-encoding'}
87f0e62d 1253 del filtered_headers['Youtubedl-no-compression']
87f0e62d 1254
992fc9d6 1255 return filtered_headers
87f0e62d 1256
1257
ac668111 1258class YoutubeDLHandler(urllib.request.HTTPHandler):
59ae15a5 1259 """Handler for HTTP requests and responses.
1260
1261 This class, when installed with an OpenerDirector, automatically adds
1262 the standard headers to every HTTP request and handles gzipped and
1263 deflated responses from web servers. If compression is to be avoided in
1264 a particular request, the original request in the program code only has
0424ec30 1265 to include the HTTP header "Youtubedl-no-compression", which will be
59ae15a5 1266 removed before making the real request.
1267
1268 Part of this code was copied from:
1269
1270 http://techknack.net/python-urllib2-handlers/
1271
1272 Andrew Rowls, the author of that code, agreed to release it to the
1273 public domain.
1274 """
1275
be4a824d 1276 def __init__(self, params, *args, **kwargs):
ac668111 1277 urllib.request.HTTPHandler.__init__(self, *args, **kwargs)
be4a824d 1278 self._params = params
1279
1280 def http_open(self, req):
ac668111 1281 conn_class = http.client.HTTPConnection
71aff188 1282
1283 socks_proxy = req.headers.get('Ytdl-socks-proxy')
1284 if socks_proxy:
1285 conn_class = make_socks_conn_class(conn_class, socks_proxy)
1286 del req.headers['Ytdl-socks-proxy']
1287
be4a824d 1288 return self.do_open(functools.partial(
71aff188 1289 _create_http_connection, self, conn_class, False),
be4a824d 1290 req)
1291
59ae15a5 1292 @staticmethod
1293 def deflate(data):
fc2119f2 1294 if not data:
1295 return data
59ae15a5 1296 try:
1297 return zlib.decompress(data, -zlib.MAX_WBITS)
1298 except zlib.error:
1299 return zlib.decompress(data)
1300
4390d5ec 1301 @staticmethod
1302 def brotli(data):
1303 if not data:
1304 return data
9b8ee23b 1305 return brotli.decompress(data)
4390d5ec 1306
acebc9cd 1307 def http_request(self, req):
51f267d9 1308 # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
1309 # always respected by websites, some tend to give out URLs with non percent-encoded
1310 # non-ASCII characters (see telemb.py, ard.py [#3412])
1311 # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
1312 # To work around aforementioned issue we will replace request's original URL with
1313 # percent-encoded one
1314 # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
1315 # the code of this workaround has been moved here from YoutubeDL.urlopen()
1316 url = req.get_full_url()
1317 url_escaped = escape_url(url)
1318
1319 # Substitute URL if any change after escaping
1320 if url != url_escaped:
15d260eb 1321 req = update_Request(req, url=url_escaped)
51f267d9 1322
8b7539d2 1323 for h, v in self._params.get('http_headers', std_headers).items():
3d5f7a39 1324 # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
1325 # The dict keys are capitalized because of this bug by urllib
1326 if h.capitalize() not in req.headers:
33ac271b 1327 req.add_header(h, v)
87f0e62d 1328
af14914b 1329 if 'Accept-encoding' not in req.headers:
1330 req.add_header('Accept-encoding', ', '.join(SUPPORTED_ENCODINGS))
1331
87f0e62d 1332 req.headers = handle_youtubedl_headers(req.headers)
989b4b2b 1333
379a4f16 1334 return super().do_request_(req)
59ae15a5 1335
acebc9cd 1336 def http_response(self, req, resp):
59ae15a5 1337 old_resp = resp
1338 # gzip
1339 if resp.headers.get('Content-encoding', '') == 'gzip':
aa3e9507 1340 content = resp.read()
1341 gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
1342 try:
1343 uncompressed = io.BytesIO(gz.read())
86e5f3ed 1344 except OSError as original_ioerror:
aa3e9507 1345 # There may be junk at the end of the file
1346 # See http://stackoverflow.com/q/4928560/35070 for details
1347 for i in range(1, 1024):
1348 try:
1349 gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
1350 uncompressed = io.BytesIO(gz.read())
86e5f3ed 1351 except OSError:
aa3e9507 1352 continue
1353 break
1354 else:
1355 raise original_ioerror
ac668111 1356 resp = urllib.request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
59ae15a5 1357 resp.msg = old_resp.msg
c047270c 1358 del resp.headers['Content-encoding']
59ae15a5 1359 # deflate
1360 if resp.headers.get('Content-encoding', '') == 'deflate':
1361 gz = io.BytesIO(self.deflate(resp.read()))
ac668111 1362 resp = urllib.request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
59ae15a5 1363 resp.msg = old_resp.msg
c047270c 1364 del resp.headers['Content-encoding']
4390d5ec 1365 # brotli
1366 if resp.headers.get('Content-encoding', '') == 'br':
ac668111 1367 resp = urllib.request.addinfourl(
4390d5ec 1368 io.BytesIO(self.brotli(resp.read())), old_resp.headers, old_resp.url, old_resp.code)
1369 resp.msg = old_resp.msg
1370 del resp.headers['Content-encoding']
ad729172 1371 # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
067aa17e 1372 # https://github.com/ytdl-org/youtube-dl/issues/6457).
5a4d9ddb 1373 if 300 <= resp.code < 400:
1374 location = resp.headers.get('Location')
1375 if location:
1376 # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
0f06bcd7 1377 location = location.encode('iso-8859-1').decode()
5a4d9ddb 1378 location_escaped = escape_url(location)
1379 if location != location_escaped:
1380 del resp.headers['Location']
1381 resp.headers['Location'] = location_escaped
59ae15a5 1382 return resp
0f8d03f8 1383
acebc9cd 1384 https_request = http_request
1385 https_response = http_response
bf50b038 1386
5de90176 1387
71aff188 1388def make_socks_conn_class(base_class, socks_proxy):
1389 assert issubclass(base_class, (
ac668111 1390 http.client.HTTPConnection, http.client.HTTPSConnection))
71aff188 1391
14f25df2 1392 url_components = urllib.parse.urlparse(socks_proxy)
71aff188 1393 if url_components.scheme.lower() == 'socks5':
1394 socks_type = ProxyType.SOCKS5
1395 elif url_components.scheme.lower() in ('socks', 'socks4'):
1396 socks_type = ProxyType.SOCKS4
51fb4995 1397 elif url_components.scheme.lower() == 'socks4a':
1398 socks_type = ProxyType.SOCKS4A
71aff188 1399
cdd94c2e 1400 def unquote_if_non_empty(s):
1401 if not s:
1402 return s
ac668111 1403 return urllib.parse.unquote_plus(s)
cdd94c2e 1404
71aff188 1405 proxy_args = (
1406 socks_type,
1407 url_components.hostname, url_components.port or 1080,
1408 True, # Remote DNS
cdd94c2e 1409 unquote_if_non_empty(url_components.username),
1410 unquote_if_non_empty(url_components.password),
71aff188 1411 )
1412
1413 class SocksConnection(base_class):
1414 def connect(self):
1415 self.sock = sockssocket()
1416 self.sock.setproxy(*proxy_args)
19a03940 1417 if isinstance(self.timeout, (int, float)):
71aff188 1418 self.sock.settimeout(self.timeout)
1419 self.sock.connect((self.host, self.port))
1420
ac668111 1421 if isinstance(self, http.client.HTTPSConnection):
71aff188 1422 if hasattr(self, '_context'): # Python > 2.6
1423 self.sock = self._context.wrap_socket(
1424 self.sock, server_hostname=self.host)
1425 else:
1426 self.sock = ssl.wrap_socket(self.sock)
1427
1428 return SocksConnection
1429
1430
ac668111 1431class YoutubeDLHTTPSHandler(urllib.request.HTTPSHandler):
be4a824d 1432 def __init__(self, params, https_conn_class=None, *args, **kwargs):
ac668111 1433 urllib.request.HTTPSHandler.__init__(self, *args, **kwargs)
1434 self._https_conn_class = https_conn_class or http.client.HTTPSConnection
be4a824d 1435 self._params = params
1436
1437 def https_open(self, req):
4f264c02 1438 kwargs = {}
71aff188 1439 conn_class = self._https_conn_class
1440
4f264c02 1441 if hasattr(self, '_context'): # python > 2.6
1442 kwargs['context'] = self._context
1443 if hasattr(self, '_check_hostname'): # python 3.x
1444 kwargs['check_hostname'] = self._check_hostname
71aff188 1445
1446 socks_proxy = req.headers.get('Ytdl-socks-proxy')
1447 if socks_proxy:
1448 conn_class = make_socks_conn_class(conn_class, socks_proxy)
1449 del req.headers['Ytdl-socks-proxy']
1450
4f28b537 1451 try:
1452 return self.do_open(
1453 functools.partial(_create_http_connection, self, conn_class, True), req, **kwargs)
1454 except urllib.error.URLError as e:
1455 if (isinstance(e.reason, ssl.SSLError)
1456 and getattr(e.reason, 'reason', None) == 'SSLV3_ALERT_HANDSHAKE_FAILURE'):
1457 raise YoutubeDLError('SSLV3_ALERT_HANDSHAKE_FAILURE: Try using --legacy-server-connect')
1458 raise
be4a824d 1459
1460
ac668111 1461class YoutubeDLCookieJar(http.cookiejar.MozillaCookieJar):
f1a8511f 1462 """
1463 See [1] for cookie file format.
1464
1465 1. https://curl.haxx.se/docs/http-cookies.html
1466 """
e7e62441 1467 _HTTPONLY_PREFIX = '#HttpOnly_'
c380cc28 1468 _ENTRY_LEN = 7
1469 _HEADER = '''# Netscape HTTP Cookie File
7a5c1cfe 1470# This file is generated by yt-dlp. Do not edit.
c380cc28 1471
1472'''
1473 _CookieFileEntry = collections.namedtuple(
1474 'CookieFileEntry',
1475 ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))
e7e62441 1476
d76fa1f3 1477 def __init__(self, filename=None, *args, **kwargs):
1478 super().__init__(None, *args, **kwargs)
1479 if self.is_path(filename):
1480 filename = os.fspath(filename)
1481 self.filename = filename
1482
24146491 1483 @staticmethod
1484 def _true_or_false(cndn):
1485 return 'TRUE' if cndn else 'FALSE'
1486
d76fa1f3 1487 @staticmethod
1488 def is_path(file):
1489 return isinstance(file, (str, bytes, os.PathLike))
1490
1491 @contextlib.contextmanager
1492 def open(self, file, *, write=False):
1493 if self.is_path(file):
1494 with open(file, 'w' if write else 'r', encoding='utf-8') as f:
1495 yield f
1496 else:
1497 if write:
1498 file.truncate(0)
1499 yield file
1500
24146491 1501 def _really_save(self, f, ignore_discard=False, ignore_expires=False):
1502 now = time.time()
1503 for cookie in self:
1504 if (not ignore_discard and cookie.discard
1505 or not ignore_expires and cookie.is_expired(now)):
1506 continue
1507 name, value = cookie.name, cookie.value
1508 if value is None:
1509 # cookies.txt regards 'Set-Cookie: foo' as a cookie
1510 # with no name, whereas http.cookiejar regards it as a
1511 # cookie with no value.
1512 name, value = '', name
1513 f.write('%s\n' % '\t'.join((
1514 cookie.domain,
1515 self._true_or_false(cookie.domain.startswith('.')),
1516 cookie.path,
1517 self._true_or_false(cookie.secure),
1518 str_or_none(cookie.expires, default=''),
1519 name, value
1520 )))
1521
1522 def save(self, filename=None, *args, **kwargs):
c380cc28 1523 """
1524 Save cookies to a file.
24146491 1525 Code is taken from CPython 3.6
1526 https://github.com/python/cpython/blob/8d999cbf4adea053be6dbb612b9844635c4dfb8e/Lib/http/cookiejar.py#L2091-L2117 """
c380cc28 1527
c380cc28 1528 if filename is None:
1529 if self.filename is not None:
1530 filename = self.filename
1531 else:
ac668111 1532 raise ValueError(http.cookiejar.MISSING_FILENAME_TEXT)
c380cc28 1533
24146491 1534 # Store session cookies with `expires` set to 0 instead of an empty string
1bab3437 1535 for cookie in self:
1536 if cookie.expires is None:
1537 cookie.expires = 0
c380cc28 1538
d76fa1f3 1539 with self.open(filename, write=True) as f:
c380cc28 1540 f.write(self._HEADER)
24146491 1541 self._really_save(f, *args, **kwargs)
1bab3437 1542
1543 def load(self, filename=None, ignore_discard=False, ignore_expires=False):
e7e62441 1544 """Load cookies from a file."""
1545 if filename is None:
1546 if self.filename is not None:
1547 filename = self.filename
1548 else:
ac668111 1549 raise ValueError(http.cookiejar.MISSING_FILENAME_TEXT)
e7e62441 1550
c380cc28
S
1551 def prepare_line(line):
1552 if line.startswith(self._HTTPONLY_PREFIX):
1553 line = line[len(self._HTTPONLY_PREFIX):]
1554 # comments and empty lines are fine
1555 if line.startswith('#') or not line.strip():
1556 return line
1557 cookie_list = line.split('\t')
1558 if len(cookie_list) != self._ENTRY_LEN:
ac668111 1559 raise http.cookiejar.LoadError('invalid length %d' % len(cookie_list))
c380cc28
S
1560 cookie = self._CookieFileEntry(*cookie_list)
1561 if cookie.expires_at and not cookie.expires_at.isdigit():
ac668111 1562 raise http.cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
c380cc28
S
1563 return line
1564
e7e62441 1565 cf = io.StringIO()
d76fa1f3 1566 with self.open(filename) as f:
e7e62441 1567 for line in f:
c380cc28
S
1568 try:
1569 cf.write(prepare_line(line))
ac668111 1570 except http.cookiejar.LoadError as e:
94aa0644 1571 if f'{line.strip()} '[0] in '[{"':
ac668111 1572 raise http.cookiejar.LoadError(
94aa0644
L
1573 'Cookies file must be Netscape formatted, not JSON. See '
1574 'https://github.com/ytdl-org/youtube-dl#how-do-i-pass-cookies-to-youtube-dl')
19a03940 1575 write_string(f'WARNING: skipping cookie file entry due to {e}: {line!r}\n')
c380cc28 1576 continue
e7e62441 1577 cf.seek(0)
1578 self._really_load(cf, filename, ignore_discard, ignore_expires)
1bab3437
S
1579 # Session cookies are denoted by either `expires` field set to
1580 # an empty string or 0. MozillaCookieJar only recognizes the former
 1581 # (see [1]). So we need to force the latter to be recognized as session
1582 # cookies on our own.
1583 # Session cookies may be important for cookies-based authentication,
 1584 # e.g. usually, when a user does not check the 'Remember me' box while
 1585 # logging in on a site, some important cookies are stored as session
 1586 # cookies, so failing to recognize them results in a failed login.
1587 # 1. https://bugs.python.org/issue17164
1588 for cookie in self:
1589 # Treat `expires=0` cookies as session cookies
1590 if cookie.expires == 0:
1591 cookie.expires = None
1592 cookie.discard = True
1593
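# Illustrative usage (editor's sketch; 'cookies.txt' is an example path and must
# already exist in Netscape format for load() to succeed):
#
#   jar = YoutubeDLCookieJar('cookies.txt')
#   jar.load(ignore_discard=True, ignore_expires=True)
#   # ... inspect or modify cookies ...
#   jar.save()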
1594
ac668111 1595class YoutubeDLCookieProcessor(urllib.request.HTTPCookieProcessor):
a6420bf5 1596 def __init__(self, cookiejar=None):
ac668111 1597 urllib.request.HTTPCookieProcessor.__init__(self, cookiejar)
a6420bf5
S
1598
1599 def http_response(self, request, response):
ac668111 1600 return urllib.request.HTTPCookieProcessor.http_response(self, request, response)
a6420bf5 1601
ac668111 1602 https_request = urllib.request.HTTPCookieProcessor.http_request
a6420bf5
S
1603 https_response = http_response
1604
1605
ac668111 1606class YoutubeDLRedirectHandler(urllib.request.HTTPRedirectHandler):
201c1459 1607 """YoutubeDL redirect handler
1608
1609 The code is based on HTTPRedirectHandler implementation from CPython [1].
1610
1611 This redirect handler solves two issues:
1612 - ensures redirect URL is always unicode under python 2
1613 - introduces support for experimental HTTP response status code
1614 308 Permanent Redirect [2] used by some sites [3]
1615
1616 1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
1617 2. https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308
1618 3. https://github.com/ytdl-org/youtube-dl/issues/28768
1619 """
1620
ac668111 1621 http_error_301 = http_error_303 = http_error_307 = http_error_308 = urllib.request.HTTPRedirectHandler.http_error_302
201c1459 1622
1623 def redirect_request(self, req, fp, code, msg, headers, newurl):
1624 """Return a Request or None in response to a redirect.
1625
1626 This is called by the http_error_30x methods when a
1627 redirection response is received. If a redirection should
1628 take place, return a new Request to allow http_error_30x to
1629 perform the redirect. Otherwise, raise HTTPError if no-one
1630 else should try to handle this url. Return None if you can't
1631 but another Handler might.
1632 """
1633 m = req.get_method()
1634 if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
1635 or code in (301, 302, 303) and m == "POST")):
14f25df2 1636 raise urllib.error.HTTPError(req.full_url, code, msg, headers, fp)
201c1459 1637 # Strictly (according to RFC 2616), 301 or 302 in response to
1638 # a POST MUST NOT cause a redirection without confirmation
1639 # from the user (of urllib.request, in this case). In practice,
1640 # essentially all clients do redirect in this case, so we do
1641 # the same.
1642
201c1459 1643 # Be conciliant with URIs containing a space. This is mainly
1644 # redundant with the more complete encoding done in http_error_302(),
1645 # but it is kept for compatibility with other callers.
1646 newurl = newurl.replace(' ', '%20')
1647
1648 CONTENT_HEADERS = ("content-length", "content-type")
 1649 # Strip Content-Length and Content-Type; they describe the original request body and may not apply after the redirect
86e5f3ed 1650 newheaders = {k: v for k, v in req.headers.items() if k.lower() not in CONTENT_HEADERS}
afac4caa 1651
1652 # A 303 must either use GET or HEAD for subsequent request
1653 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.4
1654 if code == 303 and m != 'HEAD':
1655 m = 'GET'
1656 # 301 and 302 redirects are commonly turned into a GET from a POST
1657 # for subsequent requests by browsers, so we'll do the same.
1658 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.2
1659 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.3
1660 if code in (301, 302) and m == 'POST':
1661 m = 'GET'
1662
ac668111 1663 return urllib.request.Request(
201c1459 1664 newurl, headers=newheaders, origin_req_host=req.origin_req_host,
afac4caa 1665 unverifiable=True, method=m)
fca6dba8
S
1666
1667
46f59e89
S
1668def extract_timezone(date_str):
1669 m = re.search(
f137e4c2 1670 r'''(?x)
1671 ^.{8,}? # >=8 char non-TZ prefix, if present
1672 (?P<tz>Z| # just the UTC Z, or
1673 (?:(?<=.\b\d{4}|\b\d{2}:\d\d)| # preceded by 4 digits or hh:mm or
1674 (?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d)) # not preceded by 3 alpha word or >= 4 alpha or 2 digits
1675 [ ]? # optional space
1676 (?P<sign>\+|-) # +/-
1677 (?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2}) # hh[:]mm
1678 $)
1679 ''', date_str)
46f59e89
S
1680 if not m:
1681 timezone = datetime.timedelta()
1682 else:
1683 date_str = date_str[:-len(m.group('tz'))]
1684 if not m.group('sign'):
1685 timezone = datetime.timedelta()
1686 else:
1687 sign = 1 if m.group('sign') == '+' else -1
1688 timezone = datetime.timedelta(
1689 hours=sign * int(m.group('hours')),
1690 minutes=sign * int(m.group('minutes')))
1691 return timezone, date_str
1692
1693
08b38d54 1694def parse_iso8601(date_str, delimiter='T', timezone=None):
912b38b4
PH
1695 """ Return a UNIX timestamp from the given date """
1696
1697 if date_str is None:
1698 return None
1699
52c3a6e4
S
1700 date_str = re.sub(r'\.[0-9]+', '', date_str)
1701
08b38d54 1702 if timezone is None:
46f59e89
S
1703 timezone, date_str = extract_timezone(date_str)
1704
19a03940 1705 with contextlib.suppress(ValueError):
86e5f3ed 1706 date_format = f'%Y-%m-%d{delimiter}%H:%M:%S'
52c3a6e4
S
1707 dt = datetime.datetime.strptime(date_str, date_format) - timezone
1708 return calendar.timegm(dt.timetuple())
912b38b4
PH
1709
1710
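# Illustrative usage: both 'Z' and explicit offsets are handled via extract_timezone()
# above, and fractional seconds are stripped first:
#
#   >>> parse_iso8601('1970-01-01T00:00:10Z')
#   10
#   >>> parse_iso8601('1970-01-01T00:00:10.5+00:00')
#   10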
46f59e89
S
1711def date_formats(day_first=True):
1712 return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST
1713
1714
42bdd9d0 1715def unified_strdate(date_str, day_first=True):
bf50b038 1716 """Return a string with the date in the format YYYYMMDD"""
64e7ad60
PH
1717
1718 if date_str is None:
1719 return None
bf50b038 1720 upload_date = None
5f6a1245 1721 # Replace commas
026fcc04 1722 date_str = date_str.replace(',', ' ')
42bdd9d0 1723 # Remove AM/PM + timezone
9bb8e0a3 1724 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
46f59e89 1725 _, date_str = extract_timezone(date_str)
42bdd9d0 1726
46f59e89 1727 for expression in date_formats(day_first):
19a03940 1728 with contextlib.suppress(ValueError):
bf50b038 1729 upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
42393ce2
PH
1730 if upload_date is None:
1731 timetuple = email.utils.parsedate_tz(date_str)
1732 if timetuple:
19a03940 1733 with contextlib.suppress(ValueError):
c6b9cf05 1734 upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
6a750402 1735 if upload_date is not None:
14f25df2 1736 return str(upload_date)
bf50b038 1737
5f6a1245 1738
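# Illustrative usage (assuming the corresponding patterns are present in the
# DATE_FORMATS tables defined earlier in this module):
#
#   >>> unified_strdate('December 21, 2010')
#   '20101221'
#   >>> unified_strdate('1968-12-10')
#   '19681210'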
46f59e89
S
1739def unified_timestamp(date_str, day_first=True):
1740 if date_str is None:
1741 return None
1742
2ae2ffda 1743 date_str = re.sub(r'[,|]', '', date_str)
46f59e89 1744
7dc2a74e 1745 pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
46f59e89
S
1746 timezone, date_str = extract_timezone(date_str)
1747
1748 # Remove AM/PM + timezone
1749 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
1750
deef3195
S
1751 # Remove unrecognized timezones from ISO 8601 alike timestamps
1752 m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
1753 if m:
1754 date_str = date_str[:-len(m.group('tz'))]
1755
f226880c
PH
1756 # Python only supports microseconds, so remove nanoseconds
1757 m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
1758 if m:
1759 date_str = m.group(1)
1760
46f59e89 1761 for expression in date_formats(day_first):
19a03940 1762 with contextlib.suppress(ValueError):
7dc2a74e 1763 dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
46f59e89 1764 return calendar.timegm(dt.timetuple())
46f59e89
S
1765 timetuple = email.utils.parsedate_tz(date_str)
1766 if timetuple:
7dc2a74e 1767 return calendar.timegm(timetuple) + pm_delta * 3600
46f59e89
S
1768
1769
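# Illustrative usage (the second example assumes '%B %d %Y at %H:%M' is among the
# DATE_FORMATS defined earlier in this module):
#
#   >>> unified_timestamp('1970-01-01T00:00:10Z')
#   10
#   >>> unified_timestamp('December 15, 2017 at 7:49 am')
#   1513324140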
28e614de 1770def determine_ext(url, default_ext='unknown_video'):
85750f89 1771 if url is None or '.' not in url:
f4776371 1772 return default_ext
9cb9a5df 1773 guess = url.partition('?')[0].rpartition('.')[2]
73e79f2a
PH
1774 if re.match(r'^[A-Za-z0-9]+$', guess):
1775 return guess
a7aaa398
S
 1776 # Try to extract ext from URLs like http://example.com/foo/bar.mp4/?download
1777 elif guess.rstrip('/') in KNOWN_EXTENSIONS:
9cb9a5df 1778 return guess.rstrip('/')
73e79f2a 1779 else:
cbdbb766 1780 return default_ext
73e79f2a 1781
5f6a1245 1782
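# Illustrative usage:
#
#   >>> determine_ext('http://example.com/foo/bar.mp4/?download')
#   'mp4'
#   >>> determine_ext('http://example.com/page')
#   'unknown_video'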
824fa511
S
1783def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
1784 return replace_extension(filename, sub_lang + '.' + sub_format, expected_real_ext)
d4051a8e 1785
5f6a1245 1786
9e62f283 1787def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
3d38b2d6 1788 R"""
1789 Return a datetime object from a string.
1790 Supported format:
1791 (now|today|yesterday|DATE)([+-]\d+(microsecond|second|minute|hour|day|week|month|year)s?)?
1792
1793 @param format strftime format of DATE
1794 @param precision Round the datetime object: auto|microsecond|second|minute|hour|day
1795 auto: round to the unit provided in date_str (if applicable).
9e62f283 1796 """
1797 auto_precision = False
1798 if precision == 'auto':
1799 auto_precision = True
1800 precision = 'microsecond'
396a76f7 1801 today = datetime_round(datetime.datetime.utcnow(), precision)
f8795e10 1802 if date_str in ('now', 'today'):
37254abc 1803 return today
f8795e10
PH
1804 if date_str == 'yesterday':
1805 return today - datetime.timedelta(days=1)
9e62f283 1806 match = re.match(
3d38b2d6 1807 r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?',
9e62f283 1808 date_str)
37254abc 1809 if match is not None:
9e62f283 1810 start_time = datetime_from_str(match.group('start'), precision, format)
1811 time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1)
37254abc 1812 unit = match.group('unit')
9e62f283 1813 if unit == 'month' or unit == 'year':
1814 new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time)
37254abc 1815 unit = 'day'
9e62f283 1816 else:
1817 if unit == 'week':
1818 unit = 'day'
1819 time *= 7
1820 delta = datetime.timedelta(**{unit + 's': time})
1821 new_date = start_time + delta
1822 if auto_precision:
1823 return datetime_round(new_date, unit)
1824 return new_date
1825
1826 return datetime_round(datetime.datetime.strptime(date_str, format), precision)
1827
1828
d49f8db3 1829def date_from_str(date_str, format='%Y%m%d', strict=False):
3d38b2d6 1830 R"""
1831 Return a date object from a string using datetime_from_str
9e62f283 1832
3d38b2d6 1833 @param strict Restrict allowed patterns to "YYYYMMDD" and
1834 (now|today|yesterday)(-\d+(day|week|month|year)s?)?
9e62f283 1835 """
3d38b2d6 1836 if strict and not re.fullmatch(r'\d{8}|(now|today|yesterday)(-\d+(day|week|month|year)s?)?', date_str):
1837 raise ValueError(f'Invalid date format "{date_str}"')
9e62f283 1838 return datetime_from_str(date_str, precision='microsecond', format=format).date()
1839
1840
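# Illustrative usage of the relative-date syntax accepted above:
#
#   >>> date_from_str('20220131')
#   datetime.date(2022, 1, 31)
#   >>> date_from_str('now-1week') == date_from_str('today-7days')  # normally True
#   True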
1841def datetime_add_months(dt, months):
1842 """Increment/Decrement a datetime object by months."""
1843 month = dt.month + months - 1
1844 year = dt.year + month // 12
1845 month = month % 12 + 1
1846 day = min(dt.day, calendar.monthrange(year, month)[1])
1847 return dt.replace(year, month, day)
1848
1849
1850def datetime_round(dt, precision='day'):
1851 """
1852 Round a datetime object's time to a specific precision
1853 """
1854 if precision == 'microsecond':
1855 return dt
1856
1857 unit_seconds = {
1858 'day': 86400,
1859 'hour': 3600,
1860 'minute': 60,
1861 'second': 1,
1862 }
1863 roundto = lambda x, n: ((x + n / 2) // n) * n
1864 timestamp = calendar.timegm(dt.timetuple())
1865 return datetime.datetime.utcfromtimestamp(roundto(timestamp, unit_seconds[precision]))
5f6a1245
JW
1866
1867
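# Illustrative usage:
#
#   >>> datetime_add_months(datetime.datetime(2020, 1, 31), 1)  # clamps to the month's last day
#   datetime.datetime(2020, 2, 29, 0, 0)
#   >>> datetime_round(datetime.datetime(2020, 1, 1, 14, 30), 'hour')
#   datetime.datetime(2020, 1, 1, 15, 0)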
e63fc1be 1868def hyphenate_date(date_str):
1869 """
1870 Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
1871 match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
1872 if match is not None:
1873 return '-'.join(match.groups())
1874 else:
1875 return date_str
1876
5f6a1245 1877
86e5f3ed 1878class DateRange:
bd558525 1879 """Represents a time interval between two dates"""
5f6a1245 1880
bd558525
JMF
1881 def __init__(self, start=None, end=None):
1882 """start and end must be strings in the format accepted by date"""
1883 if start is not None:
d49f8db3 1884 self.start = date_from_str(start, strict=True)
bd558525
JMF
1885 else:
1886 self.start = datetime.datetime.min.date()
1887 if end is not None:
d49f8db3 1888 self.end = date_from_str(end, strict=True)
bd558525
JMF
1889 else:
1890 self.end = datetime.datetime.max.date()
37254abc 1891 if self.start > self.end:
bd558525 1892 raise ValueError('Date range: "%s"; the start date must be before the end date' % self)
5f6a1245 1893
bd558525
JMF
1894 @classmethod
1895 def day(cls, day):
1896 """Returns a range that only contains the given day"""
5f6a1245
JW
1897 return cls(day, day)
1898
bd558525
JMF
1899 def __contains__(self, date):
1900 """Check if the date is in the range"""
37254abc
JMF
1901 if not isinstance(date, datetime.date):
1902 date = date_from_str(date)
1903 return self.start <= date <= self.end
5f6a1245 1904
bd558525 1905 def __str__(self):
86e5f3ed 1906 return f'{self.start.isoformat()} - {self.end.isoformat()}'
c496ca96 1907
f2df4071 1908 def __eq__(self, other):
1909 return (isinstance(other, DateRange)
1910 and self.start == other.start and self.end == other.end)
1911
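# Illustrative usage (dates given in the YYYYMMDD form accepted by date_from_str):
#
#   >>> '20220115' in DateRange('20220101', '20220131')
#   True
#   >>> '20220201' in DateRange('20220101', '20220131')
#   False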
c496ca96
PH
1912
1913def platform_name():
14f25df2 1914 """ Returns the platform name as a str """
b1f94422 1915 write_string('DeprecationWarning: yt_dlp.utils.platform_name is deprecated, use platform.platform instead')
1916 return platform.platform()
c496ca96 1917
b1f94422 1918
1919@functools.cache
1920def system_identifier():
1921 python_implementation = platform.python_implementation()
1922 if python_implementation == 'PyPy' and hasattr(sys, 'pypy_version_info'):
1923 python_implementation += ' version %d.%d.%d' % sys.pypy_version_info[:3]
1924
1925 return 'Python %s (%s %s) - %s %s' % (
1926 platform.python_version(),
1927 python_implementation,
1928 platform.architecture()[0],
1929 platform.platform(),
1930 format_field(join_nonempty(*platform.libc_ver(), delim=' '), None, '(%s)'),
1931 )
c257baff
PH
1932
1933
0b9c08b4 1934@functools.cache
49fa4d9a 1935def get_windows_version():
8a82af35 1936 ''' Get the Windows version. Returns () if not running on Windows '''
49fa4d9a
N
1937 if compat_os_name == 'nt':
1938 return version_tuple(platform.win32_ver()[1])
1939 else:
8a82af35 1940 return ()
49fa4d9a
N
1941
1942
734f90bb 1943def write_string(s, out=None, encoding=None):
19a03940 1944 assert isinstance(s, str)
1945 out = out or sys.stderr
7459e3a2 1946
fe1daad3 1947 if compat_os_name == 'nt' and supports_terminal_sequences(out):
3fe75fdc 1948 s = re.sub(r'([\r\n]+)', r' \1', s)
59f943cd 1949
8a82af35 1950 enc, buffer = None, out
cfb0511d 1951 if 'b' in getattr(out, 'mode', ''):
c487cf00 1952 enc = encoding or preferredencoding()
104aa738 1953 elif hasattr(out, 'buffer'):
8a82af35 1954 buffer = out.buffer
104aa738 1955 enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
c487cf00 1956
8a82af35 1957 buffer.write(s.encode(enc, 'ignore') if enc else s)
7459e3a2
PH
1958 out.flush()
1959
1960
48ea9cea
PH
1961def bytes_to_intlist(bs):
1962 if not bs:
1963 return []
1964 if isinstance(bs[0], int): # Python 3
1965 return list(bs)
1966 else:
1967 return [ord(c) for c in bs]
1968
c257baff 1969
cba892fa 1970def intlist_to_bytes(xs):
1971 if not xs:
1972 return b''
ac668111 1973 return struct.pack('%dB' % len(xs), *xs)
c38b1e77
PH
1974
1975
8a82af35 1976class LockingUnsupportedError(OSError):
1890fc63 1977 msg = 'File locking is not supported'
0edb3e33 1978
1979 def __init__(self):
1980 super().__init__(self.msg)
1981
1982
c1c9a79c
PH
1983# Cross-platform file locking
1984if sys.platform == 'win32':
1985 import ctypes.wintypes
1986 import msvcrt
1987
1988 class OVERLAPPED(ctypes.Structure):
1989 _fields_ = [
1990 ('Internal', ctypes.wintypes.LPVOID),
1991 ('InternalHigh', ctypes.wintypes.LPVOID),
1992 ('Offset', ctypes.wintypes.DWORD),
1993 ('OffsetHigh', ctypes.wintypes.DWORD),
1994 ('hEvent', ctypes.wintypes.HANDLE),
1995 ]
1996
1997 kernel32 = ctypes.windll.kernel32
1998 LockFileEx = kernel32.LockFileEx
1999 LockFileEx.argtypes = [
2000 ctypes.wintypes.HANDLE, # hFile
2001 ctypes.wintypes.DWORD, # dwFlags
2002 ctypes.wintypes.DWORD, # dwReserved
2003 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
2004 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
2005 ctypes.POINTER(OVERLAPPED) # Overlapped
2006 ]
2007 LockFileEx.restype = ctypes.wintypes.BOOL
2008 UnlockFileEx = kernel32.UnlockFileEx
2009 UnlockFileEx.argtypes = [
2010 ctypes.wintypes.HANDLE, # hFile
2011 ctypes.wintypes.DWORD, # dwReserved
2012 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
2013 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
2014 ctypes.POINTER(OVERLAPPED) # Overlapped
2015 ]
2016 UnlockFileEx.restype = ctypes.wintypes.BOOL
2017 whole_low = 0xffffffff
2018 whole_high = 0x7fffffff
2019
747c0bd1 2020 def _lock_file(f, exclusive, block):
c1c9a79c
PH
2021 overlapped = OVERLAPPED()
2022 overlapped.Offset = 0
2023 overlapped.OffsetHigh = 0
2024 overlapped.hEvent = 0
2025 f._lock_file_overlapped_p = ctypes.pointer(overlapped)
747c0bd1 2026
2027 if not LockFileEx(msvcrt.get_osfhandle(f.fileno()),
2028 (0x2 if exclusive else 0x0) | (0x0 if block else 0x1),
2029 0, whole_low, whole_high, f._lock_file_overlapped_p):
2cb19820 2030 # NB: The no-argument form of "ctypes.FormatError" does not work on PyPy
2031 raise BlockingIOError(f'Locking file failed: {ctypes.FormatError(ctypes.GetLastError())!r}')
c1c9a79c
PH
2032
2033 def _unlock_file(f):
2034 assert f._lock_file_overlapped_p
2035 handle = msvcrt.get_osfhandle(f.fileno())
747c0bd1 2036 if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p):
c1c9a79c
PH
2037 raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
2038
2039else:
399a76e6
YCH
2040 try:
2041 import fcntl
c1c9a79c 2042
a3125791 2043 def _lock_file(f, exclusive, block):
b63837bc 2044 flags = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
2045 if not block:
2046 flags |= fcntl.LOCK_NB
acea8d7c 2047 try:
b63837bc 2048 fcntl.flock(f, flags)
acea8d7c
JK
2049 except BlockingIOError:
2050 raise
2051 except OSError: # AOSP does not have flock()
b63837bc 2052 fcntl.lockf(f, flags)
c1c9a79c 2053
399a76e6 2054 def _unlock_file(f):
acea8d7c
JK
2055 try:
2056 fcntl.flock(f, fcntl.LOCK_UN)
2057 except OSError:
2058 fcntl.lockf(f, fcntl.LOCK_UN)
a3125791 2059
399a76e6 2060 except ImportError:
399a76e6 2061
a3125791 2062 def _lock_file(f, exclusive, block):
0edb3e33 2063 raise LockingUnsupportedError()
399a76e6
YCH
2064
2065 def _unlock_file(f):
0edb3e33 2066 raise LockingUnsupportedError()
c1c9a79c
PH
2067
2068
86e5f3ed 2069class locked_file:
0edb3e33 2070 locked = False
747c0bd1 2071
a3125791 2072 def __init__(self, filename, mode, block=True, encoding=None):
fcfa8853
JK
2073 if mode not in {'r', 'rb', 'a', 'ab', 'w', 'wb'}:
2074 raise NotImplementedError(mode)
2075 self.mode, self.block = mode, block
2076
2077 writable = any(f in mode for f in 'wax+')
2078 readable = any(f in mode for f in 'r+')
2079 flags = functools.reduce(operator.ior, (
2080 getattr(os, 'O_CLOEXEC', 0), # UNIX only
2081 getattr(os, 'O_BINARY', 0), # Windows only
2082 getattr(os, 'O_NOINHERIT', 0), # Windows only
2083 os.O_CREAT if writable else 0, # O_TRUNC only after locking
2084 os.O_APPEND if 'a' in mode else 0,
2085 os.O_EXCL if 'x' in mode else 0,
2086 os.O_RDONLY if not writable else os.O_RDWR if readable else os.O_WRONLY,
2087 ))
2088
98804d03 2089 self.f = os.fdopen(os.open(filename, flags, 0o666), mode, encoding=encoding)
c1c9a79c
PH
2090
2091 def __enter__(self):
a3125791 2092 exclusive = 'r' not in self.mode
c1c9a79c 2093 try:
a3125791 2094 _lock_file(self.f, exclusive, self.block)
0edb3e33 2095 self.locked = True
86e5f3ed 2096 except OSError:
c1c9a79c
PH
2097 self.f.close()
2098 raise
fcfa8853 2099 if 'w' in self.mode:
131e14dc
JK
2100 try:
2101 self.f.truncate()
2102 except OSError as e:
1890fc63 2103 if e.errno not in (
2104 errno.ESPIPE, # Illegal seek - expected for FIFO
2105 errno.EINVAL, # Invalid argument - expected for /dev/null
2106 ):
2107 raise
c1c9a79c
PH
2108 return self
2109
0edb3e33 2110 def unlock(self):
2111 if not self.locked:
2112 return
c1c9a79c 2113 try:
0edb3e33 2114 _unlock_file(self.f)
c1c9a79c 2115 finally:
0edb3e33 2116 self.locked = False
c1c9a79c 2117
0edb3e33 2118 def __exit__(self, *_):
2119 try:
2120 self.unlock()
2121 finally:
2122 self.f.close()
4eb7f1d1 2123
0edb3e33 2124 open = __enter__
2125 close = __exit__
a3125791 2126
0edb3e33 2127 def __getattr__(self, attr):
2128 return getattr(self.f, attr)
a3125791 2129
0edb3e33 2130 def __iter__(self):
2131 return iter(self.f)
a3125791 2132
4eb7f1d1 2133
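# Illustrative usage (editor's sketch; 'archive.txt' is an example path). The
# context manager acquires the lock on entry and releases it on exit, so
# concurrent yt-dlp processes do not interleave writes:
#
#   with locked_file('archive.txt', 'a', block=True) as f:
#       f.write('youtube abc123\n')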
0b9c08b4 2134@functools.cache
4644ac55
S
2135def get_filesystem_encoding():
2136 encoding = sys.getfilesystemencoding()
2137 return encoding if encoding is not None else 'utf-8'
2138
2139
4eb7f1d1 2140def shell_quote(args):
a6a173c2 2141 quoted_args = []
4644ac55 2142 encoding = get_filesystem_encoding()
a6a173c2
JMF
2143 for a in args:
2144 if isinstance(a, bytes):
2145 # We may get a filename encoded with 'encodeFilename'
2146 a = a.decode(encoding)
aefce8e6 2147 quoted_args.append(compat_shlex_quote(a))
28e614de 2148 return ' '.join(quoted_args)
9d4660ca
PH
2149
2150
2151def smuggle_url(url, data):
2152 """ Pass additional data in a URL for internal use. """
2153
81953d1a
RA
2154 url, idata = unsmuggle_url(url, {})
2155 data.update(idata)
14f25df2 2156 sdata = urllib.parse.urlencode(
28e614de
PH
2157 {'__youtubedl_smuggle': json.dumps(data)})
2158 return url + '#' + sdata
9d4660ca
PH
2159
2160
79f82953 2161def unsmuggle_url(smug_url, default=None):
83e865a3 2162 if '#__youtubedl_smuggle' not in smug_url:
79f82953 2163 return smug_url, default
28e614de 2164 url, _, sdata = smug_url.rpartition('#')
14f25df2 2165 jsond = urllib.parse.parse_qs(sdata)['__youtubedl_smuggle'][0]
9d4660ca
PH
2166 data = json.loads(jsond)
2167 return url, data
02dbf93f
PH
2168
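# Illustrative round-trip (example URL and payload only):
#
#   >>> url = smuggle_url('https://example.com/video', {'referer': 'https://example.com/'})
#   >>> unsmuggle_url(url)
#   ('https://example.com/video', {'referer': 'https://example.com/'})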
2169
e0fd9573 2170def format_decimal_suffix(num, fmt='%d%s', *, factor=1000):
2171 """ Formats numbers with decimal sufixes like K, M, etc """
2172 num, factor = float_or_none(num), float(factor)
4c3f8c3f 2173 if num is None or num < 0:
e0fd9573 2174 return None
eeb2a770 2175 POSSIBLE_SUFFIXES = 'kMGTPEZY'
2176 exponent = 0 if num == 0 else min(int(math.log(num, factor)), len(POSSIBLE_SUFFIXES))
2177 suffix = ['', *POSSIBLE_SUFFIXES][exponent]
abbeeebc 2178 if factor == 1024:
2179 suffix = {'k': 'Ki', '': ''}.get(suffix, f'{suffix}i')
e0fd9573 2180 converted = num / (factor ** exponent)
abbeeebc 2181 return fmt % (converted, suffix)
e0fd9573 2182
2183
02dbf93f 2184def format_bytes(bytes):
f02d24d8 2185 return format_decimal_suffix(bytes, '%.2f%sB', factor=1024) or 'N/A'
f53c966a 2186
1c088fa8 2187
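# Illustrative usage:
#
#   >>> format_decimal_suffix(1500)
#   '1k'
#   >>> format_bytes(3000000)
#   '2.86MiB'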
fb47597b
S
2188def lookup_unit_table(unit_table, s):
2189 units_re = '|'.join(re.escape(u) for u in unit_table)
2190 m = re.match(
782b1b5b 2191 r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
fb47597b
S
2192 if not m:
2193 return None
2194 num_str = m.group('num').replace(',', '.')
2195 mult = unit_table[m.group('unit')]
2196 return int(float(num_str) * mult)
2197
2198
be64b5b0
PH
2199def parse_filesize(s):
2200 if s is None:
2201 return None
2202
dfb1b146 2203 # The lower-case forms are of course incorrect and unofficial,
be64b5b0
PH
2204 # but we support those too
2205 _UNIT_TABLE = {
2206 'B': 1,
2207 'b': 1,
70852b47 2208 'bytes': 1,
be64b5b0
PH
2209 'KiB': 1024,
2210 'KB': 1000,
2211 'kB': 1024,
2212 'Kb': 1000,
13585d76 2213 'kb': 1000,
70852b47
YCH
2214 'kilobytes': 1000,
2215 'kibibytes': 1024,
be64b5b0
PH
2216 'MiB': 1024 ** 2,
2217 'MB': 1000 ** 2,
2218 'mB': 1024 ** 2,
2219 'Mb': 1000 ** 2,
13585d76 2220 'mb': 1000 ** 2,
70852b47
YCH
2221 'megabytes': 1000 ** 2,
2222 'mebibytes': 1024 ** 2,
be64b5b0
PH
2223 'GiB': 1024 ** 3,
2224 'GB': 1000 ** 3,
2225 'gB': 1024 ** 3,
2226 'Gb': 1000 ** 3,
13585d76 2227 'gb': 1000 ** 3,
70852b47
YCH
2228 'gigabytes': 1000 ** 3,
2229 'gibibytes': 1024 ** 3,
be64b5b0
PH
2230 'TiB': 1024 ** 4,
2231 'TB': 1000 ** 4,
2232 'tB': 1024 ** 4,
2233 'Tb': 1000 ** 4,
13585d76 2234 'tb': 1000 ** 4,
70852b47
YCH
2235 'terabytes': 1000 ** 4,
2236 'tebibytes': 1024 ** 4,
be64b5b0
PH
2237 'PiB': 1024 ** 5,
2238 'PB': 1000 ** 5,
2239 'pB': 1024 ** 5,
2240 'Pb': 1000 ** 5,
13585d76 2241 'pb': 1000 ** 5,
70852b47
YCH
2242 'petabytes': 1000 ** 5,
2243 'pebibytes': 1024 ** 5,
be64b5b0
PH
2244 'EiB': 1024 ** 6,
2245 'EB': 1000 ** 6,
2246 'eB': 1024 ** 6,
2247 'Eb': 1000 ** 6,
13585d76 2248 'eb': 1000 ** 6,
70852b47
YCH
2249 'exabytes': 1000 ** 6,
2250 'exbibytes': 1024 ** 6,
be64b5b0
PH
2251 'ZiB': 1024 ** 7,
2252 'ZB': 1000 ** 7,
2253 'zB': 1024 ** 7,
2254 'Zb': 1000 ** 7,
13585d76 2255 'zb': 1000 ** 7,
70852b47
YCH
2256 'zettabytes': 1000 ** 7,
2257 'zebibytes': 1024 ** 7,
be64b5b0
PH
2258 'YiB': 1024 ** 8,
2259 'YB': 1000 ** 8,
2260 'yB': 1024 ** 8,
2261 'Yb': 1000 ** 8,
13585d76 2262 'yb': 1000 ** 8,
70852b47
YCH
2263 'yottabytes': 1000 ** 8,
2264 'yobibytes': 1024 ** 8,
be64b5b0
PH
2265 }
2266
fb47597b
S
2267 return lookup_unit_table(_UNIT_TABLE, s)
2268
2269
2270def parse_count(s):
2271 if s is None:
be64b5b0
PH
2272 return None
2273
352d5da8 2274 s = re.sub(r'^[^\d]+\s', '', s).strip()
fb47597b
S
2275
2276 if re.match(r'^[\d,.]+$', s):
2277 return str_to_int(s)
2278
2279 _UNIT_TABLE = {
2280 'k': 1000,
2281 'K': 1000,
2282 'm': 1000 ** 2,
2283 'M': 1000 ** 2,
2284 'kk': 1000 ** 2,
2285 'KK': 1000 ** 2,
352d5da8 2286 'b': 1000 ** 3,
2287 'B': 1000 ** 3,
fb47597b 2288 }
be64b5b0 2289
352d5da8 2290 ret = lookup_unit_table(_UNIT_TABLE, s)
2291 if ret is not None:
2292 return ret
2293
2294 mobj = re.match(r'([\d,.]+)(?:$|\s)', s)
2295 if mobj:
2296 return str_to_int(mobj.group(1))
be64b5b0 2297
2f7ae819 2298
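# Illustrative usage:
#
#   >>> parse_filesize('1.5 MiB')
#   1572864
#   >>> parse_count('2.5M')
#   2500000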
5d45484c 2299def parse_resolution(s, *, lenient=False):
b871d7e9
S
2300 if s is None:
2301 return {}
2302
5d45484c
LNO
2303 if lenient:
2304 mobj = re.search(r'(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)', s)
2305 else:
2306 mobj = re.search(r'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s)
b871d7e9
S
2307 if mobj:
2308 return {
2309 'width': int(mobj.group('w')),
2310 'height': int(mobj.group('h')),
2311 }
2312
17ec8bcf 2313 mobj = re.search(r'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s)
b871d7e9
S
2314 if mobj:
2315 return {'height': int(mobj.group(1))}
2316
2317 mobj = re.search(r'\b([48])[kK]\b', s)
2318 if mobj:
2319 return {'height': int(mobj.group(1)) * 540}
2320
2321 return {}
2322
2323
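# Illustrative usage:
#
#   >>> parse_resolution('1920x1080')
#   {'width': 1920, 'height': 1080}
#   >>> parse_resolution('720p')
#   {'height': 720}
#   >>> parse_resolution('4k')
#   {'height': 2160}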
0dc41787 2324def parse_bitrate(s):
14f25df2 2325 if not isinstance(s, str):
0dc41787
S
2326 return
2327 mobj = re.search(r'\b(\d+)\s*kbps', s)
2328 if mobj:
2329 return int(mobj.group(1))
2330
2331
a942d6cb 2332def month_by_name(name, lang='en'):
caefb1de
PH
2333 """ Return the number of a month by (locale-independently) English name """
2334
f6717dec 2335 month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
a942d6cb 2336
caefb1de 2337 try:
f6717dec 2338 return month_names.index(name) + 1
7105440c
YCH
2339 except ValueError:
2340 return None
2341
2342
2343def month_by_abbreviation(abbrev):
2344 """ Return the number of a month by (locale-independently) English
2345 abbreviations """
2346
2347 try:
2348 return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
caefb1de
PH
2349 except ValueError:
2350 return None
18258362
JMF
2351
2352
5aafe895 2353def fix_xml_ampersands(xml_str):
18258362 2354 """Replace all the '&' by '&amp;' in XML"""
5aafe895
PH
2355 return re.sub(
2356 r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
28e614de 2357 '&amp;',
5aafe895 2358 xml_str)
e3946f98
PH
2359
2360
2361def setproctitle(title):
14f25df2 2362 assert isinstance(title, str)
c1c05c67
YCH
2363
2364 # ctypes in Jython is not complete
2365 # http://bugs.jython.org/issue2148
2366 if sys.platform.startswith('java'):
2367 return
2368
e3946f98 2369 try:
611c1dd9 2370 libc = ctypes.cdll.LoadLibrary('libc.so.6')
e3946f98
PH
2371 except OSError:
2372 return
2f49bcd6
RC
2373 except TypeError:
2374 # LoadLibrary in Windows Python 2.7.13 only expects
2375 # a bytestring, but since unicode_literals turns
2376 # every string into a unicode string, it fails.
2377 return
0f06bcd7 2378 title_bytes = title.encode()
6eefe533
PH
2379 buf = ctypes.create_string_buffer(len(title_bytes))
2380 buf.value = title_bytes
e3946f98 2381 try:
6eefe533 2382 libc.prctl(15, buf, 0, 0, 0)
e3946f98
PH
2383 except AttributeError:
2384 return # Strange libc, just skip this
d7dda168
PH
2385
2386
2387def remove_start(s, start):
46bc9b7d 2388 return s[len(start):] if s is not None and s.startswith(start) else s
29eb5174
PH
2389
2390
2b9faf55 2391def remove_end(s, end):
46bc9b7d 2392 return s[:-len(end)] if s is not None and s.endswith(end) else s
2b9faf55
PH
2393
2394
31b2051e
S
2395def remove_quotes(s):
2396 if s is None or len(s) < 2:
2397 return s
2398 for quote in ('"', "'", ):
2399 if s[0] == quote and s[-1] == quote:
2400 return s[1:-1]
2401 return s
2402
2403
b6e0c7d2 2404def get_domain(url):
ebf99aaf 2405 """
2406 This implementation is inconsistent, but is kept for compatibility.
2407 Use this only for "webpage_url_domain"
2408 """
2409 return remove_start(urllib.parse.urlparse(url).netloc, 'www.') or None
b6e0c7d2
U
2410
2411
29eb5174 2412def url_basename(url):
14f25df2 2413 path = urllib.parse.urlparse(url).path
28e614de 2414 return path.strip('/').split('/')[-1]
aa94a6d3
PH
2415
2416
02dc0a36
S
2417def base_url(url):
2418 return re.match(r'https?://[^?#&]+/', url).group()
2419
2420
e34c3361 2421def urljoin(base, path):
4b5de77b 2422 if isinstance(path, bytes):
0f06bcd7 2423 path = path.decode()
14f25df2 2424 if not isinstance(path, str) or not path:
e34c3361 2425 return None
fad4ceb5 2426 if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
e34c3361 2427 return path
4b5de77b 2428 if isinstance(base, bytes):
0f06bcd7 2429 base = base.decode()
14f25df2 2430 if not isinstance(base, str) or not re.match(
4b5de77b 2431 r'^(?:https?:)?//', base):
e34c3361 2432 return None
14f25df2 2433 return urllib.parse.urljoin(base, path)
e34c3361
S
2434
2435
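# Illustrative usage of the URL helpers above (example URLs only):
#
#   >>> url_basename('https://example.com/path/to/file.mp4?x=1')
#   'file.mp4'
#   >>> base_url('https://example.com/path/to/manifest.m3u8?token=x')
#   'https://example.com/path/to/'
#   >>> urljoin('https://example.com/a/b.html', '../c.mp4')
#   'https://example.com/c.mp4'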
ac668111 2436class HEADRequest(urllib.request.Request):
aa94a6d3 2437 def get_method(self):
611c1dd9 2438 return 'HEAD'
7217e148
PH
2439
2440
ac668111 2441class PUTRequest(urllib.request.Request):
95cf60e8
S
2442 def get_method(self):
2443 return 'PUT'
2444
2445
9732d77e 2446def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
9e907ebd 2447 if get_attr and v is not None:
2448 v = getattr(v, get_attr, None)
1812afb7
S
2449 try:
2450 return int(v) * invscale // scale
31c49255 2451 except (ValueError, TypeError, OverflowError):
af98f8ff 2452 return default
9732d77e 2453
9572013d 2454
40a90862 2455def str_or_none(v, default=None):
14f25df2 2456 return default if v is None else str(v)
40a90862 2457
9732d77e
PH
2458
2459def str_to_int(int_str):
48d4681e 2460 """ A more relaxed version of int_or_none """
f9934b96 2461 if isinstance(int_str, int):
348c6bf1 2462 return int_str
14f25df2 2463 elif isinstance(int_str, str):
42db58ec
S
2464 int_str = re.sub(r'[,\.\+]', '', int_str)
2465 return int_or_none(int_str)
608d11f5
PH
2466
2467
9732d77e 2468def float_or_none(v, scale=1, invscale=1, default=None):
caf80631
S
2469 if v is None:
2470 return default
2471 try:
2472 return float(v) * invscale / scale
5e1271c5 2473 except (ValueError, TypeError):
caf80631 2474 return default
43f775e4
PH
2475
2476
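# Illustrative usage of the coercion helpers above:
#
#   >>> int_or_none('1080')
#   1080
#   >>> int_or_none('abc', default=-1)
#   -1
#   >>> str_to_int('1,234,567')
#   1234567
#   >>> float_or_none('23.976')
#   23.976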
c7e327c4
S
2477def bool_or_none(v, default=None):
2478 return v if isinstance(v, bool) else default
2479
2480
53cd37ba 2481def strip_or_none(v, default=None):
14f25df2 2482 return v.strip() if isinstance(v, str) else default
b72b4431
S
2483
2484
af03000a 2485def url_or_none(url):
14f25df2 2486 if not url or not isinstance(url, str):
af03000a
S
2487 return None
2488 url = url.strip()
29f7c58a 2489 return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None
af03000a
S
2490
2491
3e9b66d7 2492def request_to_url(req):
ac668111 2493 if isinstance(req, urllib.request.Request):
3e9b66d7
LNO
2494 return req.get_full_url()
2495 else:
2496 return req
2497
2498
e29663c6 2499def strftime_or_none(timestamp, date_format, default=None):
2500 datetime_object = None
2501 try:
f9934b96 2502 if isinstance(timestamp, (int, float)): # unix timestamp
e29663c6 2503 datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
14f25df2 2504 elif isinstance(timestamp, str): # assume YYYYMMDD
e29663c6 2505 datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
2506 return datetime_object.strftime(date_format)
2507 except (ValueError, TypeError, AttributeError):
2508 return default
2509
2510
608d11f5 2511def parse_duration(s):
f9934b96 2512 if not isinstance(s, str):
608d11f5 2513 return None
ca7b3246 2514 s = s.strip()
38d79fd1 2515 if not s:
2516 return None
ca7b3246 2517
acaff495 2518 days, hours, mins, secs, ms = [None] * 5
8bd1c00b 2519 m = re.match(r'''(?x)
2520 (?P<before_secs>
2521 (?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?
2522 (?P<secs>(?(before_secs)[0-9]{1,2}|[0-9]+))
2523 (?P<ms>[.:][0-9]+)?Z?$
2524 ''', s)
acaff495 2525 if m:
8bd1c00b 2526 days, hours, mins, secs, ms = m.group('days', 'hours', 'mins', 'secs', 'ms')
acaff495 2527 else:
2528 m = re.match(
056653bb
S
2529 r'''(?ix)(?:P?
2530 (?:
1c1b2f96 2531 [0-9]+\s*y(?:ears?)?,?\s*
056653bb
S
2532 )?
2533 (?:
1c1b2f96 2534 [0-9]+\s*m(?:onths?)?,?\s*
056653bb
S
2535 )?
2536 (?:
1c1b2f96 2537 [0-9]+\s*w(?:eeks?)?,?\s*
056653bb 2538 )?
8f4b58d7 2539 (?:
1c1b2f96 2540 (?P<days>[0-9]+)\s*d(?:ays?)?,?\s*
8f4b58d7 2541 )?
056653bb 2542 T)?
acaff495 2543 (?:
1c1b2f96 2544 (?P<hours>[0-9]+)\s*h(?:ours?)?,?\s*
acaff495 2545 )?
2546 (?:
1c1b2f96 2547 (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?,?\s*
acaff495 2548 )?
2549 (?:
2550 (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
15846398 2551 )?Z?$''', s)
acaff495 2552 if m:
2553 days, hours, mins, secs, ms = m.groups()
2554 else:
15846398 2555 m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
acaff495 2556 if m:
2557 hours, mins = m.groups()
2558 else:
2559 return None
2560
acaff495 2561 if ms:
19a03940 2562 ms = ms.replace(':', '.')
2563 return sum(float(part or 0) * mult for part, mult in (
2564 (days, 86400), (hours, 3600), (mins, 60), (secs, 1), (ms, 1)))
91d7d0b3
JMF
2565
2566
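# Illustrative usage (both colon-separated and ISO 8601-style durations are accepted):
#
#   >>> parse_duration('1:00:00')
#   3600.0
#   >>> parse_duration('PT1H30M')
#   5400.0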
e65e4c88 2567def prepend_extension(filename, ext, expected_real_ext=None):
5f6a1245 2568 name, real_ext = os.path.splitext(filename)
e65e4c88 2569 return (
86e5f3ed 2570 f'{name}.{ext}{real_ext}'
e65e4c88 2571 if not expected_real_ext or real_ext[1:] == expected_real_ext
86e5f3ed 2572 else f'{filename}.{ext}')
d70ad093
PH
2573
2574
b3ed15b7
S
2575def replace_extension(filename, ext, expected_real_ext=None):
2576 name, real_ext = os.path.splitext(filename)
86e5f3ed 2577 return '{}.{}'.format(
b3ed15b7
S
2578 name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
2579 ext)
2580
2581
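# Illustrative usage:
#
#   >>> prepend_extension('video.mp4', 'temp')
#   'video.temp.mp4'
#   >>> replace_extension('video.mp4', 'webm')
#   'video.webm'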
d70ad093
PH
2582def check_executable(exe, args=[]):
2583 """ Checks if the given binary is installed somewhere in PATH, and returns its name.
2584 args can be a list of arguments for a short output (like -version) """
2585 try:
f0c9fb96 2586 Popen.run([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
d70ad093
PH
2587 except OSError:
2588 return False
2589 return exe
b7ab0590
PH
2590
2591
8a7f68d0 2592def _get_exe_version_output(exe, args, *, to_screen=None):
2593 if to_screen:
2594 to_screen(f'Checking exe version: {shell_quote([exe] + args)}')
95807118 2595 try:
b64d04c1 2596 # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
7a5c1cfe 2597 # SIGTTOU if yt-dlp is run in the background.
067aa17e 2598 # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
f0c9fb96 2599 stdout, _, _ = Popen.run([encodeArgument(exe)] + args, text=True,
2600 stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
95807118
PH
2601 except OSError:
2602 return False
f0c9fb96 2603 return stdout
cae97f65
PH
2604
2605
2606def detect_exe_version(output, version_re=None, unrecognized='present'):
14f25df2 2607 assert isinstance(output, str)
cae97f65
PH
2608 if version_re is None:
2609 version_re = r'version\s+([-0-9._a-zA-Z]+)'
2610 m = re.search(version_re, output)
95807118
PH
2611 if m:
2612 return m.group(1)
2613 else:
2614 return unrecognized
2615
2616
9af98e17 2617def get_exe_version(exe, args=['--version'],
2618 version_re=None, unrecognized='present'):
2619 """ Returns the version of the specified executable,
2620 or False if the executable is not present """
2621 out = _get_exe_version_output(exe, args)
2622 return detect_exe_version(out, version_re, unrecognized) if out else False
2623
2624
7e88d7d7 2625def frange(start=0, stop=None, step=1):
2626 """Float range"""
2627 if stop is None:
2628 start, stop = 0, start
2629 sign = [-1, 1][step > 0] if step else 0
2630 while sign * start < sign * stop:
2631 yield start
2632 start += step
2633
2634
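# Illustrative usage:
#
#   >>> list(frange(5))
#   [0, 1, 2, 3, 4]
#   >>> list(frange(0, 1, 0.25))
#   [0, 0.25, 0.5, 0.75]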
cb89cfc1 2635class LazyList(collections.abc.Sequence):
0f06bcd7 2636 """Lazy immutable list from an iterable
2637 Note that slices of a LazyList are lists and not LazyList"""
483336e7 2638
8e5fecc8 2639 class IndexError(IndexError):
2640 pass
2641
282f5709 2642 def __init__(self, iterable, *, reverse=False, _cache=None):
0f06bcd7 2643 self._iterable = iter(iterable)
2644 self._cache = [] if _cache is None else _cache
2645 self._reversed = reverse
483336e7 2646
2647 def __iter__(self):
0f06bcd7 2648 if self._reversed:
28419ca2 2649 # We need to consume the entire iterable to iterate in reverse
981052c9 2650 yield from self.exhaust()
28419ca2 2651 return
0f06bcd7 2652 yield from self._cache
2653 for item in self._iterable:
2654 self._cache.append(item)
483336e7 2655 yield item
2656
0f06bcd7 2657 def _exhaust(self):
2658 self._cache.extend(self._iterable)
2659 self._iterable = [] # Discard the emptied iterable to make it pickle-able
2660 return self._cache
28419ca2 2661
981052c9 2662 def exhaust(self):
0f06bcd7 2663 """Evaluate the entire iterable"""
2664 return self._exhaust()[::-1 if self._reversed else 1]
981052c9 2665
28419ca2 2666 @staticmethod
0f06bcd7 2667 def _reverse_index(x):
f2df4071 2668 return None if x is None else ~x
483336e7 2669
2670 def __getitem__(self, idx):
2671 if isinstance(idx, slice):
0f06bcd7 2672 if self._reversed:
2673 idx = slice(self._reverse_index(idx.start), self._reverse_index(idx.stop), -(idx.step or 1))
e0f2b4b4 2674 start, stop, step = idx.start, idx.stop, idx.step or 1
483336e7 2675 elif isinstance(idx, int):
0f06bcd7 2676 if self._reversed:
2677 idx = self._reverse_index(idx)
e0f2b4b4 2678 start, stop, step = idx, idx, 0
483336e7 2679 else:
2680 raise TypeError('indices must be integers or slices')
e0f2b4b4 2681 if ((start or 0) < 0 or (stop or 0) < 0
2682 or (start is None and step < 0)
2683 or (stop is None and step > 0)):
483336e7 2684 # We need to consume the entire iterable to be able to slice from the end
2685 # Obviously, never use this with infinite iterables
0f06bcd7 2686 self._exhaust()
8e5fecc8 2687 try:
0f06bcd7 2688 return self._cache[idx]
8e5fecc8 2689 except IndexError as e:
2690 raise self.IndexError(e) from e
0f06bcd7 2691 n = max(start or 0, stop or 0) - len(self._cache) + 1
28419ca2 2692 if n > 0:
0f06bcd7 2693 self._cache.extend(itertools.islice(self._iterable, n))
8e5fecc8 2694 try:
0f06bcd7 2695 return self._cache[idx]
8e5fecc8 2696 except IndexError as e:
2697 raise self.IndexError(e) from e
483336e7 2698
2699 def __bool__(self):
2700 try:
0f06bcd7 2701 self[-1] if self._reversed else self[0]
8e5fecc8 2702 except self.IndexError:
483336e7 2703 return False
2704 return True
2705
2706 def __len__(self):
0f06bcd7 2707 self._exhaust()
2708 return len(self._cache)
483336e7 2709
282f5709 2710 def __reversed__(self):
0f06bcd7 2711 return type(self)(self._iterable, reverse=not self._reversed, _cache=self._cache)
282f5709 2712
2713 def __copy__(self):
0f06bcd7 2714 return type(self)(self._iterable, reverse=self._reversed, _cache=self._cache)
282f5709 2715
28419ca2 2716 def __repr__(self):
2717 # repr and str should mimic a list. So we exhaust the iterable
2718 return repr(self.exhaust())
2719
2720 def __str__(self):
2721 return repr(self.exhaust())
2722
483336e7 2723
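# Illustrative usage (itertools is imported at the top of this module; the
# infinite iterator below is only consumed as far as the requested indices):
#
#   >>> lazy = LazyList(itertools.count())
#   >>> lazy[3]
#   3
#   >>> lazy[:5]  # slices are plain lists, per the docstring above
#   [0, 1, 2, 3, 4]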
7be9ccff 2724class PagedList:
c07a39ae 2725
2726 class IndexError(IndexError):
2727 pass
2728
dd26ced1
PH
2729 def __len__(self):
2730 # This is only useful for tests
2731 return len(self.getslice())
2732
7be9ccff 2733 def __init__(self, pagefunc, pagesize, use_cache=True):
2734 self._pagefunc = pagefunc
2735 self._pagesize = pagesize
f1d13090 2736 self._pagecount = float('inf')
7be9ccff 2737 self._use_cache = use_cache
2738 self._cache = {}
2739
2740 def getpage(self, pagenum):
d8cf8d97 2741 page_results = self._cache.get(pagenum)
2742 if page_results is None:
f1d13090 2743 page_results = [] if pagenum > self._pagecount else list(self._pagefunc(pagenum))
7be9ccff 2744 if self._use_cache:
2745 self._cache[pagenum] = page_results
2746 return page_results
2747
2748 def getslice(self, start=0, end=None):
2749 return list(self._getslice(start, end))
2750
2751 def _getslice(self, start, end):
55575225 2752 raise NotImplementedError('This method must be implemented by subclasses')
2753
2754 def __getitem__(self, idx):
f1d13090 2755 assert self._use_cache, 'Indexing PagedList requires cache'
55575225 2756 if not isinstance(idx, int) or idx < 0:
2757 raise TypeError('indices must be non-negative integers')
2758 entries = self.getslice(idx, idx + 1)
d8cf8d97 2759 if not entries:
c07a39ae 2760 raise self.IndexError()
d8cf8d97 2761 return entries[0]
55575225 2762
9c44d242
PH
2763
2764class OnDemandPagedList(PagedList):
a44ca5a4 2765 """Download pages until a page with less than maximum results"""
86e5f3ed 2766
7be9ccff 2767 def _getslice(self, start, end):
b7ab0590
PH
2768 for pagenum in itertools.count(start // self._pagesize):
2769 firstid = pagenum * self._pagesize
2770 nextfirstid = pagenum * self._pagesize + self._pagesize
2771 if start >= nextfirstid:
2772 continue
2773
b7ab0590
PH
2774 startv = (
2775 start % self._pagesize
2776 if firstid <= start < nextfirstid
2777 else 0)
b7ab0590
PH
2778 endv = (
2779 ((end - 1) % self._pagesize) + 1
2780 if (end is not None and firstid <= end <= nextfirstid)
2781 else None)
2782
f1d13090 2783 try:
2784 page_results = self.getpage(pagenum)
2785 except Exception:
2786 self._pagecount = pagenum - 1
2787 raise
b7ab0590
PH
2788 if startv != 0 or endv is not None:
2789 page_results = page_results[startv:endv]
7be9ccff 2790 yield from page_results
b7ab0590
PH
2791
 2792 # A little optimization - if the current page is not "full", i.e. does
2793 # not contain page_size videos then we can assume that this page
2794 # is the last one - there are no more ids on further pages -
2795 # i.e. no need to query again.
2796 if len(page_results) + startv < self._pagesize:
2797 break
2798
2799 # If we got the whole page, but the next page is not interesting,
2800 # break out early as well
2801 if end == nextfirstid:
2802 break
81c2f20b
PH
2803
2804
9c44d242 2805class InAdvancePagedList(PagedList):
a44ca5a4 2806 """PagedList with total number of pages known in advance"""
86e5f3ed 2807
9c44d242 2808 def __init__(self, pagefunc, pagecount, pagesize):
7be9ccff 2809 PagedList.__init__(self, pagefunc, pagesize, True)
f1d13090 2810 self._pagecount = pagecount
9c44d242 2811
7be9ccff 2812 def _getslice(self, start, end):
9c44d242 2813 start_page = start // self._pagesize
d37707bd 2814 end_page = self._pagecount if end is None else min(self._pagecount, end // self._pagesize + 1)
9c44d242
PH
2815 skip_elems = start - start_page * self._pagesize
2816 only_more = None if end is None else end - start
2817 for pagenum in range(start_page, end_page):
7be9ccff 2818 page_results = self.getpage(pagenum)
9c44d242 2819 if skip_elems:
7be9ccff 2820 page_results = page_results[skip_elems:]
9c44d242
PH
2821 skip_elems = None
2822 if only_more is not None:
7be9ccff 2823 if len(page_results) < only_more:
2824 only_more -= len(page_results)
9c44d242 2825 else:
7be9ccff 2826 yield from page_results[:only_more]
9c44d242 2827 break
7be9ccff 2828 yield from page_results
9c44d242
PH
2829
2830
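# Illustrative usage (editor's sketch; fetch_page stands in for a paged API call
# that yields at most 10 results per page out of 35 total):
#
#   >>> def fetch_page(pagenum):
#   ...     return list(range(pagenum * 10, min((pagenum + 1) * 10, 35)))
#   >>> pl = OnDemandPagedList(fetch_page, 10)
#   >>> pl.getslice(0, 5)
#   [0, 1, 2, 3, 4]
#   >>> len(pl)
#   35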
7e88d7d7 2831class PlaylistEntries:
2832 MissingEntry = object()
2833 is_exhausted = False
2834
2835 def __init__(self, ydl, info_dict):
7e9a6125 2836 self.ydl = ydl
2837
2838 # _entries must be assigned now since infodict can change during iteration
2839 entries = info_dict.get('entries')
2840 if entries is None:
2841 raise EntryNotInPlaylist('There are no entries')
2842 elif isinstance(entries, list):
2843 self.is_exhausted = True
2844
2845 requested_entries = info_dict.get('requested_entries')
2846 self.is_incomplete = bool(requested_entries)
2847 if self.is_incomplete:
2848 assert self.is_exhausted
2849 self._entries = [self.MissingEntry] * max(requested_entries)
2850 for i, entry in zip(requested_entries, entries):
2851 self._entries[i - 1] = entry
2852 elif isinstance(entries, (list, PagedList, LazyList)):
2853 self._entries = entries
2854 else:
2855 self._entries = LazyList(entries)
7e88d7d7 2856
2857 PLAYLIST_ITEMS_RE = re.compile(r'''(?x)
2858 (?P<start>[+-]?\d+)?
2859 (?P<range>[:-]
2860 (?P<end>[+-]?\d+|inf(?:inite)?)?
2861 (?::(?P<step>[+-]?\d+))?
2862 )?''')
2863
2864 @classmethod
2865 def parse_playlist_items(cls, string):
2866 for segment in string.split(','):
2867 if not segment:
 2868 raise ValueError('There are two or more consecutive commas')
2869 mobj = cls.PLAYLIST_ITEMS_RE.fullmatch(segment)
2870 if not mobj:
2871 raise ValueError(f'{segment!r} is not a valid specification')
2872 start, end, step, has_range = mobj.group('start', 'end', 'step', 'range')
2873 if int_or_none(step) == 0:
2874 raise ValueError(f'Step in {segment!r} cannot be zero')
2875 yield slice(int_or_none(start), float_or_none(end), int_or_none(step)) if has_range else int(start)
2876
2877 def get_requested_items(self):
2878 playlist_items = self.ydl.params.get('playlist_items')
2879 playlist_start = self.ydl.params.get('playliststart', 1)
2880 playlist_end = self.ydl.params.get('playlistend')
2881 # For backwards compatibility, interpret -1 as whole list
2882 if playlist_end in (-1, None):
2883 playlist_end = ''
2884 if not playlist_items:
2885 playlist_items = f'{playlist_start}:{playlist_end}'
2886 elif playlist_start != 1 or playlist_end:
2887 self.ydl.report_warning('Ignoring playliststart and playlistend because playlistitems was given', only_once=True)
2888
2889 for index in self.parse_playlist_items(playlist_items):
2890 for i, entry in self[index]:
2891 yield i, entry
1ac4fd80 2892 if not entry:
2893 continue
7e88d7d7 2894 try:
2895 # TODO: Add auto-generated fields
2896 self.ydl._match_entry(entry, incomplete=True, silent=True)
2897 except (ExistingVideoReached, RejectedVideoReached):
2898 return
2899
7e9a6125 2900 def get_full_count(self):
2901 if self.is_exhausted and not self.is_incomplete:
7e88d7d7 2902 return len(self)
2903 elif isinstance(self._entries, InAdvancePagedList):
2904 if self._entries._pagesize == 1:
2905 return self._entries._pagecount
2906
7e88d7d7 2907 @functools.cached_property
2908 def _getter(self):
2909 if isinstance(self._entries, list):
2910 def get_entry(i):
2911 try:
2912 entry = self._entries[i]
2913 except IndexError:
2914 entry = self.MissingEntry
2915 if not self.is_incomplete:
2916 raise self.IndexError()
2917 if entry is self.MissingEntry:
2918 raise EntryNotInPlaylist(f'Entry {i} cannot be found')
2919 return entry
2920 else:
2921 def get_entry(i):
2922 try:
2923 return type(self.ydl)._handle_extraction_exceptions(lambda _, i: self._entries[i])(self.ydl, i)
2924 except (LazyList.IndexError, PagedList.IndexError):
2925 raise self.IndexError()
2926 return get_entry
2927
2928 def __getitem__(self, idx):
2929 if isinstance(idx, int):
2930 idx = slice(idx, idx)
2931
2932 # NB: PlaylistEntries[1:10] => (0, 1, ... 9)
2933 step = 1 if idx.step is None else idx.step
2934 if idx.start is None:
2935 start = 0 if step > 0 else len(self) - 1
2936 else:
2937 start = idx.start - 1 if idx.start >= 0 else len(self) + idx.start
2938
2939 # NB: Do not call len(self) when idx == [:]
2940 if idx.stop is None:
2941 stop = 0 if step < 0 else float('inf')
2942 else:
2943 stop = idx.stop - 1 if idx.stop >= 0 else len(self) + idx.stop
2944 stop += [-1, 1][step > 0]
2945
2946 for i in frange(start, stop, step):
2947 if i < 0:
2948 continue
2949 try:
7e9a6125 2950 entry = self._getter(i)
2951 except self.IndexError:
2952 self.is_exhausted = True
2953 if step > 0:
7e88d7d7 2954 break
7e9a6125 2955 continue
7e88d7d7 2956 yield i + 1, entry
2957
2958 def __len__(self):
2959 return len(tuple(self[:]))
2960
2961 class IndexError(IndexError):
2962 pass
2963
2964
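# Illustrative parsing of a --playlist-items specification:
#
#   >>> list(PlaylistEntries.parse_playlist_items('1,3:5,-1'))
#   [1, slice(3, 5.0, None), -1]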
81c2f20b 2965def uppercase_escape(s):
676eb3f2 2966 unicode_escape = codecs.getdecoder('unicode_escape')
81c2f20b 2967 return re.sub(
a612753d 2968 r'\\U[0-9a-fA-F]{8}',
676eb3f2
PH
2969 lambda m: unicode_escape(m.group(0))[0],
2970 s)
0fe2ff78
YCH
2971
2972
2973def lowercase_escape(s):
2974 unicode_escape = codecs.getdecoder('unicode_escape')
2975 return re.sub(
2976 r'\\u[0-9a-fA-F]{4}',
2977 lambda m: unicode_escape(m.group(0))[0],
2978 s)
b53466e1 2979
d05cfe06
S
2980
2981def escape_rfc3986(s):
2982 """Escape non-ASCII characters as suggested by RFC 3986"""
f9934b96 2983 return urllib.parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
d05cfe06
S
2984
2985
2986def escape_url(url):
2987 """Escape URL as suggested by RFC 3986"""
14f25df2 2988 url_parsed = urllib.parse.urlparse(url)
d05cfe06 2989 return url_parsed._replace(
efbed08d 2990 netloc=url_parsed.netloc.encode('idna').decode('ascii'),
d05cfe06
S
2991 path=escape_rfc3986(url_parsed.path),
2992 params=escape_rfc3986(url_parsed.params),
2993 query=escape_rfc3986(url_parsed.query),
2994 fragment=escape_rfc3986(url_parsed.fragment)
2995 ).geturl()
2996
62e609ab 2997
4dfbf869 2998def parse_qs(url):
14f25df2 2999 return urllib.parse.parse_qs(urllib.parse.urlparse(url).query)
4dfbf869 3000
3001
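# Illustrative usage (example URLs only):
#
#   >>> parse_qs('https://example.com/playlist?list=abc&page=2')
#   {'list': ['abc'], 'page': ['2']}
#   >>> escape_url('https://example.com/päth?q=söme value')
#   'https://example.com/p%C3%A4th?q=s%C3%B6me%20value'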
62e609ab
PH
3002def read_batch_urls(batch_fd):
3003 def fixup(url):
14f25df2 3004 if not isinstance(url, str):
62e609ab 3005 url = url.decode('utf-8', 'replace')
8c04f0be 3006 BOM_UTF8 = ('\xef\xbb\xbf', '\ufeff')
3007 for bom in BOM_UTF8:
3008 if url.startswith(bom):
3009 url = url[len(bom):]
3010 url = url.lstrip()
3011 if not url or url.startswith(('#', ';', ']')):
62e609ab 3012 return False
8c04f0be 3013 # "#" cannot be stripped out since it is part of the URI
962ffcf8 3014 # However, it can be safely stripped out if it follows a whitespace
8c04f0be 3015 return re.split(r'\s#', url, 1)[0].rstrip()
62e609ab
PH
3016
3017 with contextlib.closing(batch_fd) as fd:
3018 return [url for url in map(fixup, fd) if url]
b74fa8cd
JMF
3019
3020
3021def urlencode_postdata(*args, **kargs):
14f25df2 3022 return urllib.parse.urlencode(*args, **kargs).encode('ascii')
bcf89ce6
PH
3023
3024
38f9ef31 3025def update_url_query(url, query):
cacd9966
YCH
3026 if not query:
3027 return url
14f25df2 3028 parsed_url = urllib.parse.urlparse(url)
3029 qs = urllib.parse.parse_qs(parsed_url.query)
38f9ef31 3030 qs.update(query)
14f25df2 3031 return urllib.parse.urlunparse(parsed_url._replace(
3032 query=urllib.parse.urlencode(qs, True)))
16392824 3033
8e60dc75 3034
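# Illustrative usage:
#
#   >>> update_url_query('https://example.com/path?a=1', {'b': '2'})
#   'https://example.com/path?a=1&b=2'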
c043c246 3035def update_Request(req, url=None, data=None, headers=None, query=None):
ed0291d1 3036 req_headers = req.headers.copy()
c043c246 3037 req_headers.update(headers or {})
ed0291d1
S
3038 req_data = data or req.data
3039 req_url = update_url_query(url or req.get_full_url(), query)
95cf60e8
S
3040 req_get_method = req.get_method()
3041 if req_get_method == 'HEAD':
3042 req_type = HEADRequest
3043 elif req_get_method == 'PUT':
3044 req_type = PUTRequest
3045 else:
ac668111 3046 req_type = urllib.request.Request
ed0291d1
S
3047 new_req = req_type(
3048 req_url, data=req_data, headers=req_headers,
3049 origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
3050 if hasattr(req, 'timeout'):
3051 new_req.timeout = req.timeout
3052 return new_req
3053
3054
10c87c15 3055def _multipart_encode_impl(data, boundary):
0c265486
YCH
3056 content_type = 'multipart/form-data; boundary=%s' % boundary
3057
3058 out = b''
3059 for k, v in data.items():
3060 out += b'--' + boundary.encode('ascii') + b'\r\n'
14f25df2 3061 if isinstance(k, str):
0f06bcd7 3062 k = k.encode()
14f25df2 3063 if isinstance(v, str):
0f06bcd7 3064 v = v.encode()
0c265486
YCH
3065 # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
3066 # suggests sending UTF-8 directly. Firefox sends UTF-8, too
b2ad479d 3067 content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
0c265486
YCH
3068 if boundary.encode('ascii') in content:
3069 raise ValueError('Boundary overlaps with data')
3070 out += content
3071
3072 out += b'--' + boundary.encode('ascii') + b'--\r\n'
3073
3074 return out, content_type
3075
3076
3077def multipart_encode(data, boundary=None):
3078 '''
3079 Encode a dict to RFC 7578-compliant form-data
3080
3081 data:
3082 A dict where keys and values can be either Unicode or bytes-like
3083 objects.
3084 boundary:
 3085 If specified, it must be a Unicode object and is used as the boundary. Otherwise
3086 a random boundary is generated.
3087
3088 Reference: https://tools.ietf.org/html/rfc7578
3089 '''
3090 has_specified_boundary = boundary is not None
3091
3092 while True:
3093 if boundary is None:
3094 boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
3095
3096 try:
10c87c15 3097 out, content_type = _multipart_encode_impl(data, boundary)
0c265486
YCH
3098 break
3099 except ValueError:
3100 if has_specified_boundary:
3101 raise
3102 boundary = None
3103
3104 return out, content_type
3105
3106
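# Illustrative usage (a fixed boundary is passed only to make the output deterministic):
#
#   >>> body, content_type = multipart_encode({'field': 'value'}, boundary='AaB03x')
#   >>> content_type
#   'multipart/form-data; boundary=AaB03x'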
86296ad2 3107def dict_get(d, key_or_keys, default=None, skip_false_values=True):
a44ca5a4 3108 for val in map(d.get, variadic(key_or_keys)):
3109 if val is not None and (val or not skip_false_values):
3110 return val
3111 return default
cbecc9b9
S
3112
3113
c4f60dd7 3114def try_call(*funcs, expected_type=None, args=[], kwargs={}):
3115 for f in funcs:
a32a9a7e 3116 try:
c4f60dd7 3117 val = f(*args, **kwargs)
3118 except (AttributeError, KeyError, TypeError, IndexError, ZeroDivisionError):
a32a9a7e
S
3119 pass
3120 else:
c4f60dd7 3121 if expected_type is None or isinstance(val, expected_type):
3122 return val
3123
3124
3125def try_get(src, getter, expected_type=None):
3126 return try_call(*variadic(getter), args=(src,), expected_type=expected_type)
329ca3be
S
3127
3128
90137ca4 3129def filter_dict(dct, cndn=lambda _, v: v is not None):
3130 return {k: v for k, v in dct.items() if cndn(k, v)}
3131
3132
6cc62232
S
3133def merge_dicts(*dicts):
3134 merged = {}
3135 for a_dict in dicts:
3136 for k, v in a_dict.items():
90137ca4 3137 if (v is not None and k not in merged
3138 or isinstance(v, str) and merged[k] == ''):
6cc62232
S
3139 merged[k] = v
3140 return merged
3141
3142
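# Illustrative sketch of merge_dicts: earlier dicts take precedence, except that
# an empty string may be overwritten by a later non-empty one (values assumed):
from yt_dlp.utils import merge_dicts
assert merge_dicts({'a': 1, 'b': ''}, {'a': 2, 'b': 'x', 'c': None}) == {'a': 1, 'b': 'x'}
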
8e60dc75 3143def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
14f25df2 3144 return string if isinstance(string, str) else str(string, encoding, errors)
8e60dc75 3145
16392824 3146
a1a530b0
PH
3147US_RATINGS = {
3148 'G': 0,
3149 'PG': 10,
3150 'PG-13': 13,
3151 'R': 16,
3152 'NC': 18,
3153}
fac55558
PH
3154
3155
a8795327 3156TV_PARENTAL_GUIDELINES = {
5a16c9d9
RA
3157 'TV-Y': 0,
3158 'TV-Y7': 7,
3159 'TV-G': 0,
3160 'TV-PG': 0,
3161 'TV-14': 14,
3162 'TV-MA': 17,
a8795327
S
3163}
3164
3165
146c80e2 3166def parse_age_limit(s):
19a03940 3167 # isinstance(False, int) is True. So type() must be used instead
c487cf00 3168 if type(s) is int: # noqa: E721
a8795327 3169 return s if 0 <= s <= 21 else None
19a03940 3170 elif not isinstance(s, str):
d838b1bd 3171 return None
146c80e2 3172 m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
a8795327
S
3173 if m:
3174 return int(m.group('age'))
5c5fae6d 3175 s = s.upper()
a8795327
S
3176 if s in US_RATINGS:
3177 return US_RATINGS[s]
5a16c9d9 3178 m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
b8361187 3179 if m:
5a16c9d9 3180 return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
b8361187 3181 return None
146c80e2
S
3182
3183
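# Illustrative sketch of the inputs parse_age_limit accepts (sample values assumed):
from yt_dlp.utils import parse_age_limit
assert parse_age_limit(18) == 18       # plain ints within 0..21 pass through
assert parse_age_limit('16+') == 16    # "NN+" style
assert parse_age_limit('PG-13') == 13  # US movie ratings
assert parse_age_limit('TV-MA') == 17  # TV parental guidelines
assert parse_age_limit(False) is None  # bools are rejected even though isinstance(False, int)
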
fac55558 3184def strip_jsonp(code):
609a61e3 3185 return re.sub(
5552c9eb 3186 r'''(?sx)^
e9c671d5 3187 (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
5552c9eb
YCH
3188 (?:\s*&&\s*(?P=func_name))?
3189 \s*\(\s*(?P<callback_data>.*)\);?
3190 \s*?(?://[^\n]*)*$''',
3191 r'\g<callback_data>', code)
478c2c61
PH
3192
3193
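# Illustrative sketch: strip_jsonp unwraps a JSONP callback into plain JSON
# (the callback payload is an assumed sample):
from yt_dlp.utils import strip_jsonp
assert strip_jsonp('window.callback({"id": "x"});') == '{"id": "x"}'
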
5c610515 3194def js_to_json(code, vars={}):
3195 # vars is a dict of var, val pairs to substitute
c843e685 3196 COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
86e5f3ed 3197 SKIP_RE = fr'\s*(?:{COMMENT_RE})?\s*'
4195096e 3198 INTEGER_TABLE = (
86e5f3ed 3199 (fr'(?s)^(0[xX][0-9a-fA-F]+){SKIP_RE}:?$', 16),
3200 (fr'(?s)^(0+[0-7]+){SKIP_RE}:?$', 8),
4195096e
S
3201 )
3202
e05f6939 3203 def fix_kv(m):
e7b6d122
PH
3204 v = m.group(0)
3205 if v in ('true', 'false', 'null'):
3206 return v
421ddcb8
C
3207 elif v in ('undefined', 'void 0'):
3208 return 'null'
8bdd16b4 3209 elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
bd1e4844 3210 return ""
3211
3212 if v[0] in ("'", '"'):
3213 v = re.sub(r'(?s)\\.|"', lambda m: {
e7b6d122 3214 '"': '\\"',
bd1e4844 3215 "\\'": "'",
3216 '\\\n': '',
3217 '\\x': '\\u00',
3218 }.get(m.group(0), m.group(0)), v[1:-1])
8bdd16b4 3219 else:
3220 for regex, base in INTEGER_TABLE:
3221 im = re.match(regex, v)
3222 if im:
3223 i = int(im.group(1), base)
3224 return '"%d":' % i if v.endswith(':') else '%d' % i
89ac4a19 3225
5c610515 3226 if v in vars:
3227 return vars[v]
3228
e7b6d122 3229 return '"%s"' % v
e05f6939 3230
8072ef2b 3231 def create_map(mobj):
3232 return json.dumps(dict(json.loads(js_to_json(mobj.group(1) or '[]', vars=vars))))
3233
febff4c1 3234 code = re.sub(r'new Date\((".+")\)', r'\g<1>', code)
8072ef2b 3235 code = re.sub(r'new Map\((\[.*?\])?\)', create_map, code)
febff4c1 3236
bd1e4844 3237 return re.sub(r'''(?sx)
3238 "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
3239 '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
4195096e 3240 {comment}|,(?={skip}[\]}}])|
421ddcb8 3241 void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
4195096e 3242 \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
8bdd16b4 3243 [0-9]+(?={skip}:)|
3244 !+
4195096e 3245 '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
e05f6939
PH
3246
3247
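# Illustrative sketch: js_to_json normalises unquoted keys, single quotes, hex
# literals, `undefined` and trailing commas into strict JSON (input assumed):
import json
from yt_dlp.utils import js_to_json
assert json.loads(js_to_json("{abc: 'def', num: 0x1A, ok: true, missing: undefined,}")) == \
    {'abc': 'def', 'num': 26, 'ok': True, 'missing': None}
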
478c2c61
PH
3248def qualities(quality_ids):
3249 """ Get a numeric quality value out of a list of possible values """
3250 def q(qid):
3251 try:
3252 return quality_ids.index(qid)
3253 except ValueError:
3254 return -1
3255 return q
3256
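# Illustrative sketch: the callable returned by qualities() maps a quality id to
# its position in the list, or -1 when unknown (sample list assumed):
from yt_dlp.utils import qualities
q = qualities(['240p', '480p', '720p'])
assert q('720p') == 2 and q('240p') == 0 and q('4320p') == -1
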
acd69589 3257
8aa0e7cd 3258POSTPROCESS_WHEN = ('pre_process', 'after_filter', 'before_dl', 'post_process', 'after_move', 'after_video', 'playlist')
1e43a6f7 3259
3260
de6000d9 3261DEFAULT_OUTTMPL = {
3262 'default': '%(title)s [%(id)s].%(ext)s',
72755351 3263 'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
de6000d9 3264}
3265OUTTMPL_TYPES = {
72755351 3266 'chapter': None,
de6000d9 3267 'subtitle': None,
3268 'thumbnail': None,
3269 'description': 'description',
3270 'annotation': 'annotations.xml',
3271 'infojson': 'info.json',
08438d2c 3272 'link': None,
3b603dbd 3273 'pl_video': None,
5112f26a 3274 'pl_thumbnail': None,
de6000d9 3275 'pl_description': 'description',
3276 'pl_infojson': 'info.json',
3277}
0a871f68 3278
143db31d 3279# As of [1], the format syntax is:
3280# %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
3281# 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
901130bb 3282STR_FORMAT_RE_TMPL = r'''(?x)
3283 (?<!%)(?P<prefix>(?:%%)*)
143db31d 3284 %
524e2e4f 3285 (?P<has_key>\((?P<key>{0})\))?
752cda38 3286 (?P<format>
524e2e4f 3287 (?P<conversion>[#0\-+ ]+)?
3288 (?P<min_width>\d+)?
3289 (?P<precision>\.\d+)?
3290 (?P<len_mod>[hlL])? # unused in python
901130bb 3291 {1} # conversion type
752cda38 3292 )
143db31d 3293'''
3294
7d1eb38a 3295
901130bb 3296STR_FORMAT_TYPES = 'diouxXeEfFgGcrs'
a020a0dc 3297
7d1eb38a 3298
a020a0dc
PH
3299def limit_length(s, length):
3300 """ Add ellipses to overly long strings """
3301 if s is None:
3302 return None
3303 ELLIPSES = '...'
3304 if len(s) > length:
3305 return s[:length - len(ELLIPSES)] + ELLIPSES
3306 return s
48844745
PH
3307
3308
3309def version_tuple(v):
5f9b8394 3310 return tuple(int(e) for e in re.split(r'[-.]', v))
48844745
PH
3311
3312
3313def is_outdated_version(version, limit, assume_new=True):
3314 if not version:
3315 return not assume_new
3316 try:
3317 return version_tuple(version) < version_tuple(limit)
3318 except ValueError:
3319 return not assume_new
732ea2f0
PH
3320
3321
3322def ytdl_is_updateable():
7a5c1cfe 3323 """ Returns whether yt-dlp can be updated with -U """
735d865e 3324
5d535b4a 3325 from .update import is_non_updateable
732ea2f0 3326
5d535b4a 3327 return not is_non_updateable()
7d4111ed
PH
3328
3329
3330def args_to_str(args):
3331 # Get a short string representation for a subprocess command
702ccf2d 3332 return ' '.join(compat_shlex_quote(a) for a in args)
2ccd1b10
PH
3333
3334
9b9c5355 3335def error_to_compat_str(err):
cfb0511d 3336 return str(err)
fdae2358
S
3337
3338
a44ca5a4 3339def error_to_str(err):
3340 return f'{type(err).__name__}: {err}'
3341
3342
c460bdd5 3343def mimetype2ext(mt):
eb9ee194
S
3344 if mt is None:
3345 return None
3346
9359f3d4
F
3347 mt, _, params = mt.partition(';')
3348 mt = mt.strip()
3349
3350 FULL_MAP = {
765ac263 3351 'audio/mp4': 'm4a',
6c33d24b
YCH
3352 # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
3353 # it's the most popular one
3354 'audio/mpeg': 'mp3',
ba39289d 3355 'audio/x-wav': 'wav',
9359f3d4
F
3356 'audio/wav': 'wav',
3357 'audio/wave': 'wav',
3358 }
3359
3360 ext = FULL_MAP.get(mt)
765ac263
JMF
3361 if ext is not None:
3362 return ext
3363
9359f3d4 3364 SUBTYPE_MAP = {
f6861ec9 3365 '3gpp': '3gp',
cafcf657 3366 'smptett+xml': 'tt',
cafcf657 3367 'ttaf+xml': 'dfxp',
a0d8d704 3368 'ttml+xml': 'ttml',
f6861ec9 3369 'x-flv': 'flv',
a0d8d704 3370 'x-mp4-fragmented': 'mp4',
d4f05d47 3371 'x-ms-sami': 'sami',
a0d8d704 3372 'x-ms-wmv': 'wmv',
b4173f15
RA
3373 'mpegurl': 'm3u8',
3374 'x-mpegurl': 'm3u8',
3375 'vnd.apple.mpegurl': 'm3u8',
3376 'dash+xml': 'mpd',
b4173f15 3377 'f4m+xml': 'f4m',
f164b971 3378 'hds+xml': 'f4m',
e910fe2f 3379 'vnd.ms-sstr+xml': 'ism',
c2b2c7e1 3380 'quicktime': 'mov',
98ce1a3f 3381 'mp2t': 'ts',
39e7107d 3382 'x-wav': 'wav',
9359f3d4
F
3383 'filmstrip+json': 'fs',
3384 'svg+xml': 'svg',
3385 }
3386
3387 _, _, subtype = mt.rpartition('/')
3388 ext = SUBTYPE_MAP.get(subtype.lower())
3389 if ext is not None:
3390 return ext
3391
3392 SUFFIX_MAP = {
3393 'json': 'json',
3394 'xml': 'xml',
3395 'zip': 'zip',
3396 'gzip': 'gz',
3397 }
3398
3399 _, _, suffix = subtype.partition('+')
3400 ext = SUFFIX_MAP.get(suffix)
3401 if ext is not None:
3402 return ext
3403
3404 return subtype.replace('+', '.')
c460bdd5
PH
3405
3406
2814f12b
THD
3407def ext2mimetype(ext_or_url):
3408 if not ext_or_url:
3409 return None
3410 if '.' not in ext_or_url:
3411 ext_or_url = f'file.{ext_or_url}'
3412 return mimetypes.guess_type(ext_or_url)[0]
3413
3414
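# Illustrative sketch of the MIME helpers above (sample types assumed; the
# reverse lookup goes through the stdlib mimetypes table, so its exact result
# can vary slightly between platforms):
from yt_dlp.utils import ext2mimetype, mimetype2ext
assert mimetype2ext('audio/mp4') == 'm4a'                       # full-type lookup
assert mimetype2ext('application/vnd.apple.mpegurl') == 'm3u8'  # subtype lookup
assert mimetype2ext('application/dash+xml; charset=utf-8') == 'mpd'
print(ext2mimetype('mp4'))  # typically 'video/mp4'
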
4f3c5e06 3415def parse_codecs(codecs_str):
3416 # http://tools.ietf.org/html/rfc6381
3417 if not codecs_str:
3418 return {}
a0566bbf 3419 split_codecs = list(filter(None, map(
dbf5416a 3420 str.strip, codecs_str.strip().strip(',').split(','))))
3fe75fdc 3421 vcodec, acodec, scodec, hdr = None, None, None, None
a0566bbf 3422 for full_codec in split_codecs:
d816f61f 3423 parts = re.sub(r'0+(?=\d)', '', full_codec).split('.')
3424 if parts[0] in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
3425 'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
3426 if vcodec:
3427 continue
3428 vcodec = full_codec
3429 if parts[0] in ('dvh1', 'dvhe'):
3430 hdr = 'DV'
3431 elif parts[0] == 'av1' and traverse_obj(parts, 3) == '10':
3432 hdr = 'HDR10'
3433 elif parts[:2] == ['vp9', '2']:
3434 hdr = 'HDR10'
3435 elif parts[0] in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac',
3436 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
3437 acodec = acodec or full_codec
3438 elif parts[0] in ('stpp', 'wvtt'):
3439 scodec = scodec or full_codec
4f3c5e06 3440 else:
19a03940 3441 write_string(f'WARNING: Unknown codec {full_codec}\n')
3fe75fdc 3442 if vcodec or acodec or scodec:
4f3c5e06 3443 return {
3444 'vcodec': vcodec or 'none',
3445 'acodec': acodec or 'none',
176f1866 3446 'dynamic_range': hdr,
3fe75fdc 3447 **({'scodec': scodec} if scodec is not None else {}),
4f3c5e06 3448 }
b69fd25c 3449 elif len(split_codecs) == 2:
3450 return {
3451 'vcodec': split_codecs[0],
3452 'acodec': split_codecs[1],
3453 }
4f3c5e06 3454 return {}
3455
3456
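# Illustrative sketch: a typical DASH/HLS CODECS attribute is split into video
# and audio codecs; Dolby Vision fourccs also set the dynamic range (values assumed):
from yt_dlp.utils import parse_codecs
assert parse_codecs('avc1.64001f, mp4a.40.2') == {
    'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2', 'dynamic_range': None}
assert parse_codecs('dvh1.05.06') == {
    'vcodec': 'dvh1.05.06', 'acodec': 'none', 'dynamic_range': 'DV'}
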
2ccd1b10 3457def urlhandle_detect_ext(url_handle):
79298173 3458 getheader = url_handle.headers.get
2ccd1b10 3459
b55ee18f
PH
3460 cd = getheader('Content-Disposition')
3461 if cd:
3462 m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
3463 if m:
3464 e = determine_ext(m.group('filename'), default_ext=None)
3465 if e:
3466 return e
3467
c460bdd5 3468 return mimetype2ext(getheader('Content-Type'))
05900629
PH
3469
3470
1e399778
YCH
3471def encode_data_uri(data, mime_type):
3472 return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
3473
3474
05900629 3475def age_restricted(content_limit, age_limit):
6ec6cb4e 3476 """ Returns True iff the content should be blocked """
05900629
PH
3477
3478 if age_limit is None: # No limit set
3479 return False
3480 if content_limit is None:
3481 return False # Content available for everyone
3482 return age_limit < content_limit
61ca9a80
PH
3483
3484
88f60feb 3485# List of known byte-order marks (BOMs)
a904a7f8
L
3486BOMS = [
3487 (b'\xef\xbb\xbf', 'utf-8'),
3488 (b'\x00\x00\xfe\xff', 'utf-32-be'),
3489 (b'\xff\xfe\x00\x00', 'utf-32-le'),
3490 (b'\xff\xfe', 'utf-16-le'),
3491 (b'\xfe\xff', 'utf-16-be'),
3492]
a904a7f8
L
3493
3494
61ca9a80
PH
3495def is_html(first_bytes):
3496 """ Detect whether a file contains HTML by examining its first bytes. """
3497
80e8493e 3498 encoding = 'utf-8'
61ca9a80 3499 for bom, enc in BOMS:
80e8493e 3500 while first_bytes.startswith(bom):
3501 encoding, first_bytes = enc, first_bytes[len(bom):]
61ca9a80 3502
80e8493e 3503 return re.match(r'^\s*<', first_bytes.decode(encoding, 'replace'))
a055469f
PH
3504
3505
3506def determine_protocol(info_dict):
3507 protocol = info_dict.get('protocol')
3508 if protocol is not None:
3509 return protocol
3510
7de837a5 3511 url = sanitize_url(info_dict['url'])
a055469f
PH
3512 if url.startswith('rtmp'):
3513 return 'rtmp'
3514 elif url.startswith('mms'):
3515 return 'mms'
3516 elif url.startswith('rtsp'):
3517 return 'rtsp'
3518
3519 ext = determine_ext(url)
3520 if ext == 'm3u8':
3521 return 'm3u8'
3522 elif ext == 'f4m':
3523 return 'f4m'
3524
14f25df2 3525 return urllib.parse.urlparse(url).scheme
cfb56d1a
PH
3526
3527
c5e3f849 3528def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False):
3529 """ Render a list of rows, each as a list of values.
3530 Text after a \t will be right aligned """
ec11a9f4 3531 def width(string):
c5e3f849 3532 return len(remove_terminal_sequences(string).replace('\t', ''))
76d321f6 3533
3534 def get_max_lens(table):
ec11a9f4 3535 return [max(width(str(v)) for v in col) for col in zip(*table)]
76d321f6 3536
3537 def filter_using_list(row, filterArray):
d16df59d 3538 return [col for take, col in itertools.zip_longest(filterArray, row, fillvalue=True) if take]
76d321f6 3539
d16df59d 3540 max_lens = get_max_lens(data) if hide_empty else []
3541 header_row = filter_using_list(header_row, max_lens)
3542 data = [filter_using_list(row, max_lens) for row in data]
76d321f6 3543
cfb56d1a 3544 table = [header_row] + data
76d321f6 3545 max_lens = get_max_lens(table)
c5e3f849 3546 extra_gap += 1
76d321f6 3547 if delim:
c5e3f849 3548 table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data
1ed7953a 3549 table[1][-1] = table[1][-1][:-extra_gap * len(delim)] # Remove extra_gap from end of delimiter
ec11a9f4 3550 for row in table:
3551 for pos, text in enumerate(map(str, row)):
c5e3f849 3552 if '\t' in text:
3553 row[pos] = text.replace('\t', ' ' * (max_lens[pos] - width(text))) + ' ' * extra_gap
3554 else:
3555 row[pos] = text + ' ' * (max_lens[pos] - width(text) + extra_gap)
3556 ret = '\n'.join(''.join(row).rstrip() for row in table)
ec11a9f4 3557 return ret
347de493
PH
3558
3559
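# Illustrative sketch of render_table with a delimiter row (sample rows assumed):
from yt_dlp.utils import render_table
print(render_table(['ID', 'EXT'], [['137', 'mp4'], ['22', 'mp4']], delim='-'))
# ID  EXT
# -------
# 137 mp4
# 22  mp4
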
8f18aca8 3560def _match_one(filter_part, dct, incomplete):
77b87f05 3561 # TODO: Generalize code with YoutubeDL._build_format_filter
a047eeb6 3562 STRING_OPERATORS = {
3563 '*=': operator.contains,
3564 '^=': lambda attr, value: attr.startswith(value),
3565 '$=': lambda attr, value: attr.endswith(value),
3566 '~=': lambda attr, value: re.search(value, attr),
3567 }
347de493 3568 COMPARISON_OPERATORS = {
a047eeb6 3569 **STRING_OPERATORS,
3570 '<=': operator.le, # "<=" must be defined above "<"
347de493 3571 '<': operator.lt,
347de493 3572 '>=': operator.ge,
a047eeb6 3573 '>': operator.gt,
347de493 3574 '=': operator.eq,
347de493 3575 }
a047eeb6 3576
6db9c4d5 3577 if isinstance(incomplete, bool):
3578 is_incomplete = lambda _: incomplete
3579 else:
3580 is_incomplete = lambda k: k in incomplete
3581
64fa820c 3582 operator_rex = re.compile(r'''(?x)
347de493 3583 (?P<key>[a-z_]+)
77b87f05 3584 \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
347de493 3585 (?:
a047eeb6 3586 (?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
3587 (?P<strval>.+?)
347de493 3588 )
347de493 3589 ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
64fa820c 3590 m = operator_rex.fullmatch(filter_part.strip())
347de493 3591 if m:
18f96d12 3592 m = m.groupdict()
3593 unnegated_op = COMPARISON_OPERATORS[m['op']]
3594 if m['negation']:
77b87f05
MT
3595 op = lambda attr, value: not unnegated_op(attr, value)
3596 else:
3597 op = unnegated_op
18f96d12 3598 comparison_value = m['quotedstrval'] or m['strval'] or m['intval']
3599 if m['quote']:
3600 comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote'])
3601 actual_value = dct.get(m['key'])
3602 numeric_comparison = None
f9934b96 3603 if isinstance(actual_value, (int, float)):
e5a088dc
S
 3604 # If the original field is a string and the matching comparison value is
 3605 # a number, we should respect the origin of the original field
 3606 # and process the comparison value as a string (see
18f96d12 3607 # https://github.com/ytdl-org/youtube-dl/issues/11082)
347de493 3608 try:
18f96d12 3609 numeric_comparison = int(comparison_value)
347de493 3610 except ValueError:
18f96d12 3611 numeric_comparison = parse_filesize(comparison_value)
3612 if numeric_comparison is None:
3613 numeric_comparison = parse_filesize(f'{comparison_value}B')
3614 if numeric_comparison is None:
3615 numeric_comparison = parse_duration(comparison_value)
3616 if numeric_comparison is not None and m['op'] in STRING_OPERATORS:
3617 raise ValueError('Operator %s only supports string values!' % m['op'])
347de493 3618 if actual_value is None:
6db9c4d5 3619 return is_incomplete(m['key']) or m['none_inclusive']
18f96d12 3620 return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)
347de493
PH
3621
3622 UNARY_OPERATORS = {
1cc47c66
S
3623 '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
3624 '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
347de493 3625 }
64fa820c 3626 operator_rex = re.compile(r'''(?x)
347de493 3627 (?P<op>%s)\s*(?P<key>[a-z_]+)
347de493 3628 ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
64fa820c 3629 m = operator_rex.fullmatch(filter_part.strip())
347de493
PH
3630 if m:
3631 op = UNARY_OPERATORS[m.group('op')]
3632 actual_value = dct.get(m.group('key'))
6db9c4d5 3633 if is_incomplete(m.group('key')) and actual_value is None:
8f18aca8 3634 return True
347de493
PH
3635 return op(actual_value)
3636
3637 raise ValueError('Invalid filter part %r' % filter_part)
3638
3639
8f18aca8 3640def match_str(filter_str, dct, incomplete=False):
6db9c4d5 3641 """ Filter a dictionary with a simple string syntax.
3642 @returns Whether the filter passes
 3643 @param incomplete Set of keys that are expected to be missing from dct.
 3644 Can be True/False to indicate all/none of the keys may be missing.
 3645 All conditions on incomplete keys pass if the key is missing.
8f18aca8 3646 """
347de493 3647 return all(
8f18aca8 3648 _match_one(filter_part.replace(r'\&', '&'), dct, incomplete)
a047eeb6 3649 for filter_part in re.split(r'(?<!\\)&', filter_str))
347de493
PH
3650
3651
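# Illustrative sketch of the --match-filter mini-language handled above; the
# info dict is an assumed sample ("~=" performs a regex search):
from yt_dlp.utils import match_str
info = {'like_count': 100, 'uploader': 'some channel', 'is_live': False}
assert match_str('like_count > 50 & uploader ~= channel', info)
assert not match_str('is_live', info)
assert match_str('view_count > 10', info, incomplete=True)  # missing keys pass when incomplete
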
b1a7cd05 3652def match_filter_func(filters):
3653 if not filters:
d1b5f70b 3654 return None
492272fe 3655 filters = set(variadic(filters))
d1b5f70b 3656
492272fe 3657 interactive = '-' in filters
3658 if interactive:
3659 filters.remove('-')
3660
3661 def _match_func(info_dict, incomplete=False):
3662 if not filters or any(match_str(f, info_dict, incomplete) for f in filters):
3663 return NO_DEFAULT if interactive and not incomplete else None
347de493 3664 else:
3bec830a 3665 video_title = info_dict.get('title') or info_dict.get('id') or 'entry'
b1a7cd05 3666 filter_str = ') | ('.join(map(str.strip, filters))
3667 return f'{video_title} does not pass filter ({filter_str}), skipping ..'
347de493 3668 return _match_func
91410c9b
PH
3669
3670
f2df4071 3671class download_range_func:
3672 def __init__(self, chapters, ranges):
3673 self.chapters, self.ranges = chapters, ranges
3674
3675 def __call__(self, info_dict, ydl):
5ec1b6b7 3676 warning = ('There are no chapters matching the regex' if info_dict.get('chapters')
56ba69e4 3677 else 'Cannot match chapters since chapter information is unavailable')
f2df4071 3678 for regex in self.chapters or []:
5ec1b6b7 3679 for i, chapter in enumerate(info_dict.get('chapters') or []):
3680 if re.search(regex, chapter['title']):
3681 warning = None
3682 yield {**chapter, 'index': i}
f2df4071 3683 if self.chapters and warning:
5ec1b6b7 3684 ydl.to_screen(f'[info] {info_dict["id"]}: {warning}')
3685
f2df4071 3686 yield from ({'start_time': start, 'end_time': end} for start, end in self.ranges or [])
5ec1b6b7 3687
f2df4071 3688 def __eq__(self, other):
3689 return (isinstance(other, download_range_func)
3690 and self.chapters == other.chapters and self.ranges == other.ranges)
5ec1b6b7 3691
3692
bf6427d2
YCH
3693def parse_dfxp_time_expr(time_expr):
3694 if not time_expr:
d631d5f9 3695 return
bf6427d2 3696
1d485a1a 3697 mobj = re.match(rf'^(?P<time_offset>{NUMBER_RE})s?$', time_expr)
bf6427d2
YCH
3698 if mobj:
3699 return float(mobj.group('time_offset'))
3700
db2fe38b 3701 mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
bf6427d2 3702 if mobj:
db2fe38b 3703 return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
bf6427d2
YCH
3704
3705
c1c924ab 3706def srt_subtitles_timecode(seconds):
aa7785f8 3707 return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)
3708
3709
3710def ass_subtitles_timecode(seconds):
3711 time = timetuple_from_msec(seconds * 1000)
3712 return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)
bf6427d2
YCH
3713
3714
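# Illustrative sketch: DFXP clock values parse into seconds, which the helpers
# above then format for SRT and ASS output (sample values assumed):
from yt_dlp.utils import ass_subtitles_timecode, parse_dfxp_time_expr, srt_subtitles_timecode
assert parse_dfxp_time_expr('00:01:02.500') == 62.5
assert srt_subtitles_timecode(62.5) == '00:01:02,500'
assert ass_subtitles_timecode(62.5) == '0:01:02.50'
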
3715def dfxp2srt(dfxp_data):
3869028f
YCH
3716 '''
3717 @param dfxp_data A bytes-like object containing DFXP data
3718 @returns A unicode object containing converted SRT data
3719 '''
5b995f71 3720 LEGACY_NAMESPACES = (
3869028f
YCH
3721 (b'http://www.w3.org/ns/ttml', [
3722 b'http://www.w3.org/2004/11/ttaf1',
3723 b'http://www.w3.org/2006/04/ttaf1',
3724 b'http://www.w3.org/2006/10/ttaf1',
5b995f71 3725 ]),
3869028f
YCH
3726 (b'http://www.w3.org/ns/ttml#styling', [
3727 b'http://www.w3.org/ns/ttml#style',
5b995f71
RA
3728 ]),
3729 )
3730
3731 SUPPORTED_STYLING = [
3732 'color',
3733 'fontFamily',
3734 'fontSize',
3735 'fontStyle',
3736 'fontWeight',
3737 'textDecoration'
3738 ]
3739
4e335771 3740 _x = functools.partial(xpath_with_ns, ns_map={
261f4730 3741 'xml': 'http://www.w3.org/XML/1998/namespace',
4e335771 3742 'ttml': 'http://www.w3.org/ns/ttml',
5b995f71 3743 'tts': 'http://www.w3.org/ns/ttml#styling',
4e335771 3744 })
bf6427d2 3745
5b995f71
RA
3746 styles = {}
3747 default_style = {}
3748
86e5f3ed 3749 class TTMLPElementParser:
5b995f71
RA
3750 _out = ''
3751 _unclosed_elements = []
3752 _applied_styles = []
bf6427d2 3753
2b14cb56 3754 def start(self, tag, attrib):
5b995f71
RA
3755 if tag in (_x('ttml:br'), 'br'):
3756 self._out += '\n'
3757 else:
3758 unclosed_elements = []
3759 style = {}
3760 element_style_id = attrib.get('style')
3761 if default_style:
3762 style.update(default_style)
3763 if element_style_id:
3764 style.update(styles.get(element_style_id, {}))
3765 for prop in SUPPORTED_STYLING:
3766 prop_val = attrib.get(_x('tts:' + prop))
3767 if prop_val:
3768 style[prop] = prop_val
3769 if style:
3770 font = ''
3771 for k, v in sorted(style.items()):
3772 if self._applied_styles and self._applied_styles[-1].get(k) == v:
3773 continue
3774 if k == 'color':
3775 font += ' color="%s"' % v
3776 elif k == 'fontSize':
3777 font += ' size="%s"' % v
3778 elif k == 'fontFamily':
3779 font += ' face="%s"' % v
3780 elif k == 'fontWeight' and v == 'bold':
3781 self._out += '<b>'
3782 unclosed_elements.append('b')
3783 elif k == 'fontStyle' and v == 'italic':
3784 self._out += '<i>'
3785 unclosed_elements.append('i')
3786 elif k == 'textDecoration' and v == 'underline':
3787 self._out += '<u>'
3788 unclosed_elements.append('u')
3789 if font:
3790 self._out += '<font' + font + '>'
3791 unclosed_elements.append('font')
3792 applied_style = {}
3793 if self._applied_styles:
3794 applied_style.update(self._applied_styles[-1])
3795 applied_style.update(style)
3796 self._applied_styles.append(applied_style)
3797 self._unclosed_elements.append(unclosed_elements)
bf6427d2 3798
2b14cb56 3799 def end(self, tag):
5b995f71
RA
3800 if tag not in (_x('ttml:br'), 'br'):
3801 unclosed_elements = self._unclosed_elements.pop()
3802 for element in reversed(unclosed_elements):
3803 self._out += '</%s>' % element
3804 if unclosed_elements and self._applied_styles:
3805 self._applied_styles.pop()
bf6427d2 3806
2b14cb56 3807 def data(self, data):
5b995f71 3808 self._out += data
2b14cb56 3809
3810 def close(self):
5b995f71 3811 return self._out.strip()
2b14cb56 3812
3813 def parse_node(node):
3814 target = TTMLPElementParser()
3815 parser = xml.etree.ElementTree.XMLParser(target=target)
3816 parser.feed(xml.etree.ElementTree.tostring(node))
3817 return parser.close()
bf6427d2 3818
5b995f71
RA
3819 for k, v in LEGACY_NAMESPACES:
3820 for ns in v:
3821 dfxp_data = dfxp_data.replace(ns, k)
3822
3869028f 3823 dfxp = compat_etree_fromstring(dfxp_data)
bf6427d2 3824 out = []
5b995f71 3825 paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
1b0427e6
YCH
3826
3827 if not paras:
3828 raise ValueError('Invalid dfxp/TTML subtitle')
bf6427d2 3829
5b995f71
RA
3830 repeat = False
3831 while True:
3832 for style in dfxp.findall(_x('.//ttml:style')):
261f4730
RA
3833 style_id = style.get('id') or style.get(_x('xml:id'))
3834 if not style_id:
3835 continue
5b995f71
RA
3836 parent_style_id = style.get('style')
3837 if parent_style_id:
3838 if parent_style_id not in styles:
3839 repeat = True
3840 continue
3841 styles[style_id] = styles[parent_style_id].copy()
3842 for prop in SUPPORTED_STYLING:
3843 prop_val = style.get(_x('tts:' + prop))
3844 if prop_val:
3845 styles.setdefault(style_id, {})[prop] = prop_val
3846 if repeat:
3847 repeat = False
3848 else:
3849 break
3850
3851 for p in ('body', 'div'):
3852 ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
3853 if ele is None:
3854 continue
3855 style = styles.get(ele.get('style'))
3856 if not style:
3857 continue
3858 default_style.update(style)
3859
bf6427d2 3860 for para, index in zip(paras, itertools.count(1)):
d631d5f9 3861 begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
7dff0363 3862 end_time = parse_dfxp_time_expr(para.attrib.get('end'))
d631d5f9
YCH
3863 dur = parse_dfxp_time_expr(para.attrib.get('dur'))
3864 if begin_time is None:
3865 continue
7dff0363 3866 if not end_time:
d631d5f9
YCH
3867 if not dur:
3868 continue
3869 end_time = begin_time + dur
bf6427d2
YCH
3870 out.append('%d\n%s --> %s\n%s\n\n' % (
3871 index,
c1c924ab
YCH
3872 srt_subtitles_timecode(begin_time),
3873 srt_subtitles_timecode(end_time),
bf6427d2
YCH
3874 parse_node(para)))
3875
3876 return ''.join(out)
3877
3878
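# Illustrative sketch: a minimal TTML/DFXP document converted to SRT (the markup
# below is an assumed sample):
from yt_dlp.utils import dfxp2srt
dfxp = b'''<tt xmlns="http://www.w3.org/ns/ttml">
  <body><div><p begin="0.0" end="1.5">Hello</p></div></body>
</tt>'''
assert dfxp2srt(dfxp) == '1\n00:00:00,000 --> 00:00:01,500\nHello\n\n'
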
c487cf00 3879def cli_option(params, command_option, param, separator=None):
66e289ba 3880 param = params.get(param)
c487cf00 3881 return ([] if param is None
3882 else [command_option, str(param)] if separator is None
3883 else [f'{command_option}{separator}{param}'])
66e289ba
S
3884
3885
3886def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
3887 param = params.get(param)
c487cf00 3888 assert param in (True, False, None)
3889 return cli_option({True: true_value, False: false_value}, command_option, param, separator)
66e289ba
S
3890
3891
3892def cli_valueless_option(params, command_option, param, expected_value=True):
c487cf00 3893 return [command_option] if params.get(param) == expected_value else []
66e289ba
S
3894
3895
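# Illustrative sketch: mapping yt-dlp params onto external-command arguments
# (the params dict and the option names are assumed samples):
from yt_dlp.utils import cli_bool_option, cli_option, cli_valueless_option
params = {'proxy': 'socks5://127.0.0.1:1080', 'nocheckcertificate': True}
assert cli_option(params, '--proxy', 'proxy') == ['--proxy', 'socks5://127.0.0.1:1080']
assert cli_bool_option(params, '--check-certificate', 'nocheckcertificate', 'false', 'true') == ['--check-certificate', 'false']
assert cli_valueless_option(params, '--no-check-certificate', 'nocheckcertificate') == ['--no-check-certificate']
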
e92caff5 3896def cli_configuration_args(argdict, keys, default=[], use_compat=True):
eab9b2bc 3897 if isinstance(argdict, (list, tuple)): # for backward compatibility
e92caff5 3898 if use_compat:
5b1ecbb3 3899 return argdict
3900 else:
3901 argdict = None
eab9b2bc 3902 if argdict is None:
5b1ecbb3 3903 return default
eab9b2bc 3904 assert isinstance(argdict, dict)
3905
e92caff5 3906 assert isinstance(keys, (list, tuple))
3907 for key_list in keys:
e92caff5 3908 arg_list = list(filter(
3909 lambda x: x is not None,
6606817a 3910 [argdict.get(key.lower()) for key in variadic(key_list)]))
e92caff5 3911 if arg_list:
3912 return [arg for args in arg_list for arg in args]
3913 return default
66e289ba 3914
6251555f 3915
330690a2 3916def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compat=True):
3917 main_key, exe = main_key.lower(), exe.lower()
3918 root_key = exe if main_key == exe else f'{main_key}+{exe}'
3919 keys = [f'{root_key}{k}' for k in (keys or [''])]
3920 if root_key in keys:
3921 if main_key != exe:
3922 keys.append((main_key, exe))
3923 keys.append('default')
3924 else:
3925 use_compat = False
3926 return cli_configuration_args(argdict, keys, default, use_compat)
3927
66e289ba 3928
86e5f3ed 3929class ISO639Utils:
39672624
YCH
3930 # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
3931 _lang_map = {
3932 'aa': 'aar',
3933 'ab': 'abk',
3934 'ae': 'ave',
3935 'af': 'afr',
3936 'ak': 'aka',
3937 'am': 'amh',
3938 'an': 'arg',
3939 'ar': 'ara',
3940 'as': 'asm',
3941 'av': 'ava',
3942 'ay': 'aym',
3943 'az': 'aze',
3944 'ba': 'bak',
3945 'be': 'bel',
3946 'bg': 'bul',
3947 'bh': 'bih',
3948 'bi': 'bis',
3949 'bm': 'bam',
3950 'bn': 'ben',
3951 'bo': 'bod',
3952 'br': 'bre',
3953 'bs': 'bos',
3954 'ca': 'cat',
3955 'ce': 'che',
3956 'ch': 'cha',
3957 'co': 'cos',
3958 'cr': 'cre',
3959 'cs': 'ces',
3960 'cu': 'chu',
3961 'cv': 'chv',
3962 'cy': 'cym',
3963 'da': 'dan',
3964 'de': 'deu',
3965 'dv': 'div',
3966 'dz': 'dzo',
3967 'ee': 'ewe',
3968 'el': 'ell',
3969 'en': 'eng',
3970 'eo': 'epo',
3971 'es': 'spa',
3972 'et': 'est',
3973 'eu': 'eus',
3974 'fa': 'fas',
3975 'ff': 'ful',
3976 'fi': 'fin',
3977 'fj': 'fij',
3978 'fo': 'fao',
3979 'fr': 'fra',
3980 'fy': 'fry',
3981 'ga': 'gle',
3982 'gd': 'gla',
3983 'gl': 'glg',
3984 'gn': 'grn',
3985 'gu': 'guj',
3986 'gv': 'glv',
3987 'ha': 'hau',
3988 'he': 'heb',
b7acc835 3989 'iw': 'heb', # Replaced by he in 1989 revision
39672624
YCH
3990 'hi': 'hin',
3991 'ho': 'hmo',
3992 'hr': 'hrv',
3993 'ht': 'hat',
3994 'hu': 'hun',
3995 'hy': 'hye',
3996 'hz': 'her',
3997 'ia': 'ina',
3998 'id': 'ind',
b7acc835 3999 'in': 'ind', # Replaced by id in 1989 revision
39672624
YCH
4000 'ie': 'ile',
4001 'ig': 'ibo',
4002 'ii': 'iii',
4003 'ik': 'ipk',
4004 'io': 'ido',
4005 'is': 'isl',
4006 'it': 'ita',
4007 'iu': 'iku',
4008 'ja': 'jpn',
4009 'jv': 'jav',
4010 'ka': 'kat',
4011 'kg': 'kon',
4012 'ki': 'kik',
4013 'kj': 'kua',
4014 'kk': 'kaz',
4015 'kl': 'kal',
4016 'km': 'khm',
4017 'kn': 'kan',
4018 'ko': 'kor',
4019 'kr': 'kau',
4020 'ks': 'kas',
4021 'ku': 'kur',
4022 'kv': 'kom',
4023 'kw': 'cor',
4024 'ky': 'kir',
4025 'la': 'lat',
4026 'lb': 'ltz',
4027 'lg': 'lug',
4028 'li': 'lim',
4029 'ln': 'lin',
4030 'lo': 'lao',
4031 'lt': 'lit',
4032 'lu': 'lub',
4033 'lv': 'lav',
4034 'mg': 'mlg',
4035 'mh': 'mah',
4036 'mi': 'mri',
4037 'mk': 'mkd',
4038 'ml': 'mal',
4039 'mn': 'mon',
4040 'mr': 'mar',
4041 'ms': 'msa',
4042 'mt': 'mlt',
4043 'my': 'mya',
4044 'na': 'nau',
4045 'nb': 'nob',
4046 'nd': 'nde',
4047 'ne': 'nep',
4048 'ng': 'ndo',
4049 'nl': 'nld',
4050 'nn': 'nno',
4051 'no': 'nor',
4052 'nr': 'nbl',
4053 'nv': 'nav',
4054 'ny': 'nya',
4055 'oc': 'oci',
4056 'oj': 'oji',
4057 'om': 'orm',
4058 'or': 'ori',
4059 'os': 'oss',
4060 'pa': 'pan',
4061 'pi': 'pli',
4062 'pl': 'pol',
4063 'ps': 'pus',
4064 'pt': 'por',
4065 'qu': 'que',
4066 'rm': 'roh',
4067 'rn': 'run',
4068 'ro': 'ron',
4069 'ru': 'rus',
4070 'rw': 'kin',
4071 'sa': 'san',
4072 'sc': 'srd',
4073 'sd': 'snd',
4074 'se': 'sme',
4075 'sg': 'sag',
4076 'si': 'sin',
4077 'sk': 'slk',
4078 'sl': 'slv',
4079 'sm': 'smo',
4080 'sn': 'sna',
4081 'so': 'som',
4082 'sq': 'sqi',
4083 'sr': 'srp',
4084 'ss': 'ssw',
4085 'st': 'sot',
4086 'su': 'sun',
4087 'sv': 'swe',
4088 'sw': 'swa',
4089 'ta': 'tam',
4090 'te': 'tel',
4091 'tg': 'tgk',
4092 'th': 'tha',
4093 'ti': 'tir',
4094 'tk': 'tuk',
4095 'tl': 'tgl',
4096 'tn': 'tsn',
4097 'to': 'ton',
4098 'tr': 'tur',
4099 'ts': 'tso',
4100 'tt': 'tat',
4101 'tw': 'twi',
4102 'ty': 'tah',
4103 'ug': 'uig',
4104 'uk': 'ukr',
4105 'ur': 'urd',
4106 'uz': 'uzb',
4107 've': 'ven',
4108 'vi': 'vie',
4109 'vo': 'vol',
4110 'wa': 'wln',
4111 'wo': 'wol',
4112 'xh': 'xho',
4113 'yi': 'yid',
e9a50fba 4114 'ji': 'yid', # Replaced by yi in 1989 revision
39672624
YCH
4115 'yo': 'yor',
4116 'za': 'zha',
4117 'zh': 'zho',
4118 'zu': 'zul',
4119 }
4120
4121 @classmethod
4122 def short2long(cls, code):
4123 """Convert language code from ISO 639-1 to ISO 639-2/T"""
4124 return cls._lang_map.get(code[:2])
4125
4126 @classmethod
4127 def long2short(cls, code):
4128 """Convert language code from ISO 639-2/T to ISO 639-1"""
4129 for short_name, long_name in cls._lang_map.items():
4130 if long_name == code:
4131 return short_name
4132
4133
86e5f3ed 4134class ISO3166Utils:
4eb10f66
YCH
4135 # From http://data.okfn.org/data/core/country-list
4136 _country_map = {
4137 'AF': 'Afghanistan',
4138 'AX': 'Åland Islands',
4139 'AL': 'Albania',
4140 'DZ': 'Algeria',
4141 'AS': 'American Samoa',
4142 'AD': 'Andorra',
4143 'AO': 'Angola',
4144 'AI': 'Anguilla',
4145 'AQ': 'Antarctica',
4146 'AG': 'Antigua and Barbuda',
4147 'AR': 'Argentina',
4148 'AM': 'Armenia',
4149 'AW': 'Aruba',
4150 'AU': 'Australia',
4151 'AT': 'Austria',
4152 'AZ': 'Azerbaijan',
4153 'BS': 'Bahamas',
4154 'BH': 'Bahrain',
4155 'BD': 'Bangladesh',
4156 'BB': 'Barbados',
4157 'BY': 'Belarus',
4158 'BE': 'Belgium',
4159 'BZ': 'Belize',
4160 'BJ': 'Benin',
4161 'BM': 'Bermuda',
4162 'BT': 'Bhutan',
4163 'BO': 'Bolivia, Plurinational State of',
4164 'BQ': 'Bonaire, Sint Eustatius and Saba',
4165 'BA': 'Bosnia and Herzegovina',
4166 'BW': 'Botswana',
4167 'BV': 'Bouvet Island',
4168 'BR': 'Brazil',
4169 'IO': 'British Indian Ocean Territory',
4170 'BN': 'Brunei Darussalam',
4171 'BG': 'Bulgaria',
4172 'BF': 'Burkina Faso',
4173 'BI': 'Burundi',
4174 'KH': 'Cambodia',
4175 'CM': 'Cameroon',
4176 'CA': 'Canada',
4177 'CV': 'Cape Verde',
4178 'KY': 'Cayman Islands',
4179 'CF': 'Central African Republic',
4180 'TD': 'Chad',
4181 'CL': 'Chile',
4182 'CN': 'China',
4183 'CX': 'Christmas Island',
4184 'CC': 'Cocos (Keeling) Islands',
4185 'CO': 'Colombia',
4186 'KM': 'Comoros',
4187 'CG': 'Congo',
4188 'CD': 'Congo, the Democratic Republic of the',
4189 'CK': 'Cook Islands',
4190 'CR': 'Costa Rica',
4191 'CI': 'Côte d\'Ivoire',
4192 'HR': 'Croatia',
4193 'CU': 'Cuba',
4194 'CW': 'Curaçao',
4195 'CY': 'Cyprus',
4196 'CZ': 'Czech Republic',
4197 'DK': 'Denmark',
4198 'DJ': 'Djibouti',
4199 'DM': 'Dominica',
4200 'DO': 'Dominican Republic',
4201 'EC': 'Ecuador',
4202 'EG': 'Egypt',
4203 'SV': 'El Salvador',
4204 'GQ': 'Equatorial Guinea',
4205 'ER': 'Eritrea',
4206 'EE': 'Estonia',
4207 'ET': 'Ethiopia',
4208 'FK': 'Falkland Islands (Malvinas)',
4209 'FO': 'Faroe Islands',
4210 'FJ': 'Fiji',
4211 'FI': 'Finland',
4212 'FR': 'France',
4213 'GF': 'French Guiana',
4214 'PF': 'French Polynesia',
4215 'TF': 'French Southern Territories',
4216 'GA': 'Gabon',
4217 'GM': 'Gambia',
4218 'GE': 'Georgia',
4219 'DE': 'Germany',
4220 'GH': 'Ghana',
4221 'GI': 'Gibraltar',
4222 'GR': 'Greece',
4223 'GL': 'Greenland',
4224 'GD': 'Grenada',
4225 'GP': 'Guadeloupe',
4226 'GU': 'Guam',
4227 'GT': 'Guatemala',
4228 'GG': 'Guernsey',
4229 'GN': 'Guinea',
4230 'GW': 'Guinea-Bissau',
4231 'GY': 'Guyana',
4232 'HT': 'Haiti',
4233 'HM': 'Heard Island and McDonald Islands',
4234 'VA': 'Holy See (Vatican City State)',
4235 'HN': 'Honduras',
4236 'HK': 'Hong Kong',
4237 'HU': 'Hungary',
4238 'IS': 'Iceland',
4239 'IN': 'India',
4240 'ID': 'Indonesia',
4241 'IR': 'Iran, Islamic Republic of',
4242 'IQ': 'Iraq',
4243 'IE': 'Ireland',
4244 'IM': 'Isle of Man',
4245 'IL': 'Israel',
4246 'IT': 'Italy',
4247 'JM': 'Jamaica',
4248 'JP': 'Japan',
4249 'JE': 'Jersey',
4250 'JO': 'Jordan',
4251 'KZ': 'Kazakhstan',
4252 'KE': 'Kenya',
4253 'KI': 'Kiribati',
4254 'KP': 'Korea, Democratic People\'s Republic of',
4255 'KR': 'Korea, Republic of',
4256 'KW': 'Kuwait',
4257 'KG': 'Kyrgyzstan',
4258 'LA': 'Lao People\'s Democratic Republic',
4259 'LV': 'Latvia',
4260 'LB': 'Lebanon',
4261 'LS': 'Lesotho',
4262 'LR': 'Liberia',
4263 'LY': 'Libya',
4264 'LI': 'Liechtenstein',
4265 'LT': 'Lithuania',
4266 'LU': 'Luxembourg',
4267 'MO': 'Macao',
4268 'MK': 'Macedonia, the Former Yugoslav Republic of',
4269 'MG': 'Madagascar',
4270 'MW': 'Malawi',
4271 'MY': 'Malaysia',
4272 'MV': 'Maldives',
4273 'ML': 'Mali',
4274 'MT': 'Malta',
4275 'MH': 'Marshall Islands',
4276 'MQ': 'Martinique',
4277 'MR': 'Mauritania',
4278 'MU': 'Mauritius',
4279 'YT': 'Mayotte',
4280 'MX': 'Mexico',
4281 'FM': 'Micronesia, Federated States of',
4282 'MD': 'Moldova, Republic of',
4283 'MC': 'Monaco',
4284 'MN': 'Mongolia',
4285 'ME': 'Montenegro',
4286 'MS': 'Montserrat',
4287 'MA': 'Morocco',
4288 'MZ': 'Mozambique',
4289 'MM': 'Myanmar',
4290 'NA': 'Namibia',
4291 'NR': 'Nauru',
4292 'NP': 'Nepal',
4293 'NL': 'Netherlands',
4294 'NC': 'New Caledonia',
4295 'NZ': 'New Zealand',
4296 'NI': 'Nicaragua',
4297 'NE': 'Niger',
4298 'NG': 'Nigeria',
4299 'NU': 'Niue',
4300 'NF': 'Norfolk Island',
4301 'MP': 'Northern Mariana Islands',
4302 'NO': 'Norway',
4303 'OM': 'Oman',
4304 'PK': 'Pakistan',
4305 'PW': 'Palau',
4306 'PS': 'Palestine, State of',
4307 'PA': 'Panama',
4308 'PG': 'Papua New Guinea',
4309 'PY': 'Paraguay',
4310 'PE': 'Peru',
4311 'PH': 'Philippines',
4312 'PN': 'Pitcairn',
4313 'PL': 'Poland',
4314 'PT': 'Portugal',
4315 'PR': 'Puerto Rico',
4316 'QA': 'Qatar',
4317 'RE': 'Réunion',
4318 'RO': 'Romania',
4319 'RU': 'Russian Federation',
4320 'RW': 'Rwanda',
4321 'BL': 'Saint Barthélemy',
4322 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
4323 'KN': 'Saint Kitts and Nevis',
4324 'LC': 'Saint Lucia',
4325 'MF': 'Saint Martin (French part)',
4326 'PM': 'Saint Pierre and Miquelon',
4327 'VC': 'Saint Vincent and the Grenadines',
4328 'WS': 'Samoa',
4329 'SM': 'San Marino',
4330 'ST': 'Sao Tome and Principe',
4331 'SA': 'Saudi Arabia',
4332 'SN': 'Senegal',
4333 'RS': 'Serbia',
4334 'SC': 'Seychelles',
4335 'SL': 'Sierra Leone',
4336 'SG': 'Singapore',
4337 'SX': 'Sint Maarten (Dutch part)',
4338 'SK': 'Slovakia',
4339 'SI': 'Slovenia',
4340 'SB': 'Solomon Islands',
4341 'SO': 'Somalia',
4342 'ZA': 'South Africa',
4343 'GS': 'South Georgia and the South Sandwich Islands',
4344 'SS': 'South Sudan',
4345 'ES': 'Spain',
4346 'LK': 'Sri Lanka',
4347 'SD': 'Sudan',
4348 'SR': 'Suriname',
4349 'SJ': 'Svalbard and Jan Mayen',
4350 'SZ': 'Swaziland',
4351 'SE': 'Sweden',
4352 'CH': 'Switzerland',
4353 'SY': 'Syrian Arab Republic',
4354 'TW': 'Taiwan, Province of China',
4355 'TJ': 'Tajikistan',
4356 'TZ': 'Tanzania, United Republic of',
4357 'TH': 'Thailand',
4358 'TL': 'Timor-Leste',
4359 'TG': 'Togo',
4360 'TK': 'Tokelau',
4361 'TO': 'Tonga',
4362 'TT': 'Trinidad and Tobago',
4363 'TN': 'Tunisia',
4364 'TR': 'Turkey',
4365 'TM': 'Turkmenistan',
4366 'TC': 'Turks and Caicos Islands',
4367 'TV': 'Tuvalu',
4368 'UG': 'Uganda',
4369 'UA': 'Ukraine',
4370 'AE': 'United Arab Emirates',
4371 'GB': 'United Kingdom',
4372 'US': 'United States',
4373 'UM': 'United States Minor Outlying Islands',
4374 'UY': 'Uruguay',
4375 'UZ': 'Uzbekistan',
4376 'VU': 'Vanuatu',
4377 'VE': 'Venezuela, Bolivarian Republic of',
4378 'VN': 'Viet Nam',
4379 'VG': 'Virgin Islands, British',
4380 'VI': 'Virgin Islands, U.S.',
4381 'WF': 'Wallis and Futuna',
4382 'EH': 'Western Sahara',
4383 'YE': 'Yemen',
4384 'ZM': 'Zambia',
4385 'ZW': 'Zimbabwe',
2f97cc61 4386 # Not ISO 3166 codes, but used for IP blocks
4387 'AP': 'Asia/Pacific Region',
4388 'EU': 'Europe',
4eb10f66
YCH
4389 }
4390
4391 @classmethod
4392 def short2full(cls, code):
4393 """Convert an ISO 3166-2 country code to the corresponding full name"""
4394 return cls._country_map.get(code.upper())
4395
4396
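# Illustrative sketch of the two lookup classes above (sample codes assumed):
from yt_dlp.utils import ISO3166Utils, ISO639Utils
assert ISO639Utils.short2long('en') == 'eng'
assert ISO639Utils.long2short('deu') == 'de'
assert ISO3166Utils.short2full('us') == 'United States'
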
86e5f3ed 4397class GeoUtils:
773f291d
S
4398 # Major IPv4 address blocks per country
4399 _country_ip_map = {
53896ca5 4400 'AD': '46.172.224.0/19',
773f291d
S
4401 'AE': '94.200.0.0/13',
4402 'AF': '149.54.0.0/17',
4403 'AG': '209.59.64.0/18',
4404 'AI': '204.14.248.0/21',
4405 'AL': '46.99.0.0/16',
4406 'AM': '46.70.0.0/15',
4407 'AO': '105.168.0.0/13',
53896ca5
S
4408 'AP': '182.50.184.0/21',
4409 'AQ': '23.154.160.0/24',
773f291d
S
4410 'AR': '181.0.0.0/12',
4411 'AS': '202.70.112.0/20',
53896ca5 4412 'AT': '77.116.0.0/14',
773f291d
S
4413 'AU': '1.128.0.0/11',
4414 'AW': '181.41.0.0/18',
53896ca5
S
4415 'AX': '185.217.4.0/22',
4416 'AZ': '5.197.0.0/16',
773f291d
S
4417 'BA': '31.176.128.0/17',
4418 'BB': '65.48.128.0/17',
4419 'BD': '114.130.0.0/16',
4420 'BE': '57.0.0.0/8',
53896ca5 4421 'BF': '102.178.0.0/15',
773f291d
S
4422 'BG': '95.42.0.0/15',
4423 'BH': '37.131.0.0/17',
4424 'BI': '154.117.192.0/18',
4425 'BJ': '137.255.0.0/16',
53896ca5 4426 'BL': '185.212.72.0/23',
773f291d
S
4427 'BM': '196.12.64.0/18',
4428 'BN': '156.31.0.0/16',
4429 'BO': '161.56.0.0/16',
4430 'BQ': '161.0.80.0/20',
53896ca5 4431 'BR': '191.128.0.0/12',
773f291d
S
4432 'BS': '24.51.64.0/18',
4433 'BT': '119.2.96.0/19',
4434 'BW': '168.167.0.0/16',
4435 'BY': '178.120.0.0/13',
4436 'BZ': '179.42.192.0/18',
4437 'CA': '99.224.0.0/11',
4438 'CD': '41.243.0.0/16',
53896ca5
S
4439 'CF': '197.242.176.0/21',
4440 'CG': '160.113.0.0/16',
773f291d 4441 'CH': '85.0.0.0/13',
53896ca5 4442 'CI': '102.136.0.0/14',
773f291d
S
4443 'CK': '202.65.32.0/19',
4444 'CL': '152.172.0.0/14',
53896ca5 4445 'CM': '102.244.0.0/14',
773f291d
S
4446 'CN': '36.128.0.0/10',
4447 'CO': '181.240.0.0/12',
4448 'CR': '201.192.0.0/12',
4449 'CU': '152.206.0.0/15',
4450 'CV': '165.90.96.0/19',
4451 'CW': '190.88.128.0/17',
53896ca5 4452 'CY': '31.153.0.0/16',
773f291d
S
4453 'CZ': '88.100.0.0/14',
4454 'DE': '53.0.0.0/8',
4455 'DJ': '197.241.0.0/17',
4456 'DK': '87.48.0.0/12',
4457 'DM': '192.243.48.0/20',
4458 'DO': '152.166.0.0/15',
4459 'DZ': '41.96.0.0/12',
4460 'EC': '186.68.0.0/15',
4461 'EE': '90.190.0.0/15',
4462 'EG': '156.160.0.0/11',
4463 'ER': '196.200.96.0/20',
4464 'ES': '88.0.0.0/11',
4465 'ET': '196.188.0.0/14',
4466 'EU': '2.16.0.0/13',
4467 'FI': '91.152.0.0/13',
4468 'FJ': '144.120.0.0/16',
53896ca5 4469 'FK': '80.73.208.0/21',
773f291d
S
4470 'FM': '119.252.112.0/20',
4471 'FO': '88.85.32.0/19',
4472 'FR': '90.0.0.0/9',
4473 'GA': '41.158.0.0/15',
4474 'GB': '25.0.0.0/8',
4475 'GD': '74.122.88.0/21',
4476 'GE': '31.146.0.0/16',
4477 'GF': '161.22.64.0/18',
4478 'GG': '62.68.160.0/19',
53896ca5
S
4479 'GH': '154.160.0.0/12',
4480 'GI': '95.164.0.0/16',
773f291d
S
4481 'GL': '88.83.0.0/19',
4482 'GM': '160.182.0.0/15',
4483 'GN': '197.149.192.0/18',
4484 'GP': '104.250.0.0/19',
4485 'GQ': '105.235.224.0/20',
4486 'GR': '94.64.0.0/13',
4487 'GT': '168.234.0.0/16',
4488 'GU': '168.123.0.0/16',
4489 'GW': '197.214.80.0/20',
4490 'GY': '181.41.64.0/18',
4491 'HK': '113.252.0.0/14',
4492 'HN': '181.210.0.0/16',
4493 'HR': '93.136.0.0/13',
4494 'HT': '148.102.128.0/17',
4495 'HU': '84.0.0.0/14',
4496 'ID': '39.192.0.0/10',
4497 'IE': '87.32.0.0/12',
4498 'IL': '79.176.0.0/13',
4499 'IM': '5.62.80.0/20',
4500 'IN': '117.192.0.0/10',
4501 'IO': '203.83.48.0/21',
4502 'IQ': '37.236.0.0/14',
4503 'IR': '2.176.0.0/12',
4504 'IS': '82.221.0.0/16',
4505 'IT': '79.0.0.0/10',
4506 'JE': '87.244.64.0/18',
4507 'JM': '72.27.0.0/17',
4508 'JO': '176.29.0.0/16',
53896ca5 4509 'JP': '133.0.0.0/8',
773f291d
S
4510 'KE': '105.48.0.0/12',
4511 'KG': '158.181.128.0/17',
4512 'KH': '36.37.128.0/17',
4513 'KI': '103.25.140.0/22',
4514 'KM': '197.255.224.0/20',
53896ca5 4515 'KN': '198.167.192.0/19',
773f291d
S
4516 'KP': '175.45.176.0/22',
4517 'KR': '175.192.0.0/10',
4518 'KW': '37.36.0.0/14',
4519 'KY': '64.96.0.0/15',
4520 'KZ': '2.72.0.0/13',
4521 'LA': '115.84.64.0/18',
4522 'LB': '178.135.0.0/16',
53896ca5 4523 'LC': '24.92.144.0/20',
773f291d
S
4524 'LI': '82.117.0.0/19',
4525 'LK': '112.134.0.0/15',
53896ca5 4526 'LR': '102.183.0.0/16',
773f291d
S
4527 'LS': '129.232.0.0/17',
4528 'LT': '78.56.0.0/13',
4529 'LU': '188.42.0.0/16',
4530 'LV': '46.109.0.0/16',
4531 'LY': '41.252.0.0/14',
4532 'MA': '105.128.0.0/11',
4533 'MC': '88.209.64.0/18',
4534 'MD': '37.246.0.0/16',
4535 'ME': '178.175.0.0/17',
4536 'MF': '74.112.232.0/21',
4537 'MG': '154.126.0.0/17',
4538 'MH': '117.103.88.0/21',
4539 'MK': '77.28.0.0/15',
4540 'ML': '154.118.128.0/18',
4541 'MM': '37.111.0.0/17',
4542 'MN': '49.0.128.0/17',
4543 'MO': '60.246.0.0/16',
4544 'MP': '202.88.64.0/20',
4545 'MQ': '109.203.224.0/19',
4546 'MR': '41.188.64.0/18',
4547 'MS': '208.90.112.0/22',
4548 'MT': '46.11.0.0/16',
4549 'MU': '105.16.0.0/12',
4550 'MV': '27.114.128.0/18',
53896ca5 4551 'MW': '102.70.0.0/15',
773f291d
S
4552 'MX': '187.192.0.0/11',
4553 'MY': '175.136.0.0/13',
4554 'MZ': '197.218.0.0/15',
4555 'NA': '41.182.0.0/16',
4556 'NC': '101.101.0.0/18',
4557 'NE': '197.214.0.0/18',
4558 'NF': '203.17.240.0/22',
4559 'NG': '105.112.0.0/12',
4560 'NI': '186.76.0.0/15',
4561 'NL': '145.96.0.0/11',
4562 'NO': '84.208.0.0/13',
4563 'NP': '36.252.0.0/15',
4564 'NR': '203.98.224.0/19',
4565 'NU': '49.156.48.0/22',
4566 'NZ': '49.224.0.0/14',
4567 'OM': '5.36.0.0/15',
4568 'PA': '186.72.0.0/15',
4569 'PE': '186.160.0.0/14',
4570 'PF': '123.50.64.0/18',
4571 'PG': '124.240.192.0/19',
4572 'PH': '49.144.0.0/13',
4573 'PK': '39.32.0.0/11',
4574 'PL': '83.0.0.0/11',
4575 'PM': '70.36.0.0/20',
4576 'PR': '66.50.0.0/16',
4577 'PS': '188.161.0.0/16',
4578 'PT': '85.240.0.0/13',
4579 'PW': '202.124.224.0/20',
4580 'PY': '181.120.0.0/14',
4581 'QA': '37.210.0.0/15',
53896ca5 4582 'RE': '102.35.0.0/16',
773f291d 4583 'RO': '79.112.0.0/13',
53896ca5 4584 'RS': '93.86.0.0/15',
773f291d 4585 'RU': '5.136.0.0/13',
53896ca5 4586 'RW': '41.186.0.0/16',
773f291d
S
4587 'SA': '188.48.0.0/13',
4588 'SB': '202.1.160.0/19',
4589 'SC': '154.192.0.0/11',
53896ca5 4590 'SD': '102.120.0.0/13',
773f291d 4591 'SE': '78.64.0.0/12',
53896ca5 4592 'SG': '8.128.0.0/10',
773f291d
S
4593 'SI': '188.196.0.0/14',
4594 'SK': '78.98.0.0/15',
53896ca5 4595 'SL': '102.143.0.0/17',
773f291d
S
4596 'SM': '89.186.32.0/19',
4597 'SN': '41.82.0.0/15',
53896ca5 4598 'SO': '154.115.192.0/18',
773f291d
S
4599 'SR': '186.179.128.0/17',
4600 'SS': '105.235.208.0/21',
4601 'ST': '197.159.160.0/19',
4602 'SV': '168.243.0.0/16',
4603 'SX': '190.102.0.0/20',
4604 'SY': '5.0.0.0/16',
4605 'SZ': '41.84.224.0/19',
4606 'TC': '65.255.48.0/20',
4607 'TD': '154.68.128.0/19',
4608 'TG': '196.168.0.0/14',
4609 'TH': '171.96.0.0/13',
4610 'TJ': '85.9.128.0/18',
4611 'TK': '27.96.24.0/21',
4612 'TL': '180.189.160.0/20',
4613 'TM': '95.85.96.0/19',
4614 'TN': '197.0.0.0/11',
4615 'TO': '175.176.144.0/21',
4616 'TR': '78.160.0.0/11',
4617 'TT': '186.44.0.0/15',
4618 'TV': '202.2.96.0/19',
4619 'TW': '120.96.0.0/11',
4620 'TZ': '156.156.0.0/14',
53896ca5
S
4621 'UA': '37.52.0.0/14',
4622 'UG': '102.80.0.0/13',
4623 'US': '6.0.0.0/8',
773f291d 4624 'UY': '167.56.0.0/13',
53896ca5 4625 'UZ': '84.54.64.0/18',
773f291d 4626 'VA': '212.77.0.0/19',
53896ca5 4627 'VC': '207.191.240.0/21',
773f291d 4628 'VE': '186.88.0.0/13',
53896ca5 4629 'VG': '66.81.192.0/20',
773f291d
S
4630 'VI': '146.226.0.0/16',
4631 'VN': '14.160.0.0/11',
4632 'VU': '202.80.32.0/20',
4633 'WF': '117.20.32.0/21',
4634 'WS': '202.4.32.0/19',
4635 'YE': '134.35.0.0/16',
4636 'YT': '41.242.116.0/22',
4637 'ZA': '41.0.0.0/11',
53896ca5
S
4638 'ZM': '102.144.0.0/13',
4639 'ZW': '102.177.192.0/18',
773f291d
S
4640 }
4641
4642 @classmethod
5f95927a
S
4643 def random_ipv4(cls, code_or_block):
4644 if len(code_or_block) == 2:
4645 block = cls._country_ip_map.get(code_or_block.upper())
4646 if not block:
4647 return None
4648 else:
4649 block = code_or_block
773f291d 4650 addr, preflen = block.split('/')
ac668111 4651 addr_min = struct.unpack('!L', socket.inet_aton(addr))[0]
773f291d 4652 addr_max = addr_min | (0xffffffff >> int(preflen))
14f25df2 4653 return str(socket.inet_ntoa(
ac668111 4654 struct.pack('!L', random.randint(addr_min, addr_max))))
773f291d
S
4655
4656
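# Illustrative sketch: a two-letter code selects the country block above, while
# any other argument is treated as a CIDR block directly (sample inputs assumed):
import ipaddress
from yt_dlp.utils import GeoUtils
addr = GeoUtils.random_ipv4('DE')
assert ipaddress.ip_address(addr) in ipaddress.ip_network('53.0.0.0/8')
assert GeoUtils.random_ipv4('10.0.0.0/32') == '10.0.0.0'
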
ac668111 4657class PerRequestProxyHandler(urllib.request.ProxyHandler):
2461f79d
PH
4658 def __init__(self, proxies=None):
4659 # Set default handlers
4660 for type in ('http', 'https'):
4661 setattr(self, '%s_open' % type,
4662 lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
4663 meth(r, proxy, type))
ac668111 4664 urllib.request.ProxyHandler.__init__(self, proxies)
2461f79d 4665
91410c9b 4666 def proxy_open(self, req, proxy, type):
2461f79d 4667 req_proxy = req.headers.get('Ytdl-request-proxy')
91410c9b
PH
4668 if req_proxy is not None:
4669 proxy = req_proxy
2461f79d
PH
4670 del req.headers['Ytdl-request-proxy']
4671
4672 if proxy == '__noproxy__':
4673 return None # No Proxy
14f25df2 4674 if urllib.parse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
71aff188 4675 req.add_header('Ytdl-socks-proxy', proxy)
7a5c1cfe 4676 # yt-dlp's http/https handlers do wrapping the socket with socks
71aff188 4677 return None
ac668111 4678 return urllib.request.ProxyHandler.proxy_open(
91410c9b 4679 self, req, proxy, type)
5bc880b9
YCH
4680
4681
0a5445dd
YCH
4682# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
4683# released into Public Domain
4684# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
4685
4686def long_to_bytes(n, blocksize=0):
4687 """long_to_bytes(n:long, blocksize:int) : string
4688 Convert a long integer to a byte string.
4689
4690 If optional blocksize is given and greater than zero, pad the front of the
4691 byte string with binary zeros so that the length is a multiple of
4692 blocksize.
4693 """
4694 # after much testing, this algorithm was deemed to be the fastest
4695 s = b''
4696 n = int(n)
4697 while n > 0:
ac668111 4698 s = struct.pack('>I', n & 0xffffffff) + s
0a5445dd
YCH
4699 n = n >> 32
4700 # strip off leading zeros
4701 for i in range(len(s)):
4702 if s[i] != b'\000'[0]:
4703 break
4704 else:
4705 # only happens when n == 0
4706 s = b'\000'
4707 i = 0
4708 s = s[i:]
4709 # add back some pad bytes. this could be done more efficiently w.r.t. the
4710 # de-padding being done above, but sigh...
4711 if blocksize > 0 and len(s) % blocksize:
4712 s = (blocksize - len(s) % blocksize) * b'\000' + s
4713 return s
4714
4715
4716def bytes_to_long(s):
4717 """bytes_to_long(string) : long
4718 Convert a byte string to a long integer.
4719
4720 This is (essentially) the inverse of long_to_bytes().
4721 """
4722 acc = 0
4723 length = len(s)
4724 if length % 4:
4725 extra = (4 - length % 4)
4726 s = b'\000' * extra + s
4727 length = length + extra
4728 for i in range(0, length, 4):
ac668111 4729 acc = (acc << 32) + struct.unpack('>I', s[i:i + 4])[0]
0a5445dd
YCH
4730 return acc
4731
4732
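# Illustrative sketch: round-trip conversion, with optional zero-padding of the
# front of the byte string to a block size (sample values assumed):
from yt_dlp.utils import bytes_to_long, long_to_bytes
assert long_to_bytes(65537) == b'\x01\x00\x01'
assert long_to_bytes(65537, blocksize=4) == b'\x00\x01\x00\x01'
assert bytes_to_long(b'\x01\x00\x01') == 65537
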
5bc880b9
YCH
4733def ohdave_rsa_encrypt(data, exponent, modulus):
4734 '''
4735 Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
4736
4737 Input:
4738 data: data to encrypt, bytes-like object
4739 exponent, modulus: parameter e and N of RSA algorithm, both integer
4740 Output: hex string of encrypted data
4741
4742 Limitation: supports one block encryption only
4743 '''
4744
4745 payload = int(binascii.hexlify(data[::-1]), 16)
4746 encrypted = pow(payload, exponent, modulus)
4747 return '%x' % encrypted
81bdc8fd
YCH
4748
4749
f48409c7
YCH
4750def pkcs1pad(data, length):
4751 """
4752 Padding input data with PKCS#1 scheme
4753
4754 @param {int[]} data input data
4755 @param {int} length target length
4756 @returns {int[]} padded data
4757 """
4758 if len(data) > length - 11:
4759 raise ValueError('Input data too long for PKCS#1 padding')
4760
4761 pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
4762 return [0, 2] + pseudo_random + [0] + data
4763
4764
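# Illustrative sketch: pad a 3-byte message to a 16-byte block; the layout is
# 0x00 0x02, random filler, a 0x00 separator, then the data (sample values assumed):
from yt_dlp.utils import pkcs1pad
padded = pkcs1pad([1, 2, 3], 16)
assert len(padded) == 16 and padded[:2] == [0, 2] and padded[-4:] == [0, 1, 2, 3]
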
7b2c3f47 4765def _base_n_table(n, table):
4766 if not table and not n:
4767 raise ValueError('Either table or n must be specified')
612f2be5 4768 table = (table or '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')[:n]
4769
44f14eb4 4770 if n and n != len(table):
612f2be5 4771 raise ValueError(f'base {n} exceeds table length {len(table)}')
4772 return table
59f898b7 4773
5eb6bdce 4774
7b2c3f47 4775def encode_base_n(num, n=None, table=None):
4776 """Convert given int to a base-n string"""
612f2be5 4777 table = _base_n_table(n, table)
7b2c3f47 4778 if not num:
5eb6bdce
YCH
4779 return table[0]
4780
7b2c3f47 4781 result, base = '', len(table)
81bdc8fd 4782 while num:
7b2c3f47 4783 result = table[num % base] + result
612f2be5 4784 num = num // base
7b2c3f47 4785 return result
4786
4787
4788def decode_base_n(string, n=None, table=None):
4789 """Convert given base-n string to int"""
4790 table = {char: index for index, char in enumerate(_base_n_table(n, table))}
4791 result, base = 0, len(table)
4792 for char in string:
4793 result = result * base + table[char]
4794 return result
4795
4796
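# Illustrative sketch: round-trip through the default base-62 alphabet and a
# custom table (sample values assumed):
from yt_dlp.utils import decode_base_n, encode_base_n
assert encode_base_n(123456, n=62) == 'w7e'
assert decode_base_n('w7e', n=62) == 123456
assert decode_base_n('ff', table='0123456789abcdef') == 255
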
4797def decode_base(value, digits):
4798 write_string('DeprecationWarning: yt_dlp.utils.decode_base is deprecated '
4799 'and may be removed in a future version. Use yt_dlp.decode_base_n instead')
4800 return decode_base_n(value, table=digits)
f52354a8
YCH
4801
4802
4803def decode_packed_codes(code):
06b3fe29 4804 mobj = re.search(PACKED_CODES_RE, code)
a0566bbf 4805 obfuscated_code, base, count, symbols = mobj.groups()
f52354a8
YCH
4806 base = int(base)
4807 count = int(count)
4808 symbols = symbols.split('|')
4809 symbol_table = {}
4810
4811 while count:
4812 count -= 1
5eb6bdce 4813 base_n_count = encode_base_n(count, base)
f52354a8
YCH
4814 symbol_table[base_n_count] = symbols[count] or base_n_count
4815
4816 return re.sub(
4817 r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
a0566bbf 4818 obfuscated_code)
e154c651 4819
4820
1ced2221
S
4821def caesar(s, alphabet, shift):
4822 if shift == 0:
4823 return s
4824 l = len(alphabet)
4825 return ''.join(
4826 alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
4827 for c in s)
4828
4829
4830def rot47(s):
4831 return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)
4832
4833
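# Illustrative sketch: caesar() only shifts characters present in the given
# alphabet; rot47 is the printable-ASCII variant (sample strings assumed):
from yt_dlp.utils import caesar, rot47
assert caesar('hi 123', 'abcdefghijklmnopqrstuvwxyz', 1) == 'ij 123'
assert rot47(rot47('yt-dlp')) == 'yt-dlp'  # applying it twice restores the input
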
e154c651 4834def parse_m3u8_attributes(attrib):
4835 info = {}
4836 for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
4837 if val.startswith('"'):
4838 val = val[1:-1]
4839 info[key] = val
4840 return info
1143535d
YCH
4841
4842
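# Illustrative sketch: parsing an EXT-X-STREAM-INF attribute list (sample line assumed):
from yt_dlp.utils import parse_m3u8_attributes
assert parse_m3u8_attributes('BANDWIDTH=1280000,CODECS="mp4a.40.2,avc1.64001f",RESOLUTION=1280x720') == {
    'BANDWIDTH': '1280000', 'CODECS': 'mp4a.40.2,avc1.64001f', 'RESOLUTION': '1280x720'}
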
4843def urshift(val, n):
4844 return val >> n if val >= 0 else (val + 0x100000000) >> n
d3f8e038
YCH
4845
4846
4847# Based on png2str() written by @gdkchan and improved by @yokrysty
067aa17e 4848# Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
d3f8e038
YCH
4849def decode_png(png_data):
4850 # Reference: https://www.w3.org/TR/PNG/
4851 header = png_data[8:]
4852
4853 if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
86e5f3ed 4854 raise OSError('Not a valid PNG file.')
d3f8e038
YCH
4855
4856 int_map = {1: '>B', 2: '>H', 4: '>I'}
ac668111 4857 unpack_integer = lambda x: struct.unpack(int_map[len(x)], x)[0]
d3f8e038
YCH
4858
4859 chunks = []
4860
4861 while header:
4862 length = unpack_integer(header[:4])
4863 header = header[4:]
4864
4865 chunk_type = header[:4]
4866 header = header[4:]
4867
4868 chunk_data = header[:length]
4869 header = header[length:]
4870
4871 header = header[4:] # Skip CRC
4872
4873 chunks.append({
4874 'type': chunk_type,
4875 'length': length,
4876 'data': chunk_data
4877 })
4878
4879 ihdr = chunks[0]['data']
4880
4881 width = unpack_integer(ihdr[:4])
4882 height = unpack_integer(ihdr[4:8])
4883
4884 idat = b''
4885
4886 for chunk in chunks:
4887 if chunk['type'] == b'IDAT':
4888 idat += chunk['data']
4889
4890 if not idat:
86e5f3ed 4891 raise OSError('Unable to read PNG data.')
d3f8e038
YCH
4892
4893 decompressed_data = bytearray(zlib.decompress(idat))
4894
4895 stride = width * 3
4896 pixels = []
4897
4898 def _get_pixel(idx):
4899 x = idx % stride
4900 y = idx // stride
4901 return pixels[y][x]
4902
4903 for y in range(height):
4904 basePos = y * (1 + stride)
4905 filter_type = decompressed_data[basePos]
4906
4907 current_row = []
4908
4909 pixels.append(current_row)
4910
4911 for x in range(stride):
4912 color = decompressed_data[1 + basePos + x]
4913 basex = y * stride + x
4914 left = 0
4915 up = 0
4916
4917 if x > 2:
4918 left = _get_pixel(basex - 3)
4919 if y > 0:
4920 up = _get_pixel(basex - stride)
4921
4922 if filter_type == 1: # Sub
4923 color = (color + left) & 0xff
4924 elif filter_type == 2: # Up
4925 color = (color + up) & 0xff
4926 elif filter_type == 3: # Average
4927 color = (color + ((left + up) >> 1)) & 0xff
4928 elif filter_type == 4: # Paeth
4929 a = left
4930 b = up
4931 c = 0
4932
4933 if x > 2 and y > 0:
4934 c = _get_pixel(basex - stride - 3)
4935
4936 p = a + b - c
4937
4938 pa = abs(p - a)
4939 pb = abs(p - b)
4940 pc = abs(p - c)
4941
4942 if pa <= pb and pa <= pc:
4943 color = (color + a) & 0xff
4944 elif pb <= pc:
4945 color = (color + b) & 0xff
4946 else:
4947 color = (color + c) & 0xff
4948
4949 current_row.append(color)
4950
4951 return width, height, pixels
efa97bdc
YCH
4952
4953
4954def write_xattr(path, key, value):
6f7563be 4955 # Windows: Write xattrs to NTFS Alternate Data Streams:
4956 # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
4957 if compat_os_name == 'nt':
4958 assert ':' not in key
4959 assert os.path.exists(path)
efa97bdc
YCH
4960
4961 try:
6f7563be 4962 with open(f'{path}:{key}', 'wb') as f:
4963 f.write(value)
86e5f3ed 4964 except OSError as e:
efa97bdc 4965 raise XAttrMetadataError(e.errno, e.strerror)
6f7563be 4966 return
efa97bdc 4967
6f7563be 4968 # UNIX Method 1. Use xattrs/pyxattrs modules
efa97bdc 4969
6f7563be 4970 setxattr = None
4971 if getattr(xattr, '_yt_dlp__identifier', None) == 'pyxattr':
4972 # Unicode arguments are not supported in pyxattr until version 0.5.0
4973 # See https://github.com/ytdl-org/youtube-dl/issues/5498
4974 if version_tuple(xattr.__version__) >= (0, 5, 0):
4975 setxattr = xattr.set
4976 elif xattr:
4977 setxattr = xattr.setxattr
efa97bdc 4978
6f7563be 4979 if setxattr:
4980 try:
4981 setxattr(path, key, value)
4982 except OSError as e:
4983 raise XAttrMetadataError(e.errno, e.strerror)
4984 return
efa97bdc 4985
6f7563be 4986 # UNIX Method 2. Use setfattr/xattr executables
4987 exe = ('setfattr' if check_executable('setfattr', ['--version'])
4988 else 'xattr' if check_executable('xattr', ['-h']) else None)
4989 if not exe:
4990 raise XAttrUnavailableError(
4991 'Couldn\'t find a tool to set the xattrs. Install either the python "xattr" or "pyxattr" modules or the '
4992 + ('"xattr" binary' if sys.platform != 'linux' else 'GNU "attr" package (which contains the "setfattr" tool)'))
efa97bdc 4993
0f06bcd7 4994 value = value.decode()
6f7563be 4995 try:
f0c9fb96 4996 _, stderr, returncode = Popen.run(
6f7563be 4997 [exe, '-w', key, value, path] if exe == 'xattr' else [exe, '-n', key, '-v', value, path],
e121e3ce 4998 text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
6f7563be 4999 except OSError as e:
5000 raise XAttrMetadataError(e.errno, e.strerror)
f0c9fb96 5001 if returncode:
5002 raise XAttrMetadataError(returncode, stderr)
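# Illustrative usage sketch (hypothetical `filepath`/`url` values, not from the
# original source); keys are namespaced strings and values must be bytes:
#
#   write_xattr(filepath, 'user.xdg.referrer.url', url.encode())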
0c265486
YCH
5003
5004
5005def random_birthday(year_field, month_field, day_field):
aa374bc7
AS
5006 start_date = datetime.date(1950, 1, 1)
5007 end_date = datetime.date(1995, 12, 31)
5008 offset = random.randint(0, (end_date - start_date).days)
5009 random_date = start_date + datetime.timedelta(offset)
0c265486 5010 return {
aa374bc7
AS
5011 year_field: str(random_date.year),
5012 month_field: str(random_date.month),
5013 day_field: str(random_date.day),
0c265486 5014 }
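# Illustrative sketch (not from the original source); the field names are caller-chosen:
#
#   random_birthday('birth_year', 'birth_month', 'birth_day')
#   # -> e.g. {'birth_year': '1984', 'birth_month': '7', 'birth_day': '21'}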
732044af 5015
c76eb41b 5016
732044af 5017# Templates for internet shortcut files, which are plain text files.
e5a998f3 5018DOT_URL_LINK_TEMPLATE = '''\
732044af 5019[InternetShortcut]
5020URL=%(url)s
e5a998f3 5021'''
732044af 5022
e5a998f3 5023DOT_WEBLOC_LINK_TEMPLATE = '''\
732044af 5024<?xml version="1.0" encoding="UTF-8"?>
5025<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
5026<plist version="1.0">
5027<dict>
5028\t<key>URL</key>
5029\t<string>%(url)s</string>
5030</dict>
5031</plist>
e5a998f3 5032'''
732044af 5033
e5a998f3 5034DOT_DESKTOP_LINK_TEMPLATE = '''\
732044af 5035[Desktop Entry]
5036Encoding=UTF-8
5037Name=%(filename)s
5038Type=Link
5039URL=%(url)s
5040Icon=text-html
e5a998f3 5041'''
732044af 5042
08438d2c 5043LINK_TEMPLATES = {
5044 'url': DOT_URL_LINK_TEMPLATE,
5045 'desktop': DOT_DESKTOP_LINK_TEMPLATE,
5046 'webloc': DOT_WEBLOC_LINK_TEMPLATE,
5047}
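# Illustrative sketch (hypothetical URL, not from the original source): each template
# is a plain %-format string keyed by link type:
#
#   LINK_TEMPLATES['url'] % {'url': 'https://example.com/watch?v=abc'}
#   # -> '[InternetShortcut]\nURL=https://example.com/watch?v=abc\n'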
5048
732044af 5049
5050def iri_to_uri(iri):
5051 """
5052 Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).
5053
5054 The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes (using UTF-8) only those characters that are not already escaped, leaving escaped sequences intact.
5055 """
5056
14f25df2 5057 iri_parts = urllib.parse.urlparse(iri)
732044af 5058
5059 if '[' in iri_parts.netloc:
5060 raise ValueError('IPv6 URIs are not yet supported.')
5061 # Querying `.netloc`, when there's only one bracket, also raises a ValueError.
5062
5063 # The `safe` argument values used by the following code contain the characters that should not be percent-encoded. Everything else except letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Anything already percent-encoded will be left as is.
5064
5065 net_location = ''
5066 if iri_parts.username:
f9934b96 5067 net_location += urllib.parse.quote(iri_parts.username, safe=r"!$%&'()*+,~")
732044af 5068 if iri_parts.password is not None:
f9934b96 5069 net_location += ':' + urllib.parse.quote(iri_parts.password, safe=r"!$%&'()*+,~")
732044af 5070 net_location += '@'
5071
0f06bcd7 5072 net_location += iri_parts.hostname.encode('idna').decode() # Punycode for Unicode hostnames.
732044af 5073 # The 'idna' encoding produces ASCII text.
5074 if iri_parts.port is not None and iri_parts.port != 80:
5075 net_location += ':' + str(iri_parts.port)
5076
f9934b96 5077 return urllib.parse.urlunparse(
732044af 5078 (iri_parts.scheme,
5079 net_location,
5080
f9934b96 5081 urllib.parse.quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),
732044af 5082
5083 # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
f9934b96 5084 urllib.parse.quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),
732044af 5085
5086 # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
f9934b96 5087 urllib.parse.quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),
732044af 5088
f9934b96 5089 urllib.parse.quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))
732044af 5090
5091 # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
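# Rough usage sketch (hypothetical IRI, not from the original source):
#
#   iri_to_uri('https://müller.example/päth?q=wörld')
#   # -> approximately 'https://xn--mller-kva.example/p%C3%A4th?q=w%C3%B6rld'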
5092
5093
5094def to_high_limit_path(path):
5095 if sys.platform in ['win32', 'cygwin']:
5096 # Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited.
e5a998f3 5097 return '\\\\?\\' + os.path.abspath(path)
732044af 5098
5099 return path
76d321f6 5100
c76eb41b 5101
7b2c3f47 5102def format_field(obj, field=None, template='%s', ignore=NO_DEFAULT, default='', func=IDENTITY):
e0ddbd02 5103 val = traverse_obj(obj, *variadic(field))
7b2c3f47 5104 if (not val and val != 0) if ignore is NO_DEFAULT else val in variadic(ignore):
e0ddbd02 5105 return default
7b2c3f47 5106 return template % func(val)
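# Illustrative sketch (hypothetical values, not from the original source):
#
#   format_field({'width': 1280}, 'width', '%dpx')           # -> '1280px'
#   format_field({}, 'width', '%dpx', default='unknown')     # -> 'unknown'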
00dd0cd5 5107
5108
5109def clean_podcast_url(url):
5110 return re.sub(r'''(?x)
5111 (?:
5112 (?:
5113 chtbl\.com/track|
5114 media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
5115 play\.podtrac\.com
5116 )/[^/]+|
5117 (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
5118 flex\.acast\.com|
5119 pd(?:
5120 cn\.co| # https://podcorn.com/analytics-prefix/
5121 st\.fm # https://podsights.com/docs/
5122 )/e
5123 )/''', '', url)
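# Illustrative sketch (hypothetical URL, not from the original source):
#
#   clean_podcast_url('https://chtbl.com/track/12345/traffic.example.com/episode.mp3')
#   # -> 'https://traffic.example.com/episode.mp3'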
ffcb8191
THD
5124
5125
5126_HEX_TABLE = '0123456789abcdef'
5127
5128
5129def random_uuidv4():
5130 return re.sub(r'[xy]', lambda x: _HEX_TABLE[random.randint(0, 15)], 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')
0202b52a 5131
5132
5133def make_dir(path, to_screen=None):
5134 try:
5135 dn = os.path.dirname(path)
5136 if dn and not os.path.exists(dn):
5137 os.makedirs(dn)
5138 return True
86e5f3ed 5139 except OSError as err:
0202b52a 5140 if callable(to_screen):
5141 to_screen('unable to create directory ' + error_to_compat_str(err))
5142 return False
f74980cb 5143
5144
5145def get_executable_path():
b5899f4f 5146 from .update import _get_variant_and_executable_path
c487cf00 5147
b5899f4f 5148 return os.path.dirname(os.path.abspath(_get_variant_and_executable_path()[1]))
f74980cb 5149
5150
2f567473 5151def load_plugins(name, suffix, namespace):
3ae5e797 5152 classes = {}
19a03940 5153 with contextlib.suppress(FileNotFoundError):
019a94f7
ÁS
5154 plugins_spec = importlib.util.spec_from_file_location(
5155 name, os.path.join(get_executable_path(), 'ytdlp_plugins', name, '__init__.py'))
5156 plugins = importlib.util.module_from_spec(plugins_spec)
5157 sys.modules[plugins_spec.name] = plugins
5158 plugins_spec.loader.exec_module(plugins)
f74980cb 5159 for name in dir(plugins):
2f567473 5160 if name in namespace:
5161 continue
5162 if not name.endswith(suffix):
f74980cb 5163 continue
5164 klass = getattr(plugins, name)
3ae5e797 5165 classes[name] = namespace[name] = klass
f74980cb 5166 return classes
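# Illustrative sketch (not from the original source): load user-defined extractor
# classes (names ending in 'IE') from <executable dir>/ytdlp_plugins/extractor/__init__.py
# into the given namespace:
#
#   plugin_ies = load_plugins('extractor', 'IE', namespace=globals())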
06167fbb 5167
5168
325ebc17 5169def traverse_obj(
352d63fd 5170 obj, *path_list, default=None, expected_type=None, get_all=True,
325ebc17 5171 casesense=True, is_user_input=False, traverse_string=False):
324ad820 5172 ''' Traverse nested list/dict/tuple
8f334380 5173 @param path_list A list of paths which are checked one by one.
19a03940 5174 Each path is a list of keys, where each key is one of:
5175 - None: Do nothing
5176 - string: A dictionary key
5177 - int: An index into a list
5178 - tuple: A list of keys all of which will be traversed
5179 - Ellipsis: Fetch all values in the object
5180 - Function: Takes the key and value as arguments
5181 and returns whether the key matches or not
325ebc17 5182 @param default Default value to return
352d63fd 5183 @param expected_type Only accept final value of this type (Can also be any callable)
5184 @param get_all Return all the values obtained from a path or only the first one
324ad820 5185 @param casesense Whether to consider dictionary keys as case sensitive
5186 @param is_user_input Whether the keys are generated from user input. If True,
5187 strings are converted to int/slice if necessary
5188 @param traverse_string Whether to traverse inside strings. If True, any
5189 non-compatible object will also be converted into a string
8f334380 5190 # TODO: Write tests
324ad820 5191 '''
325ebc17 5192 if not casesense:
dbf5416a 5193 _lower = lambda k: (k.lower() if isinstance(k, str) else k)
8f334380 5194 path_list = (map(_lower, variadic(path)) for path in path_list)
5195
5196 def _traverse_obj(obj, path, _current_depth=0):
5197 nonlocal depth
5198 path = tuple(variadic(path))
5199 for i, key in enumerate(path):
1797b073 5200 if None in (key, obj):
5201 return obj
8f334380 5202 if isinstance(key, (list, tuple)):
5203 obj = [_traverse_obj(obj, sub_key, _current_depth) for sub_key in key]
5204 key = ...
5205 if key is ...:
5206 obj = (obj.values() if isinstance(obj, dict)
5207 else obj if isinstance(obj, (list, tuple, LazyList))
5208 else str(obj) if traverse_string else [])
5209 _current_depth += 1
5210 depth = max(depth, _current_depth)
5211 return [_traverse_obj(inner_obj, path[i + 1:], _current_depth) for inner_obj in obj]
2614f646 5212 elif callable(key):
5213 if isinstance(obj, (list, tuple, LazyList)):
5214 obj = enumerate(obj)
5215 elif isinstance(obj, dict):
5216 obj = obj.items()
5217 else:
5218 if not traverse_string:
5219 return None
5220 obj = str(obj)
5221 _current_depth += 1
5222 depth = max(depth, _current_depth)
e6f868a6 5223 return [_traverse_obj(v, path[i + 1:], _current_depth) for k, v in obj if try_call(key, args=(k, v))]
575e17a1 5224 elif isinstance(obj, dict) and not (is_user_input and key == ':'):
325ebc17 5225 obj = (obj.get(key) if casesense or (key in obj)
5226 else next((v for k, v in obj.items() if _lower(k) == key), None))
5227 else:
5228 if is_user_input:
5229 key = (int_or_none(key) if ':' not in key
5230 else slice(*map(int_or_none, key.split(':'))))
8f334380 5231 if key == slice(None):
575e17a1 5232 return _traverse_obj(obj, (..., *path[i + 1:]), _current_depth)
325ebc17 5233 if not isinstance(key, (int, slice)):
9fea350f 5234 return None
8f334380 5235 if not isinstance(obj, (list, tuple, LazyList)):
325ebc17 5236 if not traverse_string:
5237 return None
5238 obj = str(obj)
5239 try:
5240 obj = obj[key]
5241 except IndexError:
324ad820 5242 return None
325ebc17 5243 return obj
5244
352d63fd 5245 if isinstance(expected_type, type):
5246 type_test = lambda val: val if isinstance(val, expected_type) else None
352d63fd 5247 else:
7b2c3f47 5248 type_test = expected_type or IDENTITY
352d63fd 5249
8f334380 5250 for path in path_list:
5251 depth = 0
5252 val = _traverse_obj(obj, path)
325ebc17 5253 if val is not None:
8f334380 5254 if depth:
5255 for _ in range(depth - 1):
6586bca9 5256 val = itertools.chain.from_iterable(v for v in val if v is not None)
352d63fd 5257 val = [v for v in map(type_test, val) if v is not None]
8f334380 5258 if val:
352d63fd 5259 return val if get_all else val[0]
5260 else:
5261 val = type_test(val)
5262 if val is not None:
8f334380 5263 return val
325ebc17 5264 return default
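# Illustrative sketch (hypothetical data, not from the original source):
#
#   traverse_obj({'a': [{'b': 1}, {'b': 2}]}, ('a', ..., 'b'))            # -> [1, 2]
#   traverse_obj({'a': [{'b': 1}]}, ('x',), ('a', 0, 'b'))                # -> 1 (first path that yields a value)
#   traverse_obj({'a': 'text'}, ('a', 0), expected_type=int, default=-1)  # -> -1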
324ad820 5265
5266
5267def traverse_dict(dictn, keys, casesense=True):
ee8dd27a 5268 write_string('DeprecationWarning: yt_dlp.utils.traverse_dict is deprecated '
5269 'and may be removed in a future version. Use yt_dlp.utils.traverse_obj instead')
5270 return traverse_obj(dictn, keys, casesense=casesense, is_user_input=True, traverse_string=True)
6606817a 5271
5272
ff91cf74 5273def get_first(obj, keys, **kwargs):
5274 return traverse_obj(obj, (..., *variadic(keys)), **kwargs, get_all=False)
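# Illustrative sketch (hypothetical data, not from the original source):
#
#   get_first([{'id': None}, {'id': 'abc'}], 'id')   # -> 'abc'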
5275
5276
4b4b7f74 5277def variadic(x, allowed_types=(str, bytes, dict)):
cb89cfc1 5278 return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,)
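# Illustrative sketch (not from the original source):
#
#   variadic('spam')      # -> ('spam',)   (str is in allowed_types, so it is wrapped)
#   variadic(['a', 'b'])  # -> ['a', 'b']  (already a non-excluded iterable)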
bd50a52b
THD
5279
5280
3e9b66d7
LNO
5281def time_seconds(**kwargs):
5282 t = datetime.datetime.now(datetime.timezone(datetime.timedelta(**kwargs)))
5283 return t.timestamp()
5284
5285
49fa4d9a
N
5286# Create a JSON Web Signature (JWS) with the HS256 algorithm
5287# The resulting format is JWS Compact Serialization
5288# Implemented following JWT: https://www.rfc-editor.org/rfc/rfc7519.html
5289# Implemented following JWS: https://www.rfc-editor.org/rfc/rfc7515.html
5290def jwt_encode_hs256(payload_data, key, headers={}):
5291 header_data = {
5292 'alg': 'HS256',
5293 'typ': 'JWT',
5294 }
5295 if headers:
5296 header_data.update(headers)
0f06bcd7 5297 header_b64 = base64.b64encode(json.dumps(header_data).encode())
5298 payload_b64 = base64.b64encode(json.dumps(payload_data).encode())
5299 h = hmac.new(key.encode(), header_b64 + b'.' + payload_b64, hashlib.sha256)
49fa4d9a
N
5300 signature_b64 = base64.b64encode(h.digest())
5301 token = header_b64 + b'.' + payload_b64 + b'.' + signature_b64
5302 return token
819e0531 5303
5304
16b0d7e6 5305# can be extended in future to verify the signature and parse header and return the algorithm used if it's not HS256
5306def jwt_decode_hs256(jwt):
5307 header_b64, payload_b64, signature_b64 = jwt.split('.')
5308 payload_data = json.loads(base64.urlsafe_b64decode(payload_b64))
5309 return payload_data
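# Illustrative round-trip sketch (hypothetical payload/key, not from the original source):
#
#   token = jwt_encode_hs256({'sub': 'abc'}, 'secret')  # bytes: b'<header>.<payload>.<signature>'
#   jwt_decode_hs256(token.decode())                    # -> {'sub': 'abc'} (signature is NOT verified)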
5310
5311
53973b4d 5312WINDOWS_VT_MODE = False if compat_os_name == 'nt' else None
5313
5314
0b9c08b4 5315@functools.cache
819e0531 5316def supports_terminal_sequences(stream):
5317 if compat_os_name == 'nt':
8a82af35 5318 if not WINDOWS_VT_MODE:
819e0531 5319 return False
5320 elif not os.getenv('TERM'):
5321 return False
5322 try:
5323 return stream.isatty()
5324 except BaseException:
5325 return False
5326
5327
53973b4d 5328def windows_enable_vt_mode(): # TODO: Do this the proper way https://bugs.python.org/issue30075
8a82af35 5329 if get_windows_version() < (10, 0, 10586):
53973b4d 5330 return
5331 global WINDOWS_VT_MODE
53973b4d 5332 try:
f0c9fb96 5333 Popen.run('', shell=True)
53973b4d 5334 except Exception:
5335 return
5336
5337 WINDOWS_VT_MODE = True
5338 supports_terminal_sequences.cache_clear()
5339
5340
ec11a9f4 5341_terminal_sequences_re = re.compile('\033\\[[^m]+m')
5342
5343
5344def remove_terminal_sequences(string):
5345 return _terminal_sequences_re.sub('', string)
5346
5347
5348def number_of_digits(number):
5349 return len('%d' % number)
34921b43 5350
5351
5352def join_nonempty(*values, delim='-', from_dict=None):
5353 if from_dict is not None:
7b2c3f47 5354 values = (traverse_obj(from_dict, variadic(v)) for v in values)
34921b43 5355 return delim.join(map(str, filter(None, values)))
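# Illustrative sketch (not from the original source):
#
#   join_nonempty('mp4', None, '', 1080, delim='-')   # -> 'mp4-1080'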
06e57990 5356
5357
27231526
ZM
5358def scale_thumbnails_to_max_format_width(formats, thumbnails, url_width_re):
5359 """
5360 Find the largest format dimensions in terms of video width and, for each thumbnail:
5361 * Modify the URL: Match the width with the provided regex and replace it with the largest format width
5362 * Update dimensions
5363
5364 This function is useful with video services that scale the provided thumbnails on demand
5365 """
5366 _keys = ('width', 'height')
5367 max_dimensions = max(
86e5f3ed 5368 (tuple(format.get(k) or 0 for k in _keys) for format in formats),
27231526
ZM
5369 default=(0, 0))
5370 if not max_dimensions[0]:
5371 return thumbnails
5372 return [
5373 merge_dicts(
5374 {'url': re.sub(url_width_re, str(max_dimensions[0]), thumbnail['url'])},
5375 dict(zip(_keys, max_dimensions)), thumbnail)
5376 for thumbnail in thumbnails
5377 ]
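# Illustrative sketch (hypothetical data, not from the original source):
#
#   formats = [{'width': 1920, 'height': 1080}, {'width': 640, 'height': 360}]
#   thumbnails = [{'url': 'https://example.com/thumb/320/cover.jpg'}]
#   scale_thumbnails_to_max_format_width(formats, thumbnails, r'(?<=/)\d+(?=/)')
#   # -> [{'url': 'https://example.com/thumb/1920/cover.jpg', 'width': 1920, 'height': 1080}]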
5378
5379
93c8410d
LNO
5380def parse_http_range(range):
5381 """ Parse value of "Range" or "Content-Range" HTTP header into tuple. """
5382 if not range:
5383 return None, None, None
5384 crg = re.search(r'bytes[ =](\d+)-(\d+)?(?:/(\d+))?', range)
5385 if not crg:
5386 return None, None, None
5387 return int(crg.group(1)), int_or_none(crg.group(2)), int_or_none(crg.group(3))
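# Illustrative sketch (hypothetical header values, not from the original source):
#
#   parse_http_range('bytes 0-499/1234')   # -> (0, 499, 1234)
#   parse_http_range('bytes=500-')         # -> (500, None, None)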
5388
5389
6b9e832d 5390def read_stdin(what):
5391 eof = 'Ctrl+Z' if compat_os_name == 'nt' else 'Ctrl+D'
5392 write_string(f'Reading {what} from STDIN - EOF ({eof}) to end:\n')
5393 return sys.stdin
5394
5395
a904a7f8
L
5396def determine_file_encoding(data):
5397 """
88f60feb 5398 Detect the text encoding used
a904a7f8
L
5399 @returns (encoding, bytes to skip)
5400 """
5401
88f60feb 5402 # BOM markers are given priority over coding declarations
a904a7f8 5403 for bom, enc in BOMS:
a904a7f8
L
5404 if data.startswith(bom):
5405 return enc, len(bom)
5406
88f60feb 5407 # Strip off all null bytes to match even when UTF-16 or UTF-32 is used.
5408 # We ignore the endianness to get a good enough match
a904a7f8 5409 data = data.replace(b'\0', b'')
88f60feb 5410 mobj = re.match(rb'(?m)^#\s*coding\s*:\s*(\S+)\s*$', data)
5411 return mobj.group(1).decode() if mobj else None, 0
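# Illustrative sketch (not from the original source), assuming BOMS (defined earlier
# in this module) maps the UTF-8 BOM to 'utf-8':
#
#   determine_file_encoding(b'\xef\xbb\xbf--no-mtime\n')    # -> ('utf-8', 3)
#   determine_file_encoding(b'# coding: shift-jis\n-v\n')   # -> ('shift-jis', 0)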
a904a7f8
L
5412
5413
06e57990 5414class Config:
5415 own_args = None
9e491463 5416 parsed_args = None
06e57990 5417 filename = None
5418 __initialized = False
5419
5420 def __init__(self, parser, label=None):
9e491463 5421 self.parser, self.label = parser, label
06e57990 5422 self._loaded_paths, self.configs = set(), []
5423
5424 def init(self, args=None, filename=None):
5425 assert not self.__initialized
284a60c5 5426 self.own_args, self.filename = args, filename
5427 return self.load_configs()
5428
5429 def load_configs(self):
65662dff 5430 directory = ''
284a60c5 5431 if self.filename:
5432 location = os.path.realpath(self.filename)
65662dff 5433 directory = os.path.dirname(location)
06e57990 5434 if location in self._loaded_paths:
5435 return False
5436 self._loaded_paths.add(location)
5437
284a60c5 5438 self.__initialized = True
5439 opts, _ = self.parser.parse_known_args(self.own_args)
5440 self.parsed_args = self.own_args
9e491463 5441 for location in opts.config_locations or []:
6b9e832d 5442 if location == '-':
5443 self.append_config(shlex.split(read_stdin('options'), comments=True), label='stdin')
5444 continue
65662dff 5445 location = os.path.join(directory, expand_path(location))
06e57990 5446 if os.path.isdir(location):
5447 location = os.path.join(location, 'yt-dlp.conf')
5448 if not os.path.exists(location):
9e491463 5449 self.parser.error(f'config location {location} does not exist')
06e57990 5450 self.append_config(self.read_file(location), location)
5451 return True
5452
5453 def __str__(self):
5454 label = join_nonempty(
5455 self.label, 'config', f'"{self.filename}"' if self.filename else '',
5456 delim=' ')
5457 return join_nonempty(
5458 self.own_args is not None and f'{label[0].upper()}{label[1:]}: {self.hide_login_info(self.own_args)}',
5459 *(f'\n{c}'.replace('\n', '\n| ')[1:] for c in self.configs),
5460 delim='\n')
5461
5462 @staticmethod
5463 def read_file(filename, default=[]):
5464 try:
a904a7f8 5465 optionf = open(filename, 'rb')
86e5f3ed 5466 except OSError:
06e57990 5467 return default # silently skip if file is not present
a904a7f8
L
5468 try:
5469 enc, skip = determine_file_encoding(optionf.read(512))
5470 optionf.seek(skip, io.SEEK_SET)
5471 except OSError:
5472 enc = None # silently skip read errors
06e57990 5473 try:
5474 # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
a904a7f8 5475 contents = optionf.read().decode(enc or preferredencoding())
f9934b96 5476 res = shlex.split(contents, comments=True)
44a6fcff 5477 except Exception as err:
5478 raise ValueError(f'Unable to parse "{filename}": {err}')
06e57990 5479 finally:
5480 optionf.close()
5481 return res
5482
5483 @staticmethod
5484 def hide_login_info(opts):
86e5f3ed 5485 PRIVATE_OPTS = {'-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'}
06e57990 5486 eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')
5487
5488 def _scrub_eq(o):
5489 m = eqre.match(o)
5490 if m:
5491 return m.group('key') + '=PRIVATE'
5492 else:
5493 return o
5494
5495 opts = list(map(_scrub_eq, opts))
5496 for idx, opt in enumerate(opts):
5497 if opt in PRIVATE_OPTS and idx + 1 < len(opts):
5498 opts[idx + 1] = 'PRIVATE'
5499 return opts
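# Illustrative sketch (hypothetical arguments, not from the original source):
#
#   Config.hide_login_info(['-u', 'name', '--video-password=hunter2'])
#   # -> ['-u', 'PRIVATE', '--video-password=PRIVATE']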
5500
5501 def append_config(self, *args, label=None):
9e491463 5502 config = type(self)(self.parser, label)
06e57990 5503 config._loaded_paths = self._loaded_paths
5504 if config.init(*args):
5505 self.configs.append(config)
5506
5507 @property
5508 def all_args(self):
5509 for config in reversed(self.configs):
5510 yield from config.all_args
9e491463 5511 yield from self.parsed_args or []
5512
5513 def parse_known_args(self, **kwargs):
5514 return self.parser.parse_known_args(self.all_args, **kwargs)
06e57990 5515
5516 def parse_args(self):
9e491463 5517 return self.parser.parse_args(self.all_args)
da42679b
LNO
5518
5519
5520class WebSocketsWrapper:
5521 """Wraps websockets module to use in non-async scopes"""
abfecb7b 5522 pool = None
da42679b 5523
3cea3edd 5524 def __init__(self, url, headers=None, connect=True):
059bc4db 5525 self.loop = asyncio.new_event_loop()
9cd08050 5526 # XXX: "loop" is deprecated
5527 self.conn = websockets.connect(
5528 url, extra_headers=headers, ping_interval=None,
5529 close_timeout=float('inf'), loop=self.loop, ping_timeout=float('inf'))
3cea3edd
LNO
5530 if connect:
5531 self.__enter__()
15dfb392 5532 atexit.register(self.__exit__, None, None, None)
da42679b
LNO
5533
5534 def __enter__(self):
3cea3edd 5535 if not self.pool:
9cd08050 5536 self.pool = self.run_with_loop(self.conn.__aenter__(), self.loop)
da42679b
LNO
5537 return self
5538
5539 def send(self, *args):
5540 self.run_with_loop(self.pool.send(*args), self.loop)
5541
5542 def recv(self, *args):
5543 return self.run_with_loop(self.pool.recv(*args), self.loop)
5544
5545 def __exit__(self, type, value, traceback):
5546 try:
5547 return self.run_with_loop(self.conn.__aexit__(type, value, traceback), self.loop)
5548 finally:
5549 self._cancel_all_tasks(self.loop)
15dfb392 5550 self.loop.close()
da42679b
LNO
5551
5552 # taken from https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py with modifications
5553 # For contributors: If any new library that uses asyncio needs to run in a non-async scope, move these functions out of this class
5554 @staticmethod
5555 def run_with_loop(main, loop):
059bc4db 5556 if not asyncio.iscoroutine(main):
da42679b
LNO
5557 raise ValueError(f'a coroutine was expected, got {main!r}')
5558
5559 try:
5560 return loop.run_until_complete(main)
5561 finally:
5562 loop.run_until_complete(loop.shutdown_asyncgens())
5563 if hasattr(loop, 'shutdown_default_executor'):
5564 loop.run_until_complete(loop.shutdown_default_executor())
5565
5566 @staticmethod
5567 def _cancel_all_tasks(loop):
059bc4db 5568 to_cancel = asyncio.all_tasks(loop)
da42679b
LNO
5569
5570 if not to_cancel:
5571 return
5572
5573 for task in to_cancel:
5574 task.cancel()
5575
9cd08050 5576 # XXX: "loop" is removed in python 3.10+
da42679b 5577 loop.run_until_complete(
059bc4db 5578 asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))
da42679b
LNO
5579
5580 for task in to_cancel:
5581 if task.cancelled():
5582 continue
5583 if task.exception() is not None:
5584 loop.call_exception_handler({
5585 'message': 'unhandled exception during asyncio.run() shutdown',
5586 'exception': task.exception(),
5587 'task': task,
5588 })
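# Illustrative usage sketch (hypothetical URL/headers, not from the original source):
#
#   ws = WebSocketsWrapper('wss://example.com/socket', headers={'Origin': 'https://example.com'})
#   ws.send('{"type": "startWatching"}')
#   message = ws.recv()
#   ws.__exit__(None, None, None)  # closes the connection and the event loop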
5589
5590
8b7539d2 5591def merge_headers(*dicts):
08d30158 5592 """Merge dicts of http headers case insensitively, prioritizing the latter ones"""
76aa9913 5593 return {k.title(): v for k, v in itertools.chain.from_iterable(map(dict.items, dicts))}
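# Illustrative sketch (not from the original source):
#
#   merge_headers({'user-agent': 'A', 'Accept': '*/*'}, {'User-Agent': 'B'})
#   # -> {'User-Agent': 'B', 'Accept': '*/*'}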
28787f16 5594
5595
b1f94422 5596def cached_method(f):
5597 """Cache a method"""
5598 signature = inspect.signature(f)
5599
5600 @functools.wraps(f)
5601 def wrapper(self, *args, **kwargs):
5602 bound_args = signature.bind(self, *args, **kwargs)
5603 bound_args.apply_defaults()
5604 key = tuple(bound_args.arguments.values())
5605
5606 if not hasattr(self, '__cached_method__cache'):
5607 self.__cached_method__cache = {}
5608 cache = self.__cached_method__cache.setdefault(f.__name__, {})
5609 if key not in cache:
5610 cache[key] = f(self, *args, **kwargs)
5611 return cache[key]
5612 return wrapper
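# Illustrative sketch (hypothetical class and helper, not from the original source):
#
#   class Fetcher:
#       @cached_method
#       def fetch(self, url):
#           return expensive_download(url)  # hypothetical helper; runs once per (self, url)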
5613
5614
28787f16 5615class classproperty:
b1f94422 5616 """property access for class methods"""
c487cf00 5617
5618 def __init__(self, func):
5619 functools.update_wrapper(self, func)
5620 self.func = func
28787f16 5621
5622 def __get__(self, _, cls):
c487cf00 5623 return self.func(cls)
19a03940 5624
5625
64fa820c 5626class Namespace(types.SimpleNamespace):
591bb9d3 5627 """Immutable namespace"""
591bb9d3 5628
7896214c 5629 def __iter__(self):
64fa820c 5630 return iter(self.__dict__.values())
7896214c 5631
64fa820c 5632 @property
5633 def items_(self):
5634 return self.__dict__.items()
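# Illustrative sketch (not from the original source):
#
#   ns = Namespace(video='mp4', audio='m4a')
#   list(ns)          # -> ['mp4', 'm4a']
#   dict(ns.items_)   # -> {'video': 'mp4', 'audio': 'm4a'}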
9b8ee23b 5635
5636
8dc59305 5637MEDIA_EXTENSIONS = Namespace(
5638 common_video=('avi', 'flv', 'mkv', 'mov', 'mp4', 'webm'),
5639 video=('3g2', '3gp', 'f4v', 'mk3d', 'divx', 'mpg', 'ogv', 'm4v', 'wmv'),
5640 common_audio=('aiff', 'alac', 'flac', 'm4a', 'mka', 'mp3', 'ogg', 'opus', 'wav'),
5641 audio=('aac', 'ape', 'asf', 'f4a', 'f4b', 'm4b', 'm4p', 'm4r', 'oga', 'ogx', 'spx', 'vorbis', 'wma'),
5642 thumbnails=('jpg', 'png', 'webp'),
5643 storyboards=('mhtml', ),
5644 subtitles=('srt', 'vtt', 'ass', 'lrc'),
5645 manifests=('f4f', 'f4m', 'm3u8', 'smil', 'mpd'),
5646)
5647MEDIA_EXTENSIONS.video += MEDIA_EXTENSIONS.common_video
5648MEDIA_EXTENSIONS.audio += MEDIA_EXTENSIONS.common_audio
5649
5650KNOWN_EXTENSIONS = (*MEDIA_EXTENSIONS.video, *MEDIA_EXTENSIONS.audio, *MEDIA_EXTENSIONS.manifests)
5651
5652
9b8ee23b 5653# Deprecated
5654has_certifi = bool(certifi)
5655has_websockets = bool(websockets)