import asyncio
import atexit
import base64
import binascii
import calendar
import codecs
import collections
import collections.abc
import contextlib
import datetime
import email.header
import email.utils
import errno
import gzip
import hashlib
import hmac
import html.entities
import html.parser
import http.client
import http.cookiejar
import inspect
import io
import itertools
import json
import locale
import math
import mimetypes
import netrc
import operator
import os
import platform
import random
import re
import shlex
import socket
import ssl
import struct
import subprocess
import sys
import tempfile
import time
import traceback
import types
import unicodedata
import urllib.error
import urllib.parse
import urllib.request
import xml.etree.ElementTree
import zlib

from . import traversal

from ..compat import functools  # isort: split
from ..compat import (
    compat_etree_fromstring,
    compat_expanduser,
    compat_HTMLParseError,
    compat_os_name,
    compat_shlex_quote,
)
from ..dependencies import brotli, certifi, websockets, xattr
from ..socks import ProxyType, sockssocket

__name__ = __name__.rsplit('.', 1)[0]  # Pretend to be the parent module

# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))

def random_user_agent():
    _USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
    _CHROME_VERSIONS = (
        '90.0.4430.212',
        '90.0.4430.24',
        '90.0.4430.70',
        '90.0.4430.72',
        '90.0.4430.85',
        '90.0.4430.93',
        '91.0.4472.101',
        '91.0.4472.106',
        '91.0.4472.114',
        '91.0.4472.124',
        '91.0.4472.164',
        '91.0.4472.19',
        '91.0.4472.77',
        '92.0.4515.107',
        '92.0.4515.115',
        '92.0.4515.131',
        '92.0.4515.159',
        '92.0.4515.43',
        '93.0.4556.0',
        '93.0.4577.15',
        '93.0.4577.63',
        '93.0.4577.82',
        '94.0.4606.41',
        '94.0.4606.54',
        '94.0.4606.61',
        '94.0.4606.71',
        '94.0.4606.81',
        '94.0.4606.85',
        '95.0.4638.17',
        '95.0.4638.50',
        '95.0.4638.54',
        '95.0.4638.69',
        '95.0.4638.74',
        '96.0.4664.18',
        '96.0.4664.45',
        '96.0.4664.55',
        '96.0.4664.93',
        '97.0.4692.20',
    )
    return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)

SUPPORTED_ENCODINGS = [
    'gzip', 'deflate'
]
if brotli:
    SUPPORTED_ENCODINGS.append('br')

std_headers = {
    'User-Agent': random_user_agent(),
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-us,en;q=0.5',
    'Sec-Fetch-Mode': 'navigate',
}


USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}


class NO_DEFAULT:
    pass


def IDENTITY(x):
    return x


ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
    # these follow the genitive grammatical case (dopełniacz)
    # some websites might be using nominative, which will require another month list
    # https://en.wikibooks.org/wiki/Polish/Noun_cases
    'pl': ['stycznia', 'lutego', 'marca', 'kwietnia', 'maja', 'czerwca',
           'lipca', 'sierpnia', 'września', 'października', 'listopada', 'grudnia'],
}

# From https://github.com/python/cpython/blob/3.11/Lib/email/_parseaddr.py#L36-L42
TIMEZONE_NAMES = {
    'UT': 0, 'UTC': 0, 'GMT': 0, 'Z': 0,
    'AST': -4, 'ADT': -3,  # Atlantic (used in Canada)
    'EST': -5, 'EDT': -4,  # Eastern
    'CST': -6, 'CDT': -5,  # Central
    'MST': -7, 'MDT': -6,  # Mountain
    'PST': -8, 'PDT': -7   # Pacific
}

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))

DATE_FORMATS = (
    '%d %B %Y',
    '%d %b %Y',
    '%B %d %Y',
    '%B %dst %Y',
    '%B %dnd %Y',
    '%B %drd %Y',
    '%B %dth %Y',
    '%b %d %Y',
    '%b %dst %Y',
    '%b %dnd %Y',
    '%b %drd %Y',
    '%b %dth %Y',
    '%b %dst %Y %I:%M',
    '%b %dnd %Y %I:%M',
    '%b %drd %Y %I:%M',
    '%b %dth %Y %I:%M',
    '%Y %m %d',
    '%Y-%m-%d',
    '%Y.%m.%d.',
    '%Y/%m/%d',
    '%Y/%m/%d %H:%M',
    '%Y/%m/%d %H:%M:%S',
    '%Y%m%d%H%M',
    '%Y%m%d%H%M%S',
    '%Y%m%d',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S.%f',
    '%Y-%m-%d %H:%M:%S:%f',
    '%d.%m.%Y %H:%M',
    '%d.%m.%Y %H.%M',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%Y-%m-%dT%H:%M',
    '%b %d %Y at %H:%M',
    '%b %d %Y at %H:%M:%S',
    '%B %d %Y at %H:%M',
    '%B %d %Y at %H:%M:%S',
    '%H:%M %d-%b-%Y',
)

DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
    '%d-%m-%Y %H:%M',
    '%H:%M %d/%m/%Y',
])

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])

PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>\s*(?P<json_ld>{.+?}|\[.+?\])\s*</script>'

NUMBER_RE = r'\d+(?:\.\d+)?'

@functools.cache
def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref

def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    tf = tempfile.NamedTemporaryFile(
        prefix=f'{os.path.basename(fn)}.', dir=os.path.dirname(fn),
        suffix='.tmp', delete=False, mode='w', encoding='utf-8')

    try:
        with tf:
            json.dump(obj, tf, ensure_ascii=False)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            with contextlib.suppress(OSError):
                os.unlink(fn)
        with contextlib.suppress(OSError):
            mask = os.umask(0)
            os.umask(mask)
            os.chmod(tf.name, 0o666 & ~mask)
        os.rename(tf.name, fn)
    except Exception:
        with contextlib.suppress(OSError):
            os.remove(tf.name)
        raise

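# Illustrative usage sketch (added by the editor, not part of the original module):
# write_json_file() writes through a sibling temporary file and then os.rename()s it
# over the destination, so readers never observe a half-written JSON file.
# The function name and path below are hypothetical, for demonstration only.
def _example_write_json_file(path='info.json'):
    write_json_file({'id': 'abc123', 'title': 'Example'}, path)

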
def find_xpath_attr(node, xpath, key, val=None):
    """ Find the xpath xpath[@key=val] """
    assert re.match(r'^[a-zA-Z_-]+$', key)
    expr = xpath + ('[@%s]' % key if val is None else f"[@{key}='{val}']")
    return node.find(expr)

# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter


def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)


def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(xpath)

    if isinstance(xpath, str):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n


def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text


def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = f'{xpath}[@{key}]' if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]

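# Illustrative usage sketch (added by the editor, not part of the original module):
# how the xpath_* helpers cooperate with xpath_with_ns() on a namespaced document.
# The XML snippet and namespace URI are made up for demonstration.
def _example_xpath_helpers():
    doc = compat_etree_fromstring(
        '<root xmlns:media="http://example.com/media">'
        '<media:item id="42"><media:title>hello</media:title></media:item></root>')
    ns_map = {'media': 'http://example.com/media'}
    title = xpath_text(doc, xpath_with_ns('./media:item/media:title', ns_map), fatal=True)
    item_id = xpath_attr(doc, xpath_with_ns('./media:item', ns_map), 'id', default=None)
    return title, item_id  # ('hello', '42')

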
def get_element_by_id(id, html, **kwargs):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html, **kwargs)


def get_element_html_by_id(id, html, **kwargs):
    """Return the html of the tag with the specified ID in the passed HTML document"""
    return get_element_html_by_attribute('id', id, html, **kwargs)


def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_html_by_class(class_name, html):
    """Return the html of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_html_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_by_attribute(attribute, value, html, **kwargs):
    retval = get_elements_by_attribute(attribute, value, html, **kwargs)
    return retval[0] if retval else None


def get_element_html_by_attribute(attribute, value, html, **kargs):
    retval = get_elements_html_by_attribute(attribute, value, html, **kargs)
    return retval[0] if retval else None


def get_elements_by_class(class_name, html, **kargs):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_by_attribute(
        'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_html_by_class(class_name, html):
    """Return the html of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_html_by_attribute(
        'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_by_attribute(*args, **kwargs):
    """Return the content of all tags with the specified attribute in the passed HTML document as a list"""
    return [content for content, _ in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_html_by_attribute(*args, **kwargs):
    """Return the html of all tags with the specified attribute in the passed HTML document as a list"""
    return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_text_and_html_by_attribute(attribute, value, html, *, tag=r'[\w:.-]+', escape_value=True):
    """
    Return the text (content) and the html (whole) of the tag with the specified
    attribute in the passed HTML document
    """
    if not value:
        return

    quote = '' if re.match(r'''[\s"'`=<>]''', value) else '?'

    value = re.escape(value) if escape_value else value

    partial_element_re = rf'''(?x)
        <(?P<tag>{tag})
         (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
         \s{re.escape(attribute)}\s*=\s*(?P<_q>['"]{quote})(?-x:{value})(?P=_q)
        '''

    for m in re.finditer(partial_element_re, html):
        content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():])

        yield (
            unescapeHTML(re.sub(r'^(?P<q>["\'])(?P<content>.*)(?P=q)$', r'\g<content>', content, flags=re.DOTALL)),
            whole
        )


class HTMLBreakOnClosingTagParser(html.parser.HTMLParser):
    """
    HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
    closing tag for the first opening tag it has encountered, and can be used
    as a context manager
    """

    class HTMLBreakOnClosingTagException(Exception):
        pass

    def __init__(self):
        self.tagstack = collections.deque()
        html.parser.HTMLParser.__init__(self)

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def close(self):
        # handle_endtag does not return upon raising HTMLBreakOnClosingTagException,
        # so data remains buffered; we no longer have any interest in it, thus
        # override this method to discard it
        pass

    def handle_starttag(self, tag, _):
        self.tagstack.append(tag)

    def handle_endtag(self, tag):
        if not self.tagstack:
            raise compat_HTMLParseError('no tags in the stack')
        while self.tagstack:
            inner_tag = self.tagstack.pop()
            if inner_tag == tag:
                break
        else:
            raise compat_HTMLParseError(f'matching opening tag for closing {tag} tag not found')
        if not self.tagstack:
            raise self.HTMLBreakOnClosingTagException()


# XXX: This should be far less strict
def get_element_text_and_html_by_tag(tag, html):
    """
    For the first element with the specified tag in the passed HTML document
    return its content (text) and the whole element (html)
    """
    def find_or_raise(haystack, needle, exc):
        try:
            return haystack.index(needle)
        except ValueError:
            raise exc
    closing_tag = f'</{tag}>'
    whole_start = find_or_raise(
        html, f'<{tag}', compat_HTMLParseError(f'opening {tag} tag not found'))
    content_start = find_or_raise(
        html[whole_start:], '>', compat_HTMLParseError(f'malformed opening {tag} tag'))
    content_start += whole_start + 1
    with HTMLBreakOnClosingTagParser() as parser:
        parser.feed(html[whole_start:content_start])
        if not parser.tagstack or parser.tagstack[0] != tag:
            raise compat_HTMLParseError(f'parser did not match opening {tag} tag')
        offset = content_start
        while offset < len(html):
            next_closing_tag_start = find_or_raise(
                html[offset:], closing_tag,
                compat_HTMLParseError(f'closing {tag} tag not found'))
            next_closing_tag_end = next_closing_tag_start + len(closing_tag)
            try:
                parser.feed(html[offset:offset + next_closing_tag_end])
                offset += next_closing_tag_end
            except HTMLBreakOnClosingTagParser.HTMLBreakOnClosingTagException:
                return html[content_start:offset + next_closing_tag_start], \
                    html[whole_start:offset + next_closing_tag_end]
        raise compat_HTMLParseError('unexpected end of html')

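# Illustrative usage sketch (added by the editor, not part of the original module):
# the get_element*_by_* helpers pull content / outer HTML out of a page without a
# full DOM parser. The sample markup below is made up for demonstration.
def _example_get_element_helpers():
    page = '<div class="title main">Video &amp; Title</div><div class="desc">text</div>'
    assert get_element_by_class('title', page) == 'Video & Title'
    assert get_element_html_by_class('desc', page) == '<div class="desc">text</div>'
    # The *_by_tag variant returns the raw (not entity-decoded) content and the whole element
    assert get_element_text_and_html_by_tag('div', page)[0] == 'Video &amp; Title'

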
class HTMLAttributeParser(html.parser.HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""

    def __init__(self):
        self.attrs = {}
        html.parser.HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)
        raise compat_HTMLParseError('done')


class HTMLListAttrsParser(html.parser.HTMLParser):
    """HTML parser to gather the attributes for the elements of a list"""

    def __init__(self):
        html.parser.HTMLParser.__init__(self)
        self.items = []
        self._level = 0

    def handle_starttag(self, tag, attrs):
        if tag == 'li' and self._level == 0:
            self.items.append(dict(attrs))
        self._level += 1

    def handle_endtag(self, tag):
        self._level -= 1

def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    """
    parser = HTMLAttributeParser()
    with contextlib.suppress(compat_HTMLParseError):
        parser.feed(html_element)
        parser.close()
    return parser.attrs
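

# Illustrative usage sketch (added by the editor, not part of the original module):
# extract_attributes() on a single opening tag; the markup is made up for demonstration.
def _example_extract_attributes():
    attrs = extract_attributes('<video src="video.mp4" data-id=42 controls>')
    return attrs  # {'src': 'video.mp4', 'data-id': '42', 'controls': None}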


def parse_list(webpage):
    """Given a string for a series of HTML <li> elements,
    return a list of dictionaries of their attributes"""
    parser = HTMLListAttrsParser()
    parser.feed(webpage)
    parser.close()
    return parser.items


def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    html = re.sub(r'\s+', ' ', html)
    html = re.sub(r'(?u)\s?<\s?br\s?/?\s?>\s?', '\n', html)
    html = re.sub(r'(?u)<\s?/\s?p\s?>\s?<\s?p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()
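

# Illustrative usage sketch (added by the editor, not part of the original module):
# clean_html() flattens a description snippet into readable text. The snippet is made up.
def _example_clean_html():
    snippet = '<p>First line<br/>Second &amp; last</p>\n<p>New paragraph</p>'
    return clean_html(snippet)  # 'First line\nSecond & last\nNew paragraph'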


class LenientJSONDecoder(json.JSONDecoder):
    # TODO: Write tests
    def __init__(self, *args, transform_source=None, ignore_extra=False, close_objects=0, **kwargs):
        self.transform_source, self.ignore_extra = transform_source, ignore_extra
        self._close_attempts = 2 * close_objects
        super().__init__(*args, **kwargs)

    @staticmethod
    def _close_object(err):
        doc = err.doc[:err.pos]
        # We need to add comma first to get the correct error message
        if err.msg.startswith('Expecting \',\''):
            return doc + ','
        elif not doc.endswith(','):
            return

        if err.msg.startswith('Expecting property name'):
            return doc[:-1] + '}'
        elif err.msg.startswith('Expecting value'):
            return doc[:-1] + ']'

    def decode(self, s):
        if self.transform_source:
            s = self.transform_source(s)
        for attempt in range(self._close_attempts + 1):
            try:
                if self.ignore_extra:
                    return self.raw_decode(s.lstrip())[0]
                return super().decode(s)
            except json.JSONDecodeError as e:
                if e.pos is None:
                    raise
                elif attempt < self._close_attempts:
                    s = self._close_object(e)
                    if s is not None:
                        continue
                raise type(e)(f'{e.msg} in {s[e.pos-10:e.pos+10]!r}', s, e.pos)
        assert False, 'Too many attempts to decode JSON'
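

# Illustrative usage sketch (added by the editor, not part of the original module):
# LenientJSONDecoder can strip a wrapper via transform_source and ignore trailing
# garbage after the JSON document. The input string is made up for demonstration.
def _example_lenient_json():
    decoder = LenientJSONDecoder(
        transform_source=lambda s: s[s.index('{'):],  # drop an assumed anti-XSSI prefix
        ignore_extra=True)
    return decoder.decode(')]}\'{"ok": true}trailing garbage')  # {'ok': True}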


def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    if filename == '-':
        if sys.platform == 'win32':
            import msvcrt

            # stdout may be any IO stream, e.g. when using contextlib.redirect_stdout
            with contextlib.suppress(io.UnsupportedOperation):
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)

    for attempt in range(2):
        try:
            try:
                if sys.platform == 'win32':
                    # FIXME: An exclusive lock also locks the file from being read.
                    # Since windows locks are mandatory, don't lock the file on windows (for now).
                    # Ref: https://github.com/yt-dlp/yt-dlp/issues/3124
                    raise LockingUnsupportedError()
                stream = locked_file(filename, open_mode, block=False).__enter__()
            except OSError:
                stream = open(filename, open_mode)
            return stream, filename
        except OSError as err:
            if attempt or err.errno in (errno.EACCES,):
                raise
            old_filename, filename = filename, sanitize_path(filename)
            if old_filename == filename:
                raise


def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp


def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
    """Sanitizes a string so it could be used as part of a filename.
    @param restricted   Use a stricter subset of allowed characters
    @param is_id        Whether this is an ID that should be kept unchanged if possible.
                        If unset, yt-dlp's new sanitization rules are in effect
    """
    if s == '':
        return ''

    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        elif not restricted and char == '\n':
            return '\0 '
        elif is_id is NO_DEFAULT and not restricted and char in '"*:<>?|/\\':
            # Replace with their full-width unicode counterparts
            return {'/': '\u29F8', '\\': '\u29f9'}.get(char, chr(ord(char) + 0xfee0))
        elif char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '\0_\0-' if restricted else '\0 \0-'
        elif char in '\\/|*<>':
            return '\0_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace() or ord(char) > 127):
            return '\0_'
        return char

    # Replace look-alike Unicode glyphs
    if restricted and (is_id is NO_DEFAULT or not is_id):
        s = unicodedata.normalize('NFKC', s)
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)  # Handle timestamps
    result = ''.join(map(replace_insane, s))
    if is_id is NO_DEFAULT:
        result = re.sub(r'(\0.)(?:(?=\1)..)+', r'\1', result)  # Remove repeated substitute chars
        STRIP_RE = r'(?:\0.|[ _-])*'
        result = re.sub(f'^\0.{STRIP_RE}|{STRIP_RE}\0.$', '', result)  # Remove substitute chars from start/end
    result = result.replace('\0', '') or '_'

    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
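

# Illustrative usage sketch (added by the editor, not part of the original module):
# default vs. restricted sanitization; the title string is made up for demonstration.
def _example_sanitize_filename():
    title = 'Title: with * illegal / chars?'
    plain = sanitize_filename(title)                  # illegal chars become full-width look-alikes, e.g. '：' for ':'
    safe = sanitize_filename(title, restricted=True)  # ASCII-only; spaces and most punctuation collapse to '_'
    return plain, safe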


def sanitize_path(s, force=False):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform == 'win32':
        force = False
        drive_or_unc, _ = os.path.splitdrive(s)
    elif force:
        drive_or_unc = ''
    else:
        return s

    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    elif force and s and s[0] == os.path.sep:
        sanitized_path.insert(0, os.path.sep)
    return os.path.join(*sanitized_path)
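

# Illustrative usage sketch (added by the editor, not part of the original module):
# sanitize_path() is only active on Windows unless force=True; elsewhere the input is
# returned untouched. The path below is made up for demonstration.
def _example_sanitize_path():
    # On win32, reserved characters and trailing dots/spaces in each component are replaced with '#'
    return sanitize_path(r'C:\downloads\what?.mkv')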


def sanitize_url(url, *, scheme='http'):
    # Prepend protocol-less URLs with `http:` scheme in order to mitigate
    # the number of unwanted failures due to missing protocol
    if url is None:
        return
    elif url.startswith('//'):
        return f'{scheme}:{url}'
    # Fix some common typos seen so far
    COMMON_TYPOS = (
        # https://github.com/ytdl-org/youtube-dl/issues/15649
        (r'^httpss://', r'https://'),
        # https://bx1.be/lives/direct-tv/
        (r'^rmtp([es]?)://', r'rtmp\1://'),
    )
    for mistake, fixup in COMMON_TYPOS:
        if re.match(mistake, url):
            return re.sub(mistake, fixup, url)
    return url


def extract_basic_auth(url):
    parts = urllib.parse.urlsplit(url)
    if parts.username is None:
        return url, None
    url = urllib.parse.urlunsplit(parts._replace(netloc=(
        parts.hostname if parts.port is None
        else '%s:%d' % (parts.hostname, parts.port))))
    auth_payload = base64.b64encode(
        ('%s:%s' % (parts.username, parts.password or '')).encode())
    return url, f'Basic {auth_payload.decode()}'


def sanitized_Request(url, *args, **kwargs):
    url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
    if auth_header is not None:
        headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
        headers['Authorization'] = auth_header
    return urllib.request.Request(url, *args, **kwargs)
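

# Illustrative usage sketch (added by the editor, not part of the original module):
# the pipeline used by sanitized_Request(): fix the scheme, then split out HTTP Basic
# credentials into an Authorization header. The URLs are made up for demonstration.
def _example_url_sanitization():
    assert sanitize_url('//cdn.example.com/a.mp4') == 'http://cdn.example.com/a.mp4'
    assert sanitize_url('httpss://example.com') == 'https://example.com'
    url, auth = extract_basic_auth('http://user:pass@example.com/feed')
    return url, auth  # ('http://example.com/feed', 'Basic dXNlcjpwYXNz')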


def expand_path(s):
    """Expand shell variables and ~"""
    return os.path.expandvars(compat_expanduser(s))


def orderedSet(iterable, *, lazy=False):
    """Remove all duplicates from the input iterable"""
    def _iter():
        seen = []  # Do not use set since the items can be unhashable
        for x in iterable:
            if x not in seen:
                seen.append(x)
                yield x

    return _iter() if lazy else list(_iter())
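

# Illustrative usage sketch (added by the editor, not part of the original module):
# orderedSet() keeps first occurrences only and, unlike set(), works on unhashable items.
def _example_orderedSet():
    assert orderedSet([1, 2, 1, 3, 2]) == [1, 2, 3]
    assert orderedSet([{'a': 1}, {'a': 1}]) == [{'a': 1}]  # dicts are unhashable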


def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in html.entities.name2codepoint:
        return chr(html.entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon.
    # E.g. '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in html.entities.html5:
        return html.entities.html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/ytdl-org/youtube-dl/issues/7518
        with contextlib.suppress(ValueError):
            return chr(int(numstr, base))

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert isinstance(s, str)

    return re.sub(
        r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
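

# Illustrative usage sketch (added by the editor, not part of the original module):
# named, decimal and hex entities all go through _htmlentity_transform(); unknown
# entities are left as-is. The strings are made up for demonstration.
def _example_unescapeHTML():
    assert unescapeHTML('Tom &amp; Jerry') == 'Tom & Jerry'
    assert unescapeHTML('caf&#233; &#x2013; bar') == 'café – bar'
    assert unescapeHTML('&unknownentity;') == '&unknownentity;'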


def escapeHTML(text):
    return (
        text
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;')
        .replace('"', '&quot;')
        .replace("'", '&#39;')
    )


class netrc_from_content(netrc.netrc):
    def __init__(self, content):
        self.hosts, self.macros = {}, {}
        with io.StringIO(content) as stream:
            self._parse('-', stream, False)


class Popen(subprocess.Popen):
    if sys.platform == 'win32':
        _startupinfo = subprocess.STARTUPINFO()
        _startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    else:
        _startupinfo = None

    @staticmethod
    def _fix_pyinstaller_ld_path(env):
        """Restore LD_LIBRARY_PATH when using PyInstaller
            Ref: https://github.com/pyinstaller/pyinstaller/blob/develop/doc/runtime-information.rst#ld_library_path--libpath-considerations
                 https://github.com/yt-dlp/yt-dlp/issues/4573
        """
        if not hasattr(sys, '_MEIPASS'):
            return

        def _fix(key):
            orig = env.get(f'{key}_ORIG')
            if orig is None:
                env.pop(key, None)
            else:
                env[key] = orig

        _fix('LD_LIBRARY_PATH')  # Linux
        _fix('DYLD_LIBRARY_PATH')  # macOS

    def __init__(self, *args, env=None, text=False, **kwargs):
        if env is None:
            env = os.environ.copy()
        self._fix_pyinstaller_ld_path(env)

        self.__text_mode = kwargs.get('encoding') or kwargs.get('errors') or text or kwargs.get('universal_newlines')
        if text is True:
            kwargs['universal_newlines'] = True  # For 3.6 compatibility
            kwargs.setdefault('encoding', 'utf-8')
            kwargs.setdefault('errors', 'replace')
        super().__init__(*args, env=env, **kwargs, startupinfo=self._startupinfo)

    def communicate_or_kill(self, *args, **kwargs):
        try:
            return self.communicate(*args, **kwargs)
        except BaseException:  # Including KeyboardInterrupt
            self.kill(timeout=None)
            raise

    def kill(self, *, timeout=0):
        super().kill()
        if timeout != 0:
            self.wait(timeout=timeout)

    @classmethod
    def run(cls, *args, timeout=None, **kwargs):
        with cls(*args, **kwargs) as proc:
            default = '' if proc.__text_mode else b''
            stdout, stderr = proc.communicate_or_kill(timeout=timeout)
            return stdout or default, stderr or default, proc.returncode
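

# Illustrative usage sketch (added by the editor, not part of the original module):
# Popen.run() wraps subprocess handling into a single call returning (stdout, stderr,
# returncode). The command assumes an ffmpeg binary on PATH, purely for demonstration.
def _example_popen_run():
    stdout, stderr, returncode = Popen.run(
        ['ffmpeg', '-version'], text=True,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return returncode == 0, stdout.splitlines()[:1]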


def encodeArgument(s):
    # Legacy code that uses byte strings
    # Uncomment the following line after fixing all post processors
    # assert isinstance(s, str), 'Internal error: %r should be of type %r, is %r' % (s, str, type(s))
    return s if isinstance(s, str) else s.decode('ascii')


_timetuple = collections.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))


def timetuple_from_msec(msec):
    secs, msec = divmod(msec, 1000)
    mins, secs = divmod(secs, 60)
    hrs, mins = divmod(mins, 60)
    return _timetuple(hrs, mins, secs, msec)


def formatSeconds(secs, delim=':', msec=False):
    time = timetuple_from_msec(secs * 1000)
    if time.hours:
        ret = '%d%s%02d%s%02d' % (time.hours, delim, time.minutes, delim, time.seconds)
    elif time.minutes:
        ret = '%d%s%02d' % (time.minutes, delim, time.seconds)
    else:
        ret = '%d' % time.seconds
    return '%s.%03d' % (ret, time.milliseconds) if msec else ret
4539dd30 959
a0ddb8a2 960
77562778 961def _ssl_load_windows_store_certs(ssl_context, storename):
962 # Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
963 try:
964 certs = [cert for cert, encoding, trust in ssl.enum_certificates(storename)
965 if encoding == 'x509_asn' and (
966 trust is True or ssl.Purpose.SERVER_AUTH.oid in trust)]
967 except PermissionError:
968 return
969 for cert in certs:
19a03940 970 with contextlib.suppress(ssl.SSLError):
77562778 971 ssl_context.load_verify_locations(cadata=cert)
a2366922 972
77562778 973
974def make_HTTPS_handler(params, **kwargs):
975 opts_check_certificate = not params.get('nocheckcertificate')
976 context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
977 context.check_hostname = opts_check_certificate
f81c62a6 978 if params.get('legacyserverconnect'):
979 context.options |= 4 # SSL_OP_LEGACY_SERVER_CONNECT
4f28b537 980 # Allow use of weaker ciphers in Python 3.10+. See https://bugs.python.org/issue43998
981 context.set_ciphers('DEFAULT')
ac8e69dd
M
982 elif (
983 sys.version_info < (3, 10)
984 and ssl.OPENSSL_VERSION_INFO >= (1, 1, 1)
985 and not ssl.OPENSSL_VERSION.startswith('LibreSSL')
986 ):
5b9f253f
M
987 # Backport the default SSL ciphers and minimum TLS version settings from Python 3.10 [1].
988 # This is to ensure consistent behavior across Python versions, and help avoid fingerprinting
989 # in some situations [2][3].
990 # Python 3.10 only supports OpenSSL 1.1.1+ [4]. Because this change is likely
991 # untested on older versions, we only apply this to OpenSSL 1.1.1+ to be safe.
ac8e69dd 992 # LibreSSL is excluded until further investigation due to cipher support issues [5][6].
5b9f253f
M
993 # 1. https://github.com/python/cpython/commit/e983252b516edb15d4338b0a47631b59ef1e2536
994 # 2. https://github.com/yt-dlp/yt-dlp/issues/4627
995 # 3. https://github.com/yt-dlp/yt-dlp/pull/5294
996 # 4. https://peps.python.org/pep-0644/
ac8e69dd
M
997 # 5. https://peps.python.org/pep-0644/#libressl-support
998 # 6. https://github.com/yt-dlp/yt-dlp/commit/5b9f253fa0aee996cf1ed30185d4b502e00609c4#commitcomment-89054368
5b9f253f
M
999 context.set_ciphers('@SECLEVEL=2:ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES:DHE+AES:!aNULL:!eNULL:!aDSS:!SHA1:!AESCCM')
1000 context.minimum_version = ssl.TLSVersion.TLSv1_2
8a82af35 1001
77562778 1002 context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
1003 if opts_check_certificate:
69bec673 1004 if certifi and 'no-certifi' not in params.get('compat_opts', []):
d5820461 1005 context.load_verify_locations(cafile=certifi.where())
168bbc4f 1006 else:
1007 try:
1008 context.load_default_certs()
1009 # Work around the issue in load_default_certs when there are bad certificates. See:
1010 # https://github.com/yt-dlp/yt-dlp/issues/1060,
1011 # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
1012 except ssl.SSLError:
1013 # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
1014 if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
1015 for storename in ('CA', 'ROOT'):
1016 _ssl_load_windows_store_certs(context, storename)
1017 context.set_default_verify_paths()
8a82af35 1018
bb58c9ed 1019 client_certfile = params.get('client_certificate')
1020 if client_certfile:
1021 try:
1022 context.load_cert_chain(
1023 client_certfile, keyfile=params.get('client_certificate_key'),
1024 password=params.get('client_certificate_password'))
1025 except ssl.SSLError:
1026 raise YoutubeDLError('Unable to load client certificate')
2c6dcb65 1027
1028 # Some servers may reject requests if ALPN extension is not sent. See:
1029 # https://github.com/python/cpython/issues/85140
1030 # https://github.com/yt-dlp/yt-dlp/issues/3878
1031 with contextlib.suppress(NotImplementedError):
1032 context.set_alpn_protocols(['http/1.1'])
1033
77562778 1034 return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
ea6d901e 1035
732ea2f0 1036
5873d4cc 1037def bug_reports_message(before=';'):
69bec673 1038 from ..update import REPOSITORY
57e0f077 1039
1040 msg = (f'please report this issue on https://github.com/{REPOSITORY}/issues?q= , '
1041 'filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U')
5873d4cc
F
1042
1043 before = before.rstrip()
1044 if not before or before.endswith(('.', '!', '?')):
1045 msg = msg[0].title() + msg[1:]
1046
1047 return (before + ' ' if before else '') + msg
08f2a92c
JMF
1048
1049
bf5b9d85
PM
1050class YoutubeDLError(Exception):
1051 """Base exception for YoutubeDL errors."""
aa9369a2 1052 msg = None
1053
1054 def __init__(self, msg=None):
1055 if msg is not None:
1056 self.msg = msg
1057 elif self.msg is None:
1058 self.msg = type(self).__name__
1059 super().__init__(self.msg)
bf5b9d85
PM
1060
1061
ac668111 1062network_exceptions = [urllib.error.URLError, http.client.HTTPException, socket.error]
3158150c 1063if hasattr(ssl, 'CertificateError'):
1064 network_exceptions.append(ssl.CertificateError)
1065network_exceptions = tuple(network_exceptions)
1066
1067
bf5b9d85 1068class ExtractorError(YoutubeDLError):
1c256f70 1069 """Error during info extraction."""
5f6a1245 1070
1151c407 1071 def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=None):
9a82b238 1072 """ tb, if given, is the original traceback (so that it can be printed out).
7a5c1cfe 1073 If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
9a82b238 1074 """
3158150c 1075 if sys.exc_info()[0] in network_exceptions:
9a82b238 1076 expected = True
d5979c5d 1077
7265a219 1078 self.orig_msg = str(msg)
1c256f70 1079 self.traceback = tb
1151c407 1080 self.expected = expected
2eabb802 1081 self.cause = cause
d11271dd 1082 self.video_id = video_id
1151c407 1083 self.ie = ie
1084 self.exc_info = sys.exc_info() # preserve original exception
5df14442 1085 if isinstance(self.exc_info[1], ExtractorError):
1086 self.exc_info = self.exc_info[1].exc_info
9bcfe33b 1087 super().__init__(self.__msg)
1151c407 1088
9bcfe33b 1089 @property
1090 def __msg(self):
1091 return ''.join((
1092 format_field(self.ie, None, '[%s] '),
1093 format_field(self.video_id, None, '%s: '),
1094 self.orig_msg,
1095 format_field(self.cause, None, ' (caused by %r)'),
1096 '' if self.expected else bug_reports_message()))
1c256f70 1097
01951dda 1098 def format_traceback(self):
497d2fab 1099 return join_nonempty(
1100 self.traceback and ''.join(traceback.format_tb(self.traceback)),
e491d06d 1101 self.cause and ''.join(traceback.format_exception(None, self.cause, self.cause.__traceback__)[1:]),
497d2fab 1102 delim='\n') or None
01951dda 1103
9bcfe33b 1104 def __setattr__(self, name, value):
1105 super().__setattr__(name, value)
1106 if getattr(self, 'msg', None) and name not in ('msg', 'args'):
1107 self.msg = self.__msg or type(self).__name__
1108 self.args = (self.msg, ) # Cannot be property
1109
1c256f70 1110
416c7fcb
PH
1111class UnsupportedError(ExtractorError):
1112 def __init__(self, url):
86e5f3ed 1113 super().__init__(
416c7fcb
PH
1114 'Unsupported URL: %s' % url, expected=True)
1115 self.url = url
1116
1117
55b3e45b
JMF
1118class RegexNotFoundError(ExtractorError):
1119 """Error when a regex didn't match"""
1120 pass
1121
1122
773f291d
S
1123class GeoRestrictedError(ExtractorError):
1124 """Geographic restriction Error exception.
1125
1126 This exception may be thrown when a video is not available from your
1127 geographic location due to geographic restrictions imposed by a website.
1128 """
b6e0c7d2 1129
0db3bae8 1130 def __init__(self, msg, countries=None, **kwargs):
1131 kwargs['expected'] = True
86e5f3ed 1132 super().__init__(msg, **kwargs)
773f291d
S
1133 self.countries = countries
1134
1135
693f0600 1136class UserNotLive(ExtractorError):
1137 """Error when a channel/user is not live"""
1138
1139 def __init__(self, msg=None, **kwargs):
1140 kwargs['expected'] = True
1141 super().__init__(msg or 'The channel is not currently live', **kwargs)
1142
1143
bf5b9d85 1144class DownloadError(YoutubeDLError):
59ae15a5 1145 """Download Error exception.
d77c3dfd 1146
59ae15a5
PH
1147 This exception may be thrown by FileDownloader objects if they are not
1148 configured to continue on errors. They will contain the appropriate
1149 error message.
1150 """
5f6a1245 1151
8cc83b8d
FV
1152 def __init__(self, msg, exc_info=None):
1153 """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
86e5f3ed 1154 super().__init__(msg)
8cc83b8d 1155 self.exc_info = exc_info
d77c3dfd
FV
1156
1157
498f5606 1158class EntryNotInPlaylist(YoutubeDLError):
1159 """Entry not in playlist exception.
1160
1161 This exception will be thrown by YoutubeDL when a requested entry
1162 is not found in the playlist info_dict
1163 """
aa9369a2 1164 msg = 'Entry not found in info'
498f5606 1165
1166
bf5b9d85 1167class SameFileError(YoutubeDLError):
59ae15a5 1168 """Same File exception.
d77c3dfd 1169
59ae15a5
PH
1170 This exception will be thrown by FileDownloader objects if they detect
1171 multiple files would have to be downloaded to the same file on disk.
1172 """
aa9369a2 1173 msg = 'Fixed output name but more than one file to download'
1174
1175 def __init__(self, filename=None):
1176 if filename is not None:
1177 self.msg += f': {filename}'
1178 super().__init__(self.msg)
d77c3dfd
FV
1179
1180
bf5b9d85 1181class PostProcessingError(YoutubeDLError):
59ae15a5 1182 """Post Processing exception.
d77c3dfd 1183
59ae15a5
PH
1184 This exception may be raised by PostProcessor's .run() method to
1185 indicate an error in the postprocessing task.
1186 """
5f6a1245 1187
5f6a1245 1188
48f79687 1189class DownloadCancelled(YoutubeDLError):
1190 """ Exception raised when the download queue should be interrupted """
1191 msg = 'The download was cancelled'
8b0d7497 1192
8b0d7497 1193
48f79687 1194class ExistingVideoReached(DownloadCancelled):
1195 """ --break-on-existing triggered """
1196 msg = 'Encountered a video that is already in the archive, stopping due to --break-on-existing'
8b0d7497 1197
48f79687 1198
1199class RejectedVideoReached(DownloadCancelled):
fe2ce85a 1200 """ --break-match-filter triggered """
1201 msg = 'Encountered a video that did not match filter, stopping due to --break-match-filter'
51d9739f 1202
1203
48f79687 1204class MaxDownloadsReached(DownloadCancelled):
59ae15a5 1205 """ --max-downloads limit has been reached. """
48f79687 1206 msg = 'Maximum number of downloads reached, stopping due to --max-downloads'
1207
1208
f2ebc5c7 1209class ReExtractInfo(YoutubeDLError):
1210 """ Video info needs to be re-extracted. """
1211
1212 def __init__(self, msg, expected=False):
1213 super().__init__(msg)
1214 self.expected = expected
1215
1216
1217class ThrottledDownload(ReExtractInfo):
48f79687 1218 """ Download speed below --throttled-rate. """
aa9369a2 1219 msg = 'The download speed is below throttle limit'
d77c3dfd 1220
43b22906 1221 def __init__(self):
1222 super().__init__(self.msg, expected=False)
f2ebc5c7 1223
d77c3dfd 1224
bf5b9d85 1225class UnavailableVideoError(YoutubeDLError):
59ae15a5 1226 """Unavailable Format exception.
d77c3dfd 1227
59ae15a5
PH
1228 This exception will be thrown when a video is requested
1229 in a format that is not available for that video.
1230 """
aa9369a2 1231 msg = 'Unable to download video'
1232
1233 def __init__(self, err=None):
1234 if err is not None:
1235 self.msg += f': {err}'
1236 super().__init__(self.msg)
d77c3dfd
FV
1237
1238
bf5b9d85 1239class ContentTooShortError(YoutubeDLError):
59ae15a5 1240 """Content Too Short exception.
d77c3dfd 1241
59ae15a5
PH
1242 This exception may be raised by FileDownloader objects when a file they
1243 download is too small for what the server announced first, indicating
1244 the connection was probably interrupted.
1245 """
d77c3dfd 1246
59ae15a5 1247 def __init__(self, downloaded, expected):
86e5f3ed 1248 super().__init__(f'Downloaded {downloaded} bytes, expected {expected} bytes')
2c7ed247 1249 # Both in bytes
59ae15a5
PH
1250 self.downloaded = downloaded
1251 self.expected = expected
d77c3dfd 1252
5f6a1245 1253
bf5b9d85 1254class XAttrMetadataError(YoutubeDLError):
efa97bdc 1255 def __init__(self, code=None, msg='Unknown error'):
86e5f3ed 1256 super().__init__(msg)
efa97bdc 1257 self.code = code
bd264412 1258 self.msg = msg
efa97bdc
YCH
1259
1260 # Parsing code and msg
3089bc74 1261 if (self.code in (errno.ENOSPC, errno.EDQUOT)
a0566bbf 1262 or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
efa97bdc
YCH
1263 self.reason = 'NO_SPACE'
1264 elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
1265 self.reason = 'VALUE_TOO_LONG'
1266 else:
1267 self.reason = 'NOT_SUPPORTED'
1268
1269
bf5b9d85 1270class XAttrUnavailableError(YoutubeDLError):
efa97bdc
YCH
1271 pass
1272
1273
c5a59d93 1274def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
f9934b96 1275 hc = http_class(*args, **kwargs)
be4a824d 1276 source_address = ydl_handler._params.get('source_address')
8959018a 1277
be4a824d 1278 if source_address is not None:
8959018a
AU
1279 # This is to workaround _create_connection() from socket where it will try all
1280 # address data from getaddrinfo() including IPv6. This filters the result from
1281 # getaddrinfo() based on the source_address value.
1282 # This is based on the cpython socket.create_connection() function.
1283 # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
1284 def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
1285 host, port = address
1286 err = None
1287 addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
9e21e6d9
S
1288 af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
1289 ip_addrs = [addr for addr in addrs if addr[0] == af]
1290 if addrs and not ip_addrs:
1291 ip_version = 'v4' if af == socket.AF_INET else 'v6'
86e5f3ed 1292 raise OSError(
9e21e6d9
S
1293 "No remote IP%s addresses available for connect, can't use '%s' as source address"
1294 % (ip_version, source_address[0]))
8959018a
AU
1295 for res in ip_addrs:
1296 af, socktype, proto, canonname, sa = res
1297 sock = None
1298 try:
1299 sock = socket.socket(af, socktype, proto)
1300 if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
1301 sock.settimeout(timeout)
1302 sock.bind(source_address)
1303 sock.connect(sa)
1304 err = None # Explicitly break reference cycle
1305 return sock
86e5f3ed 1306 except OSError as _:
8959018a
AU
1307 err = _
1308 if sock is not None:
1309 sock.close()
1310 if err is not None:
1311 raise err
1312 else:
86e5f3ed 1313 raise OSError('getaddrinfo returns an empty list')
9e21e6d9
S
1314 if hasattr(hc, '_create_connection'):
1315 hc._create_connection = _create_connection
cfb0511d 1316 hc.source_address = (source_address, 0)
be4a824d
PH
1317
1318 return hc
1319
1320
ac668111 1321class YoutubeDLHandler(urllib.request.HTTPHandler):
59ae15a5
PH
1322 """Handler for HTTP requests and responses.
1323
1324 This class, when installed with an OpenerDirector, automatically adds
955c8958 1325 the standard headers to every HTTP request and handles gzipped, deflated and
1326 brotli responses from web servers.
59ae15a5
PH
1327
1328 Part of this code was copied from:
1329
1330 http://techknack.net/python-urllib2-handlers/
1331
1332 Andrew Rowls, the author of that code, agreed to release it to the
1333 public domain.
1334 """
1335
be4a824d 1336 def __init__(self, params, *args, **kwargs):
ac668111 1337 urllib.request.HTTPHandler.__init__(self, *args, **kwargs)
be4a824d
PH
1338 self._params = params
1339
1340 def http_open(self, req):
ac668111 1341 conn_class = http.client.HTTPConnection
71aff188
YCH
1342
1343 socks_proxy = req.headers.get('Ytdl-socks-proxy')
1344 if socks_proxy:
1345 conn_class = make_socks_conn_class(conn_class, socks_proxy)
1346 del req.headers['Ytdl-socks-proxy']
1347
be4a824d 1348 return self.do_open(functools.partial(
71aff188 1349 _create_http_connection, self, conn_class, False),
be4a824d
PH
1350 req)
1351
59ae15a5
PH
1352 @staticmethod
1353 def deflate(data):
fc2119f2 1354 if not data:
1355 return data
59ae15a5
PH
1356 try:
1357 return zlib.decompress(data, -zlib.MAX_WBITS)
1358 except zlib.error:
1359 return zlib.decompress(data)
1360
4390d5ec 1361 @staticmethod
1362 def brotli(data):
1363 if not data:
1364 return data
9b8ee23b 1365 return brotli.decompress(data)
4390d5ec 1366
daafbf49 1367 @staticmethod
1368 def gz(data):
1369 gz = gzip.GzipFile(fileobj=io.BytesIO(data), mode='rb')
1370 try:
1371 return gz.read()
1372 except OSError as original_oserror:
1373 # There may be junk add the end of the file
1374 # See http://stackoverflow.com/q/4928560/35070 for details
1375 for i in range(1, 1024):
1376 try:
1377 gz = gzip.GzipFile(fileobj=io.BytesIO(data[:-i]), mode='rb')
1378 return gz.read()
1379 except OSError:
1380 continue
1381 else:
1382 raise original_oserror
1383
acebc9cd 1384 def http_request(self, req):
51f267d9
S
1385 # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
1386 # always respected by websites, some tend to give out URLs with non percent-encoded
1387 # non-ASCII characters (see telemb.py, ard.py [#3412])
1388 # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
1389 # To work around aforementioned issue we will replace request's original URL with
1390 # percent-encoded one
1391 # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
1392 # the code of this workaround has been moved here from YoutubeDL.urlopen()
1393 url = req.get_full_url()
1394 url_escaped = escape_url(url)
1395
1396 # Substitute URL if any change after escaping
1397 if url != url_escaped:
15d260eb 1398 req = update_Request(req, url=url_escaped)
51f267d9 1399
8b7539d2 1400 for h, v in self._params.get('http_headers', std_headers).items():
3d5f7a39
JK
1401 # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
1402 # The dict keys are capitalized because of this bug by urllib
1403 if h.capitalize() not in req.headers:
33ac271b 1404 req.add_header(h, v)
87f0e62d 1405
955c8958 1406 if 'Youtubedl-no-compression' in req.headers: # deprecated
1407 req.headers.pop('Youtubedl-no-compression', None)
1408 req.add_header('Accept-encoding', 'identity')
1409
af14914b 1410 if 'Accept-encoding' not in req.headers:
1411 req.add_header('Accept-encoding', ', '.join(SUPPORTED_ENCODINGS))
1412
379a4f16 1413 return super().do_request_(req)
59ae15a5 1414
acebc9cd 1415 def http_response(self, req, resp):
59ae15a5 1416 old_resp = resp
daafbf49 1417
1418 # Content-Encoding header lists the encodings in order that they were applied [1].
1419 # To decompress, we simply do the reverse.
1420 # [1]: https://datatracker.ietf.org/doc/html/rfc9110#name-content-encoding
1421 decoded_response = None
1422 for encoding in (e.strip() for e in reversed(resp.headers.get('Content-encoding', '').split(','))):
1423 if encoding == 'gzip':
1424 decoded_response = self.gz(decoded_response or resp.read())
1425 elif encoding == 'deflate':
1426 decoded_response = self.deflate(decoded_response or resp.read())
1427 elif encoding == 'br' and brotli:
1428 decoded_response = self.brotli(decoded_response or resp.read())
1429
1430 if decoded_response is not None:
1431 resp = urllib.request.addinfourl(io.BytesIO(decoded_response), old_resp.headers, old_resp.url, old_resp.code)
4390d5ec 1432 resp.msg = old_resp.msg
ad729172 1433 # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
067aa17e 1434 # https://github.com/ytdl-org/youtube-dl/issues/6457).
5a4d9ddb
S
1435 if 300 <= resp.code < 400:
1436 location = resp.headers.get('Location')
1437 if location:
1438 # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
0f06bcd7 1439 location = location.encode('iso-8859-1').decode()
5a4d9ddb
S
1440 location_escaped = escape_url(location)
1441 if location != location_escaped:
1442 del resp.headers['Location']
1443 resp.headers['Location'] = location_escaped
59ae15a5 1444 return resp
0f8d03f8 1445
acebc9cd
PH
1446 https_request = http_request
1447 https_response = http_response
bf50b038 1448
5de90176 1449
71aff188
YCH
1450def make_socks_conn_class(base_class, socks_proxy):
1451 assert issubclass(base_class, (
ac668111 1452 http.client.HTTPConnection, http.client.HTTPSConnection))
71aff188 1453
14f25df2 1454 url_components = urllib.parse.urlparse(socks_proxy)
71aff188
YCH
1455 if url_components.scheme.lower() == 'socks5':
1456 socks_type = ProxyType.SOCKS5
1457 elif url_components.scheme.lower() in ('socks', 'socks4'):
1458 socks_type = ProxyType.SOCKS4
51fb4995
YCH
1459 elif url_components.scheme.lower() == 'socks4a':
1460 socks_type = ProxyType.SOCKS4A
71aff188 1461
cdd94c2e
YCH
1462 def unquote_if_non_empty(s):
1463 if not s:
1464 return s
ac668111 1465 return urllib.parse.unquote_plus(s)
cdd94c2e 1466
71aff188
YCH
1467 proxy_args = (
1468 socks_type,
1469 url_components.hostname, url_components.port or 1080,
1470 True, # Remote DNS
cdd94c2e
YCH
1471 unquote_if_non_empty(url_components.username),
1472 unquote_if_non_empty(url_components.password),
71aff188
YCH
1473 )
1474
1475 class SocksConnection(base_class):
1476 def connect(self):
1477 self.sock = sockssocket()
1478 self.sock.setproxy(*proxy_args)
19a03940 1479 if isinstance(self.timeout, (int, float)):
71aff188
YCH
1480 self.sock.settimeout(self.timeout)
1481 self.sock.connect((self.host, self.port))
1482
ac668111 1483 if isinstance(self, http.client.HTTPSConnection):
71aff188
YCH
1484 if hasattr(self, '_context'): # Python > 2.6
1485 self.sock = self._context.wrap_socket(
1486 self.sock, server_hostname=self.host)
1487 else:
1488 self.sock = ssl.wrap_socket(self.sock)
1489
1490 return SocksConnection
1491
1492
ac668111 1493class YoutubeDLHTTPSHandler(urllib.request.HTTPSHandler):
be4a824d 1494 def __init__(self, params, https_conn_class=None, *args, **kwargs):
ac668111 1495 urllib.request.HTTPSHandler.__init__(self, *args, **kwargs)
1496 self._https_conn_class = https_conn_class or http.client.HTTPSConnection
be4a824d
PH
1497 self._params = params
1498
1499 def https_open(self, req):
4f264c02 1500 kwargs = {}
71aff188
YCH
1501 conn_class = self._https_conn_class
1502
4f264c02
JMF
1503 if hasattr(self, '_context'): # python > 2.6
1504 kwargs['context'] = self._context
1505 if hasattr(self, '_check_hostname'): # python 3.x
1506 kwargs['check_hostname'] = self._check_hostname
71aff188
YCH
1507
1508 socks_proxy = req.headers.get('Ytdl-socks-proxy')
1509 if socks_proxy:
1510 conn_class = make_socks_conn_class(conn_class, socks_proxy)
1511 del req.headers['Ytdl-socks-proxy']
1512
4f28b537 1513 try:
1514 return self.do_open(
1515 functools.partial(_create_http_connection, self, conn_class, True), req, **kwargs)
1516 except urllib.error.URLError as e:
1517 if (isinstance(e.reason, ssl.SSLError)
1518 and getattr(e.reason, 'reason', None) == 'SSLV3_ALERT_HANDSHAKE_FAILURE'):
1519 raise YoutubeDLError('SSLV3_ALERT_HANDSHAKE_FAILURE: Try using --legacy-server-connect')
1520 raise
be4a824d
PH
1521
1522
941e881e 1523def is_path_like(f):
1524 return isinstance(f, (str, bytes, os.PathLike))
1525
1526
ac668111 1527class YoutubeDLCookieProcessor(urllib.request.HTTPCookieProcessor):
a6420bf5 1528 def __init__(self, cookiejar=None):
ac668111 1529 urllib.request.HTTPCookieProcessor.__init__(self, cookiejar)
a6420bf5
S
1530
1531 def http_response(self, request, response):
ac668111 1532 return urllib.request.HTTPCookieProcessor.http_response(self, request, response)
a6420bf5 1533
ac668111 1534 https_request = urllib.request.HTTPCookieProcessor.http_request
a6420bf5
S
1535 https_response = http_response
1536
1537
ac668111 1538class YoutubeDLRedirectHandler(urllib.request.HTTPRedirectHandler):
201c1459 1539 """YoutubeDL redirect handler
1540
1541 The code is based on HTTPRedirectHandler implementation from CPython [1].
1542
08916a49 1543 This redirect handler fixes and improves the logic to better align with RFC 7231
1544 and what browsers tend to do [2][3]
201c1459 1545
1546 1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
08916a49 1547 2. https://datatracker.ietf.org/doc/html/rfc7231
1548 3. https://github.com/python/cpython/issues/91306
201c1459 1549 """
1550
ac668111 1551 http_error_301 = http_error_303 = http_error_307 = http_error_308 = urllib.request.HTTPRedirectHandler.http_error_302
201c1459 1552
1553 def redirect_request(self, req, fp, code, msg, headers, newurl):
08916a49 1554 if code not in (301, 302, 303, 307, 308):
14f25df2 1555 raise urllib.error.HTTPError(req.full_url, code, msg, headers, fp)
afac4caa 1556
08916a49 1557 new_method = req.get_method()
1558 new_data = req.data
1559 remove_headers = []
afac4caa 1560 # A 303 must either use GET or HEAD for the subsequent request
1561 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.4
08916a49 1562 if code == 303 and req.get_method() != 'HEAD':
1563 new_method = 'GET'
afac4caa 1564 # 301 and 302 redirects are commonly turned into a GET from a POST
1565 # for subsequent requests by browsers, so we'll do the same.
1566 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.2
1567 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.3
08916a49 1568 elif code in (301, 302) and req.get_method() == 'POST':
1569 new_method = 'GET'
1570
1571 # only remove payload if method changed (e.g. POST to GET)
1572 if new_method != req.get_method():
1573 new_data = None
1574 remove_headers.extend(['Content-Length', 'Content-Type'])
1575
1576 new_headers = {k: v for k, v in req.headers.items() if k.lower() not in remove_headers}
afac4caa 1577
ac668111 1578 return urllib.request.Request(
08916a49 1579 newurl, headers=new_headers, origin_req_host=req.origin_req_host,
1580 unverifiable=True, method=new_method, data=new_data)
fca6dba8
S
1581
1582
46f59e89
S
1583def extract_timezone(date_str):
1584 m = re.search(
f137e4c2 1585 r'''(?x)
1586 ^.{8,}? # >=8 char non-TZ prefix, if present
1587 (?P<tz>Z| # just the UTC Z, or
1588 (?:(?<=.\b\d{4}|\b\d{2}:\d\d)| # preceded by 4 digits or hh:mm or
1589 (?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d)) # not preceded by 3 alpha word or >= 4 alpha or 2 digits
1590 [ ]? # optional space
1591 (?P<sign>\+|-) # +/-
1592 (?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2}) # hh[:]mm
1593 $)
1594 ''', date_str)
46f59e89 1595 if not m:
8f53dc44 1596 m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
1597 timezone = TIMEZONE_NAMES.get(m and m.group('tz').strip())
1598 if timezone is not None:
1599 date_str = date_str[:-len(m.group('tz'))]
1600 timezone = datetime.timedelta(hours=timezone or 0)
46f59e89
S
1601 else:
1602 date_str = date_str[:-len(m.group('tz'))]
1603 if not m.group('sign'):
1604 timezone = datetime.timedelta()
1605 else:
1606 sign = 1 if m.group('sign') == '+' else -1
1607 timezone = datetime.timedelta(
1608 hours=sign * int(m.group('hours')),
1609 minutes=sign * int(m.group('minutes')))
1610 return timezone, date_str
1611
1612
08b38d54 1613def parse_iso8601(date_str, delimiter='T', timezone=None):
912b38b4
PH
1614 """ Return a UNIX timestamp from the given date """
1615
1616 if date_str is None:
1617 return None
1618
52c3a6e4
S
1619 date_str = re.sub(r'\.[0-9]+', '', date_str)
1620
08b38d54 1621 if timezone is None:
46f59e89
S
1622 timezone, date_str = extract_timezone(date_str)
1623
19a03940 1624 with contextlib.suppress(ValueError):
86e5f3ed 1625 date_format = f'%Y-%m-%d{delimiter}%H:%M:%S'
52c3a6e4
S
1626 dt = datetime.datetime.strptime(date_str, date_format) - timezone
1627 return calendar.timegm(dt.timetuple())
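# Illustrative usage (editor's annotation, not part of the original source). With the
# default 'T' delimiter, parse_iso8601 maps an ISO 8601 string to a Unix timestamp:
#   parse_iso8601('1970-01-01T00:00:00Z')       # -> 0
#   parse_iso8601('1970-01-01T01:00:00+01:00')  # -> 0 (the UTC offset is subtracted)
#   parse_iso8601(None)                         # -> None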
912b38b4
PH
1628
1629
46f59e89
S
1630def date_formats(day_first=True):
1631 return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST
1632
1633
42bdd9d0 1634def unified_strdate(date_str, day_first=True):
bf50b038 1635 """Return a string with the date in the format YYYYMMDD"""
64e7ad60
PH
1636
1637 if date_str is None:
1638 return None
bf50b038 1639 upload_date = None
5f6a1245 1640 # Replace commas
026fcc04 1641 date_str = date_str.replace(',', ' ')
42bdd9d0 1642 # Remove AM/PM + timezone
9bb8e0a3 1643 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
46f59e89 1644 _, date_str = extract_timezone(date_str)
42bdd9d0 1645
46f59e89 1646 for expression in date_formats(day_first):
19a03940 1647 with contextlib.suppress(ValueError):
bf50b038 1648 upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
42393ce2
PH
1649 if upload_date is None:
1650 timetuple = email.utils.parsedate_tz(date_str)
1651 if timetuple:
19a03940 1652 with contextlib.suppress(ValueError):
c6b9cf05 1653 upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
6a750402 1654 if upload_date is not None:
14f25df2 1655 return str(upload_date)
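# Illustrative usage (editor's annotation, not part of the original source). The exact
# set of accepted inputs depends on DATE_FORMATS, defined elsewhere in this module;
# assuming '%Y-%m-%d' is among them, dates normalize to YYYYMMDD:
#   unified_strdate('1968-12-10')        # -> '19681210'
#   unified_strdate('not a date')        # -> None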
bf50b038 1656
5f6a1245 1657
46f59e89 1658def unified_timestamp(date_str, day_first=True):
ad54c913 1659 if not isinstance(date_str, str):
46f59e89
S
1660 return None
1661
8f53dc44 1662 date_str = re.sub(r'\s+', ' ', re.sub(
1663 r'(?i)[,|]|(mon|tues?|wed(nes)?|thu(rs)?|fri|sat(ur)?)(day)?', '', date_str))
46f59e89 1664
7dc2a74e 1665 pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
46f59e89
S
1666 timezone, date_str = extract_timezone(date_str)
1667
1668 # Remove AM/PM + timezone
1669 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
1670
deef3195
S
1671 # Remove unrecognized timezones from ISO 8601 alike timestamps
1672 m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
1673 if m:
1674 date_str = date_str[:-len(m.group('tz'))]
1675
f226880c
PH
1676 # Python only supports microseconds, so remove nanoseconds
1677 m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
1678 if m:
1679 date_str = m.group(1)
1680
46f59e89 1681 for expression in date_formats(day_first):
19a03940 1682 with contextlib.suppress(ValueError):
7dc2a74e 1683 dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
46f59e89 1684 return calendar.timegm(dt.timetuple())
8f53dc44 1685
46f59e89
S
1686 timetuple = email.utils.parsedate_tz(date_str)
1687 if timetuple:
8f53dc44 1688 return calendar.timegm(timetuple) + pm_delta * 3600 - timezone.total_seconds()
46f59e89
S
1689
1690
28e614de 1691def determine_ext(url, default_ext='unknown_video'):
85750f89 1692 if url is None or '.' not in url:
f4776371 1693 return default_ext
9cb9a5df 1694 guess = url.partition('?')[0].rpartition('.')[2]
73e79f2a
PH
1695 if re.match(r'^[A-Za-z0-9]+$', guess):
1696 return guess
a7aaa398
S
1697 # Try to extract ext from URLs like http://example.com/foo/bar.mp4/?download
1698 elif guess.rstrip('/') in KNOWN_EXTENSIONS:
9cb9a5df 1699 return guess.rstrip('/')
73e79f2a 1700 else:
cbdbb766 1701 return default_ext
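# Illustrative usage (editor's annotation, not part of the original source). The second
# example assumes 'mp4' is listed in KNOWN_EXTENSIONS, defined elsewhere in this module:
#   determine_ext('http://example.com/video.mp4?dl=1')         # -> 'mp4'
#   determine_ext('http://example.com/foo/bar.mp4/?download')  # trailing '/' stripped -> 'mp4'
#   determine_ext('no-extension-here')                          # -> 'unknown_video'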
73e79f2a 1702
5f6a1245 1703
824fa511
S
1704def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
1705 return replace_extension(filename, sub_lang + '.' + sub_format, expected_real_ext)
d4051a8e 1706
5f6a1245 1707
9e62f283 1708def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
3d38b2d6 1709 R"""
1710 Return a datetime object from a string.
1711 Supported format:
1712 (now|today|yesterday|DATE)([+-]\d+(microsecond|second|minute|hour|day|week|month|year)s?)?
1713
1714 @param format strftime format of DATE
1715 @param precision Round the datetime object: auto|microsecond|second|minute|hour|day
1716 auto: round to the unit provided in date_str (if applicable).
9e62f283 1717 """
1718 auto_precision = False
1719 if precision == 'auto':
1720 auto_precision = True
1721 precision = 'microsecond'
396a76f7 1722 today = datetime_round(datetime.datetime.utcnow(), precision)
f8795e10 1723 if date_str in ('now', 'today'):
37254abc 1724 return today
f8795e10
PH
1725 if date_str == 'yesterday':
1726 return today - datetime.timedelta(days=1)
9e62f283 1727 match = re.match(
3d38b2d6 1728 r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?',
9e62f283 1729 date_str)
37254abc 1730 if match is not None:
9e62f283 1731 start_time = datetime_from_str(match.group('start'), precision, format)
1732 time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1)
37254abc 1733 unit = match.group('unit')
9e62f283 1734 if unit == 'month' or unit == 'year':
1735 new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time)
37254abc 1736 unit = 'day'
9e62f283 1737 else:
1738 if unit == 'week':
1739 unit = 'day'
1740 time *= 7
1741 delta = datetime.timedelta(**{unit + 's': time})
1742 new_date = start_time + delta
1743 if auto_precision:
1744 return datetime_round(new_date, unit)
1745 return new_date
1746
1747 return datetime_round(datetime.datetime.strptime(date_str, format), precision)
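# Illustrative usage (editor's annotation, not part of the original source). Relative
# offsets are applied to the current UTC time and, with the default 'auto' precision,
# rounded to the unit that was used:
#   datetime_from_str('now')                        # current UTC datetime
#   datetime_from_str('today-1week')                # 7 days ago, rounded to day precision
#   datetime_from_str('20230101', format='%Y%m%d')  # absolute date parsed with the given format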
1748
1749
d49f8db3 1750def date_from_str(date_str, format='%Y%m%d', strict=False):
3d38b2d6 1751 R"""
1752 Return a date object from a string using datetime_from_str
9e62f283 1753
3d38b2d6 1754 @param strict Restrict allowed patterns to "YYYYMMDD" and
1755 (now|today|yesterday)(-\d+(day|week|month|year)s?)?
9e62f283 1756 """
3d38b2d6 1757 if strict and not re.fullmatch(r'\d{8}|(now|today|yesterday)(-\d+(day|week|month|year)s?)?', date_str):
1758 raise ValueError(f'Invalid date format "{date_str}"')
9e62f283 1759 return datetime_from_str(date_str, precision='microsecond', format=format).date()
1760
1761
1762def datetime_add_months(dt, months):
1763 """Increment/Decrement a datetime object by months."""
1764 month = dt.month + months - 1
1765 year = dt.year + month // 12
1766 month = month % 12 + 1
1767 day = min(dt.day, calendar.monthrange(year, month)[1])
1768 return dt.replace(year, month, day)
1769
1770
1771def datetime_round(dt, precision='day'):
1772 """
1773 Round a datetime object's time to a specific precision
1774 """
1775 if precision == 'microsecond':
1776 return dt
1777
1778 unit_seconds = {
1779 'day': 86400,
1780 'hour': 3600,
1781 'minute': 60,
1782 'second': 1,
1783 }
1784 roundto = lambda x, n: ((x + n / 2) // n) * n
1785 timestamp = calendar.timegm(dt.timetuple())
1786 return datetime.datetime.utcfromtimestamp(roundto(timestamp, unit_seconds[precision]))
5f6a1245
JW
1787
1788
e63fc1be 1789def hyphenate_date(date_str):
1790 """
1791 Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
1792 match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
1793 if match is not None:
1794 return '-'.join(match.groups())
1795 else:
1796 return date_str
1797
5f6a1245 1798
86e5f3ed 1799class DateRange:
bd558525 1800 """Represents a time interval between two dates"""
5f6a1245 1801
bd558525
JMF
1802 def __init__(self, start=None, end=None):
1803 """start and end must be strings in the format accepted by date"""
1804 if start is not None:
d49f8db3 1805 self.start = date_from_str(start, strict=True)
bd558525
JMF
1806 else:
1807 self.start = datetime.datetime.min.date()
1808 if end is not None:
d49f8db3 1809 self.end = date_from_str(end, strict=True)
bd558525
JMF
1810 else:
1811 self.end = datetime.datetime.max.date()
37254abc 1812 if self.start > self.end:
bd558525 1813 raise ValueError('Date range: "%s", the start date must be before the end date' % self)
5f6a1245 1814
bd558525
JMF
1815 @classmethod
1816 def day(cls, day):
1817 """Returns a range that only contains the given day"""
5f6a1245
JW
1818 return cls(day, day)
1819
bd558525
JMF
1820 def __contains__(self, date):
1821 """Check if the date is in the range"""
37254abc
JMF
1822 if not isinstance(date, datetime.date):
1823 date = date_from_str(date)
1824 return self.start <= date <= self.end
5f6a1245 1825
46f1370e 1826 def __repr__(self):
1827 return f'{__name__}.{type(self).__name__}({self.start.isoformat()!r}, {self.end.isoformat()!r})'
c496ca96 1828
f2df4071 1829 def __eq__(self, other):
1830 return (isinstance(other, DateRange)
1831 and self.start == other.start and self.end == other.end)
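# Illustrative usage (editor's annotation, not part of the original source):
#   DateRange('20100101', '20101231')                   # the whole of 2010
#   '20100615' in DateRange('20100101', '20101231')     # -> True
#   DateRange.day('20200229')                            # a range containing only that day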
1832
c496ca96 1833
b1f94422 1834@functools.cache
1835def system_identifier():
1836 python_implementation = platform.python_implementation()
1837 if python_implementation == 'PyPy' and hasattr(sys, 'pypy_version_info'):
1838 python_implementation += ' version %d.%d.%d' % sys.pypy_version_info[:3]
dab284f8 1839 libc_ver = []
1840 with contextlib.suppress(OSError): # We may not have access to the executable
1841 libc_ver = platform.libc_ver()
b1f94422 1842
17fc3dc4 1843 return 'Python %s (%s %s %s) - %s (%s%s)' % (
b1f94422 1844 platform.python_version(),
1845 python_implementation,
17fc3dc4 1846 platform.machine(),
b1f94422 1847 platform.architecture()[0],
1848 platform.platform(),
5b9f253f
M
1849 ssl.OPENSSL_VERSION,
1850 format_field(join_nonempty(*libc_ver, delim=' '), None, ', %s'),
b1f94422 1851 )
c257baff
PH
1852
1853
0b9c08b4 1854@functools.cache
49fa4d9a 1855def get_windows_version():
8a82af35 1856 ''' Get Windows version. Returns () if it's not running on Windows '''
49fa4d9a
N
1857 if compat_os_name == 'nt':
1858 return version_tuple(platform.win32_ver()[1])
1859 else:
8a82af35 1860 return ()
49fa4d9a
N
1861
1862
734f90bb 1863def write_string(s, out=None, encoding=None):
19a03940 1864 assert isinstance(s, str)
1865 out = out or sys.stderr
3b479100
SS
1866 # `sys.stderr` might be `None` (Ref: https://github.com/pyinstaller/pyinstaller/pull/7217)
1867 if not out:
1868 return
7459e3a2 1869
fe1daad3 1870 if compat_os_name == 'nt' and supports_terminal_sequences(out):
3fe75fdc 1871 s = re.sub(r'([\r\n]+)', r' \1', s)
59f943cd 1872
8a82af35 1873 enc, buffer = None, out
cfb0511d 1874 if 'b' in getattr(out, 'mode', ''):
c487cf00 1875 enc = encoding or preferredencoding()
104aa738 1876 elif hasattr(out, 'buffer'):
8a82af35 1877 buffer = out.buffer
104aa738 1878 enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
c487cf00 1879
8a82af35 1880 buffer.write(s.encode(enc, 'ignore') if enc else s)
7459e3a2
PH
1881 out.flush()
1882
1883
da4db748 1884def deprecation_warning(msg, *, printer=None, stacklevel=0, **kwargs):
69bec673 1885 from .. import _IN_CLI
da4db748 1886 if _IN_CLI:
1887 if msg in deprecation_warning._cache:
1888 return
1889 deprecation_warning._cache.add(msg)
1890 if printer:
1891 return printer(f'{msg}{bug_reports_message()}', **kwargs)
1892 return write_string(f'ERROR: {msg}{bug_reports_message()}\n', **kwargs)
1893 else:
1894 import warnings
1895 warnings.warn(DeprecationWarning(msg), stacklevel=stacklevel + 3)
1896
1897
1898deprecation_warning._cache = set()
1899
1900
48ea9cea
PH
1901def bytes_to_intlist(bs):
1902 if not bs:
1903 return []
1904 if isinstance(bs[0], int): # Python 3
1905 return list(bs)
1906 else:
1907 return [ord(c) for c in bs]
1908
c257baff 1909
cba892fa 1910def intlist_to_bytes(xs):
1911 if not xs:
1912 return b''
ac668111 1913 return struct.pack('%dB' % len(xs), *xs)
c38b1e77
PH
1914
1915
8a82af35 1916class LockingUnsupportedError(OSError):
1890fc63 1917 msg = 'File locking is not supported'
0edb3e33 1918
1919 def __init__(self):
1920 super().__init__(self.msg)
1921
1922
c1c9a79c
PH
1923# Cross-platform file locking
1924if sys.platform == 'win32':
fe0918bb 1925 import ctypes
c1c9a79c
PH
1926 import ctypes.wintypes
1927 import msvcrt
1928
1929 class OVERLAPPED(ctypes.Structure):
1930 _fields_ = [
1931 ('Internal', ctypes.wintypes.LPVOID),
1932 ('InternalHigh', ctypes.wintypes.LPVOID),
1933 ('Offset', ctypes.wintypes.DWORD),
1934 ('OffsetHigh', ctypes.wintypes.DWORD),
1935 ('hEvent', ctypes.wintypes.HANDLE),
1936 ]
1937
37e325b9 1938 kernel32 = ctypes.WinDLL('kernel32')
c1c9a79c
PH
1939 LockFileEx = kernel32.LockFileEx
1940 LockFileEx.argtypes = [
1941 ctypes.wintypes.HANDLE, # hFile
1942 ctypes.wintypes.DWORD, # dwFlags
1943 ctypes.wintypes.DWORD, # dwReserved
1944 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
1945 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
1946 ctypes.POINTER(OVERLAPPED) # Overlapped
1947 ]
1948 LockFileEx.restype = ctypes.wintypes.BOOL
1949 UnlockFileEx = kernel32.UnlockFileEx
1950 UnlockFileEx.argtypes = [
1951 ctypes.wintypes.HANDLE, # hFile
1952 ctypes.wintypes.DWORD, # dwReserved
1953 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
1954 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
1955 ctypes.POINTER(OVERLAPPED) # Overlapped
1956 ]
1957 UnlockFileEx.restype = ctypes.wintypes.BOOL
1958 whole_low = 0xffffffff
1959 whole_high = 0x7fffffff
1960
747c0bd1 1961 def _lock_file(f, exclusive, block):
c1c9a79c
PH
1962 overlapped = OVERLAPPED()
1963 overlapped.Offset = 0
1964 overlapped.OffsetHigh = 0
1965 overlapped.hEvent = 0
1966 f._lock_file_overlapped_p = ctypes.pointer(overlapped)
747c0bd1 1967
1968 if not LockFileEx(msvcrt.get_osfhandle(f.fileno()),
1969 (0x2 if exclusive else 0x0) | (0x0 if block else 0x1),
1970 0, whole_low, whole_high, f._lock_file_overlapped_p):
2cb19820 1971 # NB: No argument form of "ctypes.FormatError" does not work on PyPy
1972 raise BlockingIOError(f'Locking file failed: {ctypes.FormatError(ctypes.GetLastError())!r}')
c1c9a79c
PH
1973
1974 def _unlock_file(f):
1975 assert f._lock_file_overlapped_p
1976 handle = msvcrt.get_osfhandle(f.fileno())
747c0bd1 1977 if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p):
c1c9a79c
PH
1978 raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
1979
1980else:
399a76e6
YCH
1981 try:
1982 import fcntl
c1c9a79c 1983
a3125791 1984 def _lock_file(f, exclusive, block):
b63837bc 1985 flags = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
1986 if not block:
1987 flags |= fcntl.LOCK_NB
acea8d7c 1988 try:
b63837bc 1989 fcntl.flock(f, flags)
acea8d7c
JK
1990 except BlockingIOError:
1991 raise
1992 except OSError: # AOSP does not have flock()
b63837bc 1993 fcntl.lockf(f, flags)
c1c9a79c 1994
399a76e6 1995 def _unlock_file(f):
45998b3e
E
1996 with contextlib.suppress(OSError):
1997 return fcntl.flock(f, fcntl.LOCK_UN)
1998 with contextlib.suppress(OSError):
1999 return fcntl.lockf(f, fcntl.LOCK_UN) # AOSP does not have flock()
2000 return fcntl.flock(f, fcntl.LOCK_UN | fcntl.LOCK_NB) # virtiofs needs LOCK_NB on unlocking
a3125791 2001
399a76e6 2002 except ImportError:
399a76e6 2003
a3125791 2004 def _lock_file(f, exclusive, block):
0edb3e33 2005 raise LockingUnsupportedError()
399a76e6
YCH
2006
2007 def _unlock_file(f):
0edb3e33 2008 raise LockingUnsupportedError()
c1c9a79c
PH
2009
2010
86e5f3ed 2011class locked_file:
0edb3e33 2012 locked = False
747c0bd1 2013
a3125791 2014 def __init__(self, filename, mode, block=True, encoding=None):
fcfa8853
JK
2015 if mode not in {'r', 'rb', 'a', 'ab', 'w', 'wb'}:
2016 raise NotImplementedError(mode)
2017 self.mode, self.block = mode, block
2018
2019 writable = any(f in mode for f in 'wax+')
2020 readable = any(f in mode for f in 'r+')
2021 flags = functools.reduce(operator.ior, (
2022 getattr(os, 'O_CLOEXEC', 0), # UNIX only
2023 getattr(os, 'O_BINARY', 0), # Windows only
2024 getattr(os, 'O_NOINHERIT', 0), # Windows only
2025 os.O_CREAT if writable else 0, # O_TRUNC only after locking
2026 os.O_APPEND if 'a' in mode else 0,
2027 os.O_EXCL if 'x' in mode else 0,
2028 os.O_RDONLY if not writable else os.O_RDWR if readable else os.O_WRONLY,
2029 ))
2030
98804d03 2031 self.f = os.fdopen(os.open(filename, flags, 0o666), mode, encoding=encoding)
c1c9a79c
PH
2032
2033 def __enter__(self):
a3125791 2034 exclusive = 'r' not in self.mode
c1c9a79c 2035 try:
a3125791 2036 _lock_file(self.f, exclusive, self.block)
0edb3e33 2037 self.locked = True
86e5f3ed 2038 except OSError:
c1c9a79c
PH
2039 self.f.close()
2040 raise
fcfa8853 2041 if 'w' in self.mode:
131e14dc
JK
2042 try:
2043 self.f.truncate()
2044 except OSError as e:
1890fc63 2045 if e.errno not in (
2046 errno.ESPIPE, # Illegal seek - expected for FIFO
2047 errno.EINVAL, # Invalid argument - expected for /dev/null
2048 ):
2049 raise
c1c9a79c
PH
2050 return self
2051
0edb3e33 2052 def unlock(self):
2053 if not self.locked:
2054 return
c1c9a79c 2055 try:
0edb3e33 2056 _unlock_file(self.f)
c1c9a79c 2057 finally:
0edb3e33 2058 self.locked = False
c1c9a79c 2059
0edb3e33 2060 def __exit__(self, *_):
2061 try:
2062 self.unlock()
2063 finally:
2064 self.f.close()
4eb7f1d1 2065
0edb3e33 2066 open = __enter__
2067 close = __exit__
a3125791 2068
0edb3e33 2069 def __getattr__(self, attr):
2070 return getattr(self.f, attr)
a3125791 2071
0edb3e33 2072 def __iter__(self):
2073 return iter(self.f)
a3125791 2074
4eb7f1d1 2075
0b9c08b4 2076@functools.cache
4644ac55
S
2077def get_filesystem_encoding():
2078 encoding = sys.getfilesystemencoding()
2079 return encoding if encoding is not None else 'utf-8'
2080
2081
4eb7f1d1 2082def shell_quote(args):
a6a173c2 2083 quoted_args = []
4644ac55 2084 encoding = get_filesystem_encoding()
a6a173c2
JMF
2085 for a in args:
2086 if isinstance(a, bytes):
2087 # We may get a filename encoded with 'encodeFilename'
2088 a = a.decode(encoding)
aefce8e6 2089 quoted_args.append(compat_shlex_quote(a))
28e614de 2090 return ' '.join(quoted_args)
9d4660ca
PH
2091
2092
2093def smuggle_url(url, data):
2094 """ Pass additional data in a URL for internal use. """
2095
81953d1a
RA
2096 url, idata = unsmuggle_url(url, {})
2097 data.update(idata)
14f25df2 2098 sdata = urllib.parse.urlencode(
28e614de
PH
2099 {'__youtubedl_smuggle': json.dumps(data)})
2100 return url + '#' + sdata
9d4660ca
PH
2101
2102
79f82953 2103def unsmuggle_url(smug_url, default=None):
83e865a3 2104 if '#__youtubedl_smuggle' not in smug_url:
79f82953 2105 return smug_url, default
28e614de 2106 url, _, sdata = smug_url.rpartition('#')
14f25df2 2107 jsond = urllib.parse.parse_qs(sdata)['__youtubedl_smuggle'][0]
9d4660ca
PH
2108 data = json.loads(jsond)
2109 return url, data
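# Illustrative usage (editor's annotation, not part of the original source). The data
# survives a round trip through the URL fragment:
#   url = smuggle_url('http://example.com/video', {'referer': 'http://example.com/'})
#   unsmuggle_url(url)   # -> ('http://example.com/video', {'referer': 'http://example.com/'})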
02dbf93f
PH
2110
2111
e0fd9573 2112def format_decimal_suffix(num, fmt='%d%s', *, factor=1000):
2113 """ Formats numbers with decimal sufixes like K, M, etc """
2114 num, factor = float_or_none(num), float(factor)
4c3f8c3f 2115 if num is None or num < 0:
e0fd9573 2116 return None
eeb2a770 2117 POSSIBLE_SUFFIXES = 'kMGTPEZY'
2118 exponent = 0 if num == 0 else min(int(math.log(num, factor)), len(POSSIBLE_SUFFIXES))
2119 suffix = ['', *POSSIBLE_SUFFIXES][exponent]
abbeeebc 2120 if factor == 1024:
2121 suffix = {'k': 'Ki', '': ''}.get(suffix, f'{suffix}i')
e0fd9573 2122 converted = num / (factor ** exponent)
abbeeebc 2123 return fmt % (converted, suffix)
e0fd9573 2124
2125
02dbf93f 2126def format_bytes(bytes):
f02d24d8 2127 return format_decimal_suffix(bytes, '%.2f%sB', factor=1024) or 'N/A'
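# Illustrative usage (editor's annotation, not part of the original source):
#   format_decimal_suffix(123456, '%.1f%s')   # -> '123.5k'
#   format_bytes(1024)                        # -> '1.00KiB'
#   format_bytes(None)                        # -> 'N/A'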
f53c966a 2128
1c088fa8 2129
64c464a1 2130def lookup_unit_table(unit_table, s, strict=False):
2131 num_re = NUMBER_RE if strict else NUMBER_RE.replace(R'\.', '[,.]')
fb47597b 2132 units_re = '|'.join(re.escape(u) for u in unit_table)
64c464a1 2133 m = (re.fullmatch if strict else re.match)(
2134 rf'(?P<num>{num_re})\s*(?P<unit>{units_re})\b', s)
fb47597b
S
2135 if not m:
2136 return None
64c464a1 2137
2138 num = float(m.group('num').replace(',', '.'))
fb47597b 2139 mult = unit_table[m.group('unit')]
64c464a1 2140 return round(num * mult)
2141
2142
2143def parse_bytes(s):
2144 """Parse a string indicating a byte quantity into an integer"""
2145 return lookup_unit_table(
2146 {u: 1024**i for i, u in enumerate(['', *'KMGTPEZY'])},
2147 s.upper(), strict=True)
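# Illustrative usage (editor's annotation, not part of the original source). parse_bytes
# uses binary (1024-based) multipliers and relies on NUMBER_RE, defined elsewhere in
# this module, for the numeric part:
#   parse_bytes('500K')      # -> 512000
#   parse_bytes('4.2M')      # -> 4404019 (round(4.2 * 1024**2))
#   parse_bytes('nonsense')  # -> None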
fb47597b
S
2148
2149
be64b5b0
PH
2150def parse_filesize(s):
2151 if s is None:
2152 return None
2153
dfb1b146 2154 # The lower-case forms are of course incorrect and unofficial,
be64b5b0
PH
2155 # but we support those too
2156 _UNIT_TABLE = {
2157 'B': 1,
2158 'b': 1,
70852b47 2159 'bytes': 1,
be64b5b0
PH
2160 'KiB': 1024,
2161 'KB': 1000,
2162 'kB': 1024,
2163 'Kb': 1000,
13585d76 2164 'kb': 1000,
70852b47
YCH
2165 'kilobytes': 1000,
2166 'kibibytes': 1024,
be64b5b0
PH
2167 'MiB': 1024 ** 2,
2168 'MB': 1000 ** 2,
2169 'mB': 1024 ** 2,
2170 'Mb': 1000 ** 2,
13585d76 2171 'mb': 1000 ** 2,
70852b47
YCH
2172 'megabytes': 1000 ** 2,
2173 'mebibytes': 1024 ** 2,
be64b5b0
PH
2174 'GiB': 1024 ** 3,
2175 'GB': 1000 ** 3,
2176 'gB': 1024 ** 3,
2177 'Gb': 1000 ** 3,
13585d76 2178 'gb': 1000 ** 3,
70852b47
YCH
2179 'gigabytes': 1000 ** 3,
2180 'gibibytes': 1024 ** 3,
be64b5b0
PH
2181 'TiB': 1024 ** 4,
2182 'TB': 1000 ** 4,
2183 'tB': 1024 ** 4,
2184 'Tb': 1000 ** 4,
13585d76 2185 'tb': 1000 ** 4,
70852b47
YCH
2186 'terabytes': 1000 ** 4,
2187 'tebibytes': 1024 ** 4,
be64b5b0
PH
2188 'PiB': 1024 ** 5,
2189 'PB': 1000 ** 5,
2190 'pB': 1024 ** 5,
2191 'Pb': 1000 ** 5,
13585d76 2192 'pb': 1000 ** 5,
70852b47
YCH
2193 'petabytes': 1000 ** 5,
2194 'pebibytes': 1024 ** 5,
be64b5b0
PH
2195 'EiB': 1024 ** 6,
2196 'EB': 1000 ** 6,
2197 'eB': 1024 ** 6,
2198 'Eb': 1000 ** 6,
13585d76 2199 'eb': 1000 ** 6,
70852b47
YCH
2200 'exabytes': 1000 ** 6,
2201 'exbibytes': 1024 ** 6,
be64b5b0
PH
2202 'ZiB': 1024 ** 7,
2203 'ZB': 1000 ** 7,
2204 'zB': 1024 ** 7,
2205 'Zb': 1000 ** 7,
13585d76 2206 'zb': 1000 ** 7,
70852b47
YCH
2207 'zettabytes': 1000 ** 7,
2208 'zebibytes': 1024 ** 7,
be64b5b0
PH
2209 'YiB': 1024 ** 8,
2210 'YB': 1000 ** 8,
2211 'yB': 1024 ** 8,
2212 'Yb': 1000 ** 8,
13585d76 2213 'yb': 1000 ** 8,
70852b47
YCH
2214 'yottabytes': 1000 ** 8,
2215 'yobibytes': 1024 ** 8,
be64b5b0
PH
2216 }
2217
fb47597b
S
2218 return lookup_unit_table(_UNIT_TABLE, s)
2219
2220
2221def parse_count(s):
2222 if s is None:
be64b5b0
PH
2223 return None
2224
352d5da8 2225 s = re.sub(r'^[^\d]+\s', '', s).strip()
fb47597b
S
2226
2227 if re.match(r'^[\d,.]+$', s):
2228 return str_to_int(s)
2229
2230 _UNIT_TABLE = {
2231 'k': 1000,
2232 'K': 1000,
2233 'm': 1000 ** 2,
2234 'M': 1000 ** 2,
2235 'kk': 1000 ** 2,
2236 'KK': 1000 ** 2,
352d5da8 2237 'b': 1000 ** 3,
2238 'B': 1000 ** 3,
fb47597b 2239 }
be64b5b0 2240
352d5da8 2241 ret = lookup_unit_table(_UNIT_TABLE, s)
2242 if ret is not None:
2243 return ret
2244
2245 mobj = re.match(r'([\d,.]+)(?:$|\s)', s)
2246 if mobj:
2247 return str_to_int(mobj.group(1))
be64b5b0 2248
2f7ae819 2249
5d45484c 2250def parse_resolution(s, *, lenient=False):
b871d7e9
S
2251 if s is None:
2252 return {}
2253
5d45484c
LNO
2254 if lenient:
2255 mobj = re.search(r'(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)', s)
2256 else:
2257 mobj = re.search(r'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s)
b871d7e9
S
2258 if mobj:
2259 return {
2260 'width': int(mobj.group('w')),
2261 'height': int(mobj.group('h')),
2262 }
2263
17ec8bcf 2264 mobj = re.search(r'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s)
b871d7e9
S
2265 if mobj:
2266 return {'height': int(mobj.group(1))}
2267
2268 mobj = re.search(r'\b([48])[kK]\b', s)
2269 if mobj:
2270 return {'height': int(mobj.group(1)) * 540}
2271
2272 return {}
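# Illustrative usage (editor's annotation, not part of the original source):
#   parse_resolution('1920x1080')   # -> {'width': 1920, 'height': 1080}
#   parse_resolution('720p')        # -> {'height': 720}
#   parse_resolution('4k')          # -> {'height': 2160}
#   parse_resolution('unknown')     # -> {}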
2273
2274
0dc41787 2275def parse_bitrate(s):
14f25df2 2276 if not isinstance(s, str):
0dc41787
S
2277 return
2278 mobj = re.search(r'\b(\d+)\s*kbps', s)
2279 if mobj:
2280 return int(mobj.group(1))
2281
2282
a942d6cb 2283def month_by_name(name, lang='en'):
caefb1de
PH
2284 """ Return the number of a month by (locale-independently) English name """
2285
f6717dec 2286 month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
a942d6cb 2287
caefb1de 2288 try:
f6717dec 2289 return month_names.index(name) + 1
7105440c
YCH
2290 except ValueError:
2291 return None
2292
2293
2294def month_by_abbreviation(abbrev):
2295 """ Return the number of a month by (locale-independently) English
2296 abbreviations """
2297
2298 try:
2299 return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
caefb1de
PH
2300 except ValueError:
2301 return None
18258362
JMF
2302
2303
5aafe895 2304def fix_xml_ampersands(xml_str):
18258362 2305 """Replace all the '&' by '&amp;' in XML"""
5aafe895
PH
2306 return re.sub(
2307 r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
28e614de 2308 '&amp;',
5aafe895 2309 xml_str)
e3946f98
PH
2310
2311
2312def setproctitle(title):
14f25df2 2313 assert isinstance(title, str)
c1c05c67 2314
fe0918bb 2315 # Workaround for https://github.com/yt-dlp/yt-dlp/issues/4541
2316 try:
2317 import ctypes
2318 except ImportError:
c1c05c67
YCH
2319 return
2320
e3946f98 2321 try:
611c1dd9 2322 libc = ctypes.cdll.LoadLibrary('libc.so.6')
e3946f98
PH
2323 except OSError:
2324 return
2f49bcd6
RC
2325 except TypeError:
2326 # LoadLibrary in Windows Python 2.7.13 only expects
2327 # a bytestring, but since unicode_literals turns
2328 # every string into a unicode string, it fails.
2329 return
0f06bcd7 2330 title_bytes = title.encode()
6eefe533
PH
2331 buf = ctypes.create_string_buffer(len(title_bytes))
2332 buf.value = title_bytes
e3946f98 2333 try:
6eefe533 2334 libc.prctl(15, buf, 0, 0, 0)
e3946f98
PH
2335 except AttributeError:
2336 return # Strange libc, just skip this
d7dda168
PH
2337
2338
2339def remove_start(s, start):
46bc9b7d 2340 return s[len(start):] if s is not None and s.startswith(start) else s
29eb5174
PH
2341
2342
2b9faf55 2343def remove_end(s, end):
46bc9b7d 2344 return s[:-len(end)] if s is not None and s.endswith(end) else s
2b9faf55
PH
2345
2346
31b2051e
S
2347def remove_quotes(s):
2348 if s is None or len(s) < 2:
2349 return s
2350 for quote in ('"', "'", ):
2351 if s[0] == quote and s[-1] == quote:
2352 return s[1:-1]
2353 return s
2354
2355
b6e0c7d2 2356def get_domain(url):
ebf99aaf 2357 """
2358 This implementation is inconsistent, but is kept for compatibility.
2359 Use this only for "webpage_url_domain"
2360 """
2361 return remove_start(urllib.parse.urlparse(url).netloc, 'www.') or None
b6e0c7d2
U
2362
2363
29eb5174 2364def url_basename(url):
14f25df2 2365 path = urllib.parse.urlparse(url).path
28e614de 2366 return path.strip('/').split('/')[-1]
aa94a6d3
PH
2367
2368
02dc0a36 2369def base_url(url):
7657ec7e 2370 return re.match(r'https?://[^?#]+/', url).group()
02dc0a36
S
2371
2372
e34c3361 2373def urljoin(base, path):
4b5de77b 2374 if isinstance(path, bytes):
0f06bcd7 2375 path = path.decode()
14f25df2 2376 if not isinstance(path, str) or not path:
e34c3361 2377 return None
fad4ceb5 2378 if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
e34c3361 2379 return path
4b5de77b 2380 if isinstance(base, bytes):
0f06bcd7 2381 base = base.decode()
14f25df2 2382 if not isinstance(base, str) or not re.match(
4b5de77b 2383 r'^(?:https?:)?//', base):
e34c3361 2384 return None
14f25df2 2385 return urllib.parse.urljoin(base, path)
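# Illustrative usage (editor's annotation, not part of the original source):
#   urljoin('http://example.com/a/', 'b.mp4')                     # -> 'http://example.com/a/b.mp4'
#   urljoin('http://example.com/a/', '//cdn.example.com/b.mp4')   # scheme-relative path returned unchanged
#   urljoin('not a base', '/b.mp4')                                # -> None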
e34c3361
S
2386
2387
ac668111 2388class HEADRequest(urllib.request.Request):
aa94a6d3 2389 def get_method(self):
611c1dd9 2390 return 'HEAD'
7217e148
PH
2391
2392
ac668111 2393class PUTRequest(urllib.request.Request):
95cf60e8
S
2394 def get_method(self):
2395 return 'PUT'
2396
2397
9732d77e 2398def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
9e907ebd 2399 if get_attr and v is not None:
2400 v = getattr(v, get_attr, None)
1812afb7
S
2401 try:
2402 return int(v) * invscale // scale
31c49255 2403 except (ValueError, TypeError, OverflowError):
af98f8ff 2404 return default
9732d77e 2405
9572013d 2406
40a90862 2407def str_or_none(v, default=None):
14f25df2 2408 return default if v is None else str(v)
40a90862 2409
9732d77e
PH
2410
2411def str_to_int(int_str):
48d4681e 2412 """ A more relaxed version of int_or_none """
f9934b96 2413 if isinstance(int_str, int):
348c6bf1 2414 return int_str
14f25df2 2415 elif isinstance(int_str, str):
42db58ec
S
2416 int_str = re.sub(r'[,\.\+]', '', int_str)
2417 return int_or_none(int_str)
608d11f5
PH
2418
2419
9732d77e 2420def float_or_none(v, scale=1, invscale=1, default=None):
caf80631
S
2421 if v is None:
2422 return default
2423 try:
2424 return float(v) * invscale / scale
5e1271c5 2425 except (ValueError, TypeError):
caf80631 2426 return default
43f775e4
PH
2427
2428
c7e327c4
S
2429def bool_or_none(v, default=None):
2430 return v if isinstance(v, bool) else default
2431
2432
53cd37ba 2433def strip_or_none(v, default=None):
14f25df2 2434 return v.strip() if isinstance(v, str) else default
b72b4431
S
2435
2436
af03000a 2437def url_or_none(url):
14f25df2 2438 if not url or not isinstance(url, str):
af03000a
S
2439 return None
2440 url = url.strip()
29f7c58a 2441 return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None
af03000a
S
2442
2443
3e9b66d7 2444def request_to_url(req):
ac668111 2445 if isinstance(req, urllib.request.Request):
3e9b66d7
LNO
2446 return req.get_full_url()
2447 else:
2448 return req
2449
2450
ad54c913 2451def strftime_or_none(timestamp, date_format='%Y%m%d', default=None):
e29663c6 2452 datetime_object = None
2453 try:
f9934b96 2454 if isinstance(timestamp, (int, float)): # unix timestamp
d509c1f5 2455 # Using naive datetime here can break timestamp() in Windows
2456 # Ref: https://github.com/yt-dlp/yt-dlp/issues/5185, https://github.com/python/cpython/issues/94414
a35af430 2457 # Also, datetime.datetime.fromtimestamp breaks for negative timestamps
2458 # Ref: https://github.com/yt-dlp/yt-dlp/issues/6706#issuecomment-1496842642
2459 datetime_object = (datetime.datetime.fromtimestamp(0, datetime.timezone.utc)
2460 + datetime.timedelta(seconds=timestamp))
14f25df2 2461 elif isinstance(timestamp, str): # assume YYYYMMDD
e29663c6 2462 datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
9665f15a 2463 date_format = re.sub( # Support %s on windows
2464 r'(?<!%)(%%)*%s', rf'\g<1>{int(datetime_object.timestamp())}', date_format)
e29663c6 2465 return datetime_object.strftime(date_format)
2466 except (ValueError, TypeError, AttributeError):
2467 return default
2468
2469
608d11f5 2470def parse_duration(s):
f9934b96 2471 if not isinstance(s, str):
608d11f5 2472 return None
ca7b3246 2473 s = s.strip()
38d79fd1 2474 if not s:
2475 return None
ca7b3246 2476
acaff495 2477 days, hours, mins, secs, ms = [None] * 5
8bd1c00b 2478 m = re.match(r'''(?x)
2479 (?P<before_secs>
2480 (?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?
2481 (?P<secs>(?(before_secs)[0-9]{1,2}|[0-9]+))
2482 (?P<ms>[.:][0-9]+)?Z?$
2483 ''', s)
acaff495 2484 if m:
8bd1c00b 2485 days, hours, mins, secs, ms = m.group('days', 'hours', 'mins', 'secs', 'ms')
acaff495 2486 else:
2487 m = re.match(
056653bb
S
2488 r'''(?ix)(?:P?
2489 (?:
1c1b2f96 2490 [0-9]+\s*y(?:ears?)?,?\s*
056653bb
S
2491 )?
2492 (?:
1c1b2f96 2493 [0-9]+\s*m(?:onths?)?,?\s*
056653bb
S
2494 )?
2495 (?:
1c1b2f96 2496 [0-9]+\s*w(?:eeks?)?,?\s*
056653bb 2497 )?
8f4b58d7 2498 (?:
1c1b2f96 2499 (?P<days>[0-9]+)\s*d(?:ays?)?,?\s*
8f4b58d7 2500 )?
056653bb 2501 T)?
acaff495 2502 (?:
1c1b2f96 2503 (?P<hours>[0-9]+)\s*h(?:ours?)?,?\s*
acaff495 2504 )?
2505 (?:
1c1b2f96 2506 (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?,?\s*
acaff495 2507 )?
2508 (?:
2509 (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
15846398 2510 )?Z?$''', s)
acaff495 2511 if m:
2512 days, hours, mins, secs, ms = m.groups()
2513 else:
15846398 2514 m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
acaff495 2515 if m:
2516 hours, mins = m.groups()
2517 else:
2518 return None
2519
acaff495 2520 if ms:
19a03940 2521 ms = ms.replace(':', '.')
2522 return sum(float(part or 0) * mult for part, mult in (
2523 (days, 86400), (hours, 3600), (mins, 60), (secs, 1), (ms, 1)))
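# Illustrative usage (editor's annotation, not part of the original source):
#   parse_duration('1:02:03')   # -> 3723
#   parse_duration('3 min')     # -> 180
#   parse_duration('PT1H30M')   # ISO 8601 style -> 5400
#   parse_duration('bogus')     # -> None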
91d7d0b3
JMF
2524
2525
e65e4c88 2526def prepend_extension(filename, ext, expected_real_ext=None):
5f6a1245 2527 name, real_ext = os.path.splitext(filename)
e65e4c88 2528 return (
86e5f3ed 2529 f'{name}.{ext}{real_ext}'
e65e4c88 2530 if not expected_real_ext or real_ext[1:] == expected_real_ext
86e5f3ed 2531 else f'{filename}.{ext}')
d70ad093
PH
2532
2533
b3ed15b7
S
2534def replace_extension(filename, ext, expected_real_ext=None):
2535 name, real_ext = os.path.splitext(filename)
86e5f3ed 2536 return '{}.{}'.format(
b3ed15b7
S
2537 name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
2538 ext)
2539
2540
d70ad093
PH
2541def check_executable(exe, args=[]):
2542 """ Checks if the given binary is installed somewhere in PATH, and returns its name.
2543 args can be a list of arguments for a short output (like -version) """
2544 try:
f0c9fb96 2545 Popen.run([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
d70ad093
PH
2546 except OSError:
2547 return False
2548 return exe
b7ab0590
PH
2549
2550
7aaf4cd2 2551def _get_exe_version_output(exe, args):
95807118 2552 try:
b64d04c1 2553 # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
7a5c1cfe 2554 # SIGTTOU if yt-dlp is run in the background.
067aa17e 2555 # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
1cdda329 2556 stdout, _, ret = Popen.run([encodeArgument(exe)] + args, text=True,
2557 stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
2558 if ret:
2559 return None
95807118
PH
2560 except OSError:
2561 return False
f0c9fb96 2562 return stdout
cae97f65
PH
2563
2564
2565def detect_exe_version(output, version_re=None, unrecognized='present'):
14f25df2 2566 assert isinstance(output, str)
cae97f65
PH
2567 if version_re is None:
2568 version_re = r'version\s+([-0-9._a-zA-Z]+)'
2569 m = re.search(version_re, output)
95807118
PH
2570 if m:
2571 return m.group(1)
2572 else:
2573 return unrecognized
2574
2575
9af98e17 2576def get_exe_version(exe, args=['--version'],
1cdda329 2577 version_re=None, unrecognized=('present', 'broken')):
9af98e17 2578 """ Returns the version of the specified executable,
2579 or False if the executable is not present """
1cdda329 2580 unrecognized = variadic(unrecognized)
2581 assert len(unrecognized) in (1, 2)
9af98e17 2582 out = _get_exe_version_output(exe, args)
1cdda329 2583 if out is None:
2584 return unrecognized[-1]
2585 return out and detect_exe_version(out, version_re, unrecognized[0])
9af98e17 2586
2587
7e88d7d7 2588def frange(start=0, stop=None, step=1):
2589 """Float range"""
2590 if stop is None:
2591 start, stop = 0, start
2592 sign = [-1, 1][step > 0] if step else 0
2593 while sign * start < sign * stop:
2594 yield start
2595 start += step
2596
2597
cb89cfc1 2598class LazyList(collections.abc.Sequence):
0f06bcd7 2599 """Lazy immutable list from an iterable
2600 Note that slices of a LazyList are lists and not LazyList"""
483336e7 2601
8e5fecc8 2602 class IndexError(IndexError):
2603 pass
2604
282f5709 2605 def __init__(self, iterable, *, reverse=False, _cache=None):
0f06bcd7 2606 self._iterable = iter(iterable)
2607 self._cache = [] if _cache is None else _cache
2608 self._reversed = reverse
483336e7 2609
2610 def __iter__(self):
0f06bcd7 2611 if self._reversed:
28419ca2 2612 # We need to consume the entire iterable to iterate in reverse
981052c9 2613 yield from self.exhaust()
28419ca2 2614 return
0f06bcd7 2615 yield from self._cache
2616 for item in self._iterable:
2617 self._cache.append(item)
483336e7 2618 yield item
2619
0f06bcd7 2620 def _exhaust(self):
2621 self._cache.extend(self._iterable)
2622 self._iterable = [] # Discard the emptied iterable to make it pickle-able
2623 return self._cache
28419ca2 2624
981052c9 2625 def exhaust(self):
0f06bcd7 2626 """Evaluate the entire iterable"""
2627 return self._exhaust()[::-1 if self._reversed else 1]
981052c9 2628
28419ca2 2629 @staticmethod
0f06bcd7 2630 def _reverse_index(x):
f2df4071 2631 return None if x is None else ~x
483336e7 2632
2633 def __getitem__(self, idx):
2634 if isinstance(idx, slice):
0f06bcd7 2635 if self._reversed:
2636 idx = slice(self._reverse_index(idx.start), self._reverse_index(idx.stop), -(idx.step or 1))
e0f2b4b4 2637 start, stop, step = idx.start, idx.stop, idx.step or 1
483336e7 2638 elif isinstance(idx, int):
0f06bcd7 2639 if self._reversed:
2640 idx = self._reverse_index(idx)
e0f2b4b4 2641 start, stop, step = idx, idx, 0
483336e7 2642 else:
2643 raise TypeError('indices must be integers or slices')
e0f2b4b4 2644 if ((start or 0) < 0 or (stop or 0) < 0
2645 or (start is None and step < 0)
2646 or (stop is None and step > 0)):
483336e7 2647 # We need to consume the entire iterable to be able to slice from the end
2648 # Obviously, never use this with infinite iterables
0f06bcd7 2649 self._exhaust()
8e5fecc8 2650 try:
0f06bcd7 2651 return self._cache[idx]
8e5fecc8 2652 except IndexError as e:
2653 raise self.IndexError(e) from e
0f06bcd7 2654 n = max(start or 0, stop or 0) - len(self._cache) + 1
28419ca2 2655 if n > 0:
0f06bcd7 2656 self._cache.extend(itertools.islice(self._iterable, n))
8e5fecc8 2657 try:
0f06bcd7 2658 return self._cache[idx]
8e5fecc8 2659 except IndexError as e:
2660 raise self.IndexError(e) from e
483336e7 2661
2662 def __bool__(self):
2663 try:
0f06bcd7 2664 self[-1] if self._reversed else self[0]
8e5fecc8 2665 except self.IndexError:
483336e7 2666 return False
2667 return True
2668
2669 def __len__(self):
0f06bcd7 2670 self._exhaust()
2671 return len(self._cache)
483336e7 2672
282f5709 2673 def __reversed__(self):
0f06bcd7 2674 return type(self)(self._iterable, reverse=not self._reversed, _cache=self._cache)
282f5709 2675
2676 def __copy__(self):
0f06bcd7 2677 return type(self)(self._iterable, reverse=self._reversed, _cache=self._cache)
282f5709 2678
28419ca2 2679 def __repr__(self):
2680 # repr and str should mimic a list. So we exhaust the iterable
2681 return repr(self.exhaust())
2682
2683 def __str__(self):
2684 return repr(self.exhaust())
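# Illustrative usage (editor's annotation, not part of the original source). The
# underlying iterable is only consumed as far as needed:
#   import itertools
#   lst = LazyList(itertools.count())         # an infinite iterable is fine
#   lst[:3]                                   # -> [0, 1, 2] (a plain list, not a LazyList)
#   list(LazyList(range(5), reverse=True))    # -> [4, 3, 2, 1, 0]
# Note that len(lst) or repr(lst) would exhaust the iterable, so avoid them on infinite input.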
2685
483336e7 2686
7be9ccff 2687class PagedList:
c07a39ae 2688
2689 class IndexError(IndexError):
2690 pass
2691
dd26ced1
PH
2692 def __len__(self):
2693 # This is only useful for tests
2694 return len(self.getslice())
2695
7be9ccff 2696 def __init__(self, pagefunc, pagesize, use_cache=True):
2697 self._pagefunc = pagefunc
2698 self._pagesize = pagesize
f1d13090 2699 self._pagecount = float('inf')
7be9ccff 2700 self._use_cache = use_cache
2701 self._cache = {}
2702
2703 def getpage(self, pagenum):
d8cf8d97 2704 page_results = self._cache.get(pagenum)
2705 if page_results is None:
f1d13090 2706 page_results = [] if pagenum > self._pagecount else list(self._pagefunc(pagenum))
7be9ccff 2707 if self._use_cache:
2708 self._cache[pagenum] = page_results
2709 return page_results
2710
2711 def getslice(self, start=0, end=None):
2712 return list(self._getslice(start, end))
2713
2714 def _getslice(self, start, end):
55575225 2715 raise NotImplementedError('This method must be implemented by subclasses')
2716
2717 def __getitem__(self, idx):
f1d13090 2718 assert self._use_cache, 'Indexing PagedList requires cache'
55575225 2719 if not isinstance(idx, int) or idx < 0:
2720 raise TypeError('indices must be non-negative integers')
2721 entries = self.getslice(idx, idx + 1)
d8cf8d97 2722 if not entries:
c07a39ae 2723 raise self.IndexError()
d8cf8d97 2724 return entries[0]
55575225 2725
9c44d242
PH
2726
2727class OnDemandPagedList(PagedList):
a44ca5a4 2728 """Download pages until a page with less than maximum results"""
86e5f3ed 2729
7be9ccff 2730 def _getslice(self, start, end):
b7ab0590
PH
2731 for pagenum in itertools.count(start // self._pagesize):
2732 firstid = pagenum * self._pagesize
2733 nextfirstid = pagenum * self._pagesize + self._pagesize
2734 if start >= nextfirstid:
2735 continue
2736
b7ab0590
PH
2737 startv = (
2738 start % self._pagesize
2739 if firstid <= start < nextfirstid
2740 else 0)
b7ab0590
PH
2741 endv = (
2742 ((end - 1) % self._pagesize) + 1
2743 if (end is not None and firstid <= end <= nextfirstid)
2744 else None)
2745
f1d13090 2746 try:
2747 page_results = self.getpage(pagenum)
2748 except Exception:
2749 self._pagecount = pagenum - 1
2750 raise
b7ab0590
PH
2751 if startv != 0 or endv is not None:
2752 page_results = page_results[startv:endv]
7be9ccff 2753 yield from page_results
b7ab0590
PH
2754
2755 # A little optimization - if the current page is not "full", i.e. does
2756 # not contain page_size videos, then we can assume that this page
2757 # is the last one - there are no more ids on further pages -
2758 # i.e. there is no need to query again.
2759 if len(page_results) + startv < self._pagesize:
2760 break
2761
2762 # If we got the whole page, but the next page is not interesting,
2763 # break out early as well
2764 if end == nextfirstid:
2765 break
81c2f20b
PH
2766
2767
9c44d242 2768class InAdvancePagedList(PagedList):
a44ca5a4 2769 """PagedList with total number of pages known in advance"""
86e5f3ed 2770
9c44d242 2771 def __init__(self, pagefunc, pagecount, pagesize):
7be9ccff 2772 PagedList.__init__(self, pagefunc, pagesize, True)
f1d13090 2773 self._pagecount = pagecount
9c44d242 2774
7be9ccff 2775 def _getslice(self, start, end):
9c44d242 2776 start_page = start // self._pagesize
d37707bd 2777 end_page = self._pagecount if end is None else min(self._pagecount, end // self._pagesize + 1)
9c44d242
PH
2778 skip_elems = start - start_page * self._pagesize
2779 only_more = None if end is None else end - start
2780 for pagenum in range(start_page, end_page):
7be9ccff 2781 page_results = self.getpage(pagenum)
9c44d242 2782 if skip_elems:
7be9ccff 2783 page_results = page_results[skip_elems:]
9c44d242
PH
2784 skip_elems = None
2785 if only_more is not None:
7be9ccff 2786 if len(page_results) < only_more:
2787 only_more -= len(page_results)
9c44d242 2788 else:
7be9ccff 2789 yield from page_results[:only_more]
9c44d242 2790 break
7be9ccff 2791 yield from page_results
9c44d242
PH
2792
2793
7e88d7d7 2794class PlaylistEntries:
2795 MissingEntry = object()
2796 is_exhausted = False
2797
2798 def __init__(self, ydl, info_dict):
7e9a6125 2799 self.ydl = ydl
2800
2801 # _entries must be assigned now since infodict can change during iteration
2802 entries = info_dict.get('entries')
2803 if entries is None:
2804 raise EntryNotInPlaylist('There are no entries')
2805 elif isinstance(entries, list):
2806 self.is_exhausted = True
2807
2808 requested_entries = info_dict.get('requested_entries')
bc5c2f8a 2809 self.is_incomplete = requested_entries is not None
7e9a6125 2810 if self.is_incomplete:
2811 assert self.is_exhausted
bc5c2f8a 2812 self._entries = [self.MissingEntry] * max(requested_entries or [0])
7e9a6125 2813 for i, entry in zip(requested_entries, entries):
2814 self._entries[i - 1] = entry
2815 elif isinstance(entries, (list, PagedList, LazyList)):
2816 self._entries = entries
2817 else:
2818 self._entries = LazyList(entries)
7e88d7d7 2819
2820 PLAYLIST_ITEMS_RE = re.compile(r'''(?x)
2821 (?P<start>[+-]?\d+)?
2822 (?P<range>[:-]
2823 (?P<end>[+-]?\d+|inf(?:inite)?)?
2824 (?::(?P<step>[+-]?\d+))?
2825 )?''')
2826
2827 @classmethod
2828 def parse_playlist_items(cls, string):
2829 for segment in string.split(','):
2830 if not segment:
2831 raise ValueError('There are two or more consecutive commas')
2832 mobj = cls.PLAYLIST_ITEMS_RE.fullmatch(segment)
2833 if not mobj:
2834 raise ValueError(f'{segment!r} is not a valid specification')
2835 start, end, step, has_range = mobj.group('start', 'end', 'step', 'range')
2836 if int_or_none(step) == 0:
2837 raise ValueError(f'Step in {segment!r} cannot be zero')
2838 yield slice(int_or_none(start), float_or_none(end), int_or_none(step)) if has_range else int(start)
2839
2840 def get_requested_items(self):
2841 playlist_items = self.ydl.params.get('playlist_items')
2842 playlist_start = self.ydl.params.get('playliststart', 1)
2843 playlist_end = self.ydl.params.get('playlistend')
2844 # For backwards compatibility, interpret -1 as whole list
2845 if playlist_end in (-1, None):
2846 playlist_end = ''
2847 if not playlist_items:
2848 playlist_items = f'{playlist_start}:{playlist_end}'
2849 elif playlist_start != 1 or playlist_end:
2850 self.ydl.report_warning('Ignoring playliststart and playlistend because playlistitems was given', only_once=True)
2851
2852 for index in self.parse_playlist_items(playlist_items):
2853 for i, entry in self[index]:
2854 yield i, entry
1ac4fd80 2855 if not entry:
2856 continue
7e88d7d7 2857 try:
d21056f4 2858 # The item may have just been added to archive. Don't break due to it
2859 if not self.ydl.params.get('lazy_playlist'):
2860 # TODO: Add auto-generated fields
2861 self.ydl._match_entry(entry, incomplete=True, silent=True)
7e88d7d7 2862 except (ExistingVideoReached, RejectedVideoReached):
2863 return
2864
7e9a6125 2865 def get_full_count(self):
2866 if self.is_exhausted and not self.is_incomplete:
7e88d7d7 2867 return len(self)
2868 elif isinstance(self._entries, InAdvancePagedList):
2869 if self._entries._pagesize == 1:
2870 return self._entries._pagecount
2871
7e88d7d7 2872 @functools.cached_property
2873 def _getter(self):
2874 if isinstance(self._entries, list):
2875 def get_entry(i):
2876 try:
2877 entry = self._entries[i]
2878 except IndexError:
2879 entry = self.MissingEntry
2880 if not self.is_incomplete:
2881 raise self.IndexError()
2882 if entry is self.MissingEntry:
bc5c2f8a 2883 raise EntryNotInPlaylist(f'Entry {i + 1} cannot be found')
7e88d7d7 2884 return entry
2885 else:
2886 def get_entry(i):
2887 try:
2888 return type(self.ydl)._handle_extraction_exceptions(lambda _, i: self._entries[i])(self.ydl, i)
2889 except (LazyList.IndexError, PagedList.IndexError):
2890 raise self.IndexError()
2891 return get_entry
2892
2893 def __getitem__(self, idx):
2894 if isinstance(idx, int):
2895 idx = slice(idx, idx)
2896
2897 # NB: PlaylistEntries[1:10] => (0, 1, ... 9)
2898 step = 1 if idx.step is None else idx.step
2899 if idx.start is None:
2900 start = 0 if step > 0 else len(self) - 1
2901 else:
2902 start = idx.start - 1 if idx.start >= 0 else len(self) + idx.start
2903
2904 # NB: Do not call len(self) when idx == [:]
2905 if idx.stop is None:
2906 stop = 0 if step < 0 else float('inf')
2907 else:
2908 stop = idx.stop - 1 if idx.stop >= 0 else len(self) + idx.stop
2909 stop += [-1, 1][step > 0]
2910
2911 for i in frange(start, stop, step):
2912 if i < 0:
2913 continue
2914 try:
7e9a6125 2915 entry = self._getter(i)
2916 except self.IndexError:
2917 self.is_exhausted = True
2918 if step > 0:
7e88d7d7 2919 break
7e9a6125 2920 continue
7e88d7d7 2921 yield i + 1, entry
2922
2923 def __len__(self):
2924 return len(tuple(self[:]))
2925
2926 class IndexError(IndexError):
2927 pass
2928
2929
81c2f20b 2930def uppercase_escape(s):
676eb3f2 2931 unicode_escape = codecs.getdecoder('unicode_escape')
81c2f20b 2932 return re.sub(
a612753d 2933 r'\\U[0-9a-fA-F]{8}',
676eb3f2
PH
2934 lambda m: unicode_escape(m.group(0))[0],
2935 s)
0fe2ff78
YCH
2936
2937
2938def lowercase_escape(s):
2939 unicode_escape = codecs.getdecoder('unicode_escape')
2940 return re.sub(
2941 r'\\u[0-9a-fA-F]{4}',
2942 lambda m: unicode_escape(m.group(0))[0],
2943 s)
b53466e1 2944
d05cfe06
S
2945
2946def escape_rfc3986(s):
2947 """Escape non-ASCII characters as suggested by RFC 3986"""
f9934b96 2948 return urllib.parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
d05cfe06
S
2949
2950
2951def escape_url(url):
2952 """Escape URL as suggested by RFC 3986"""
14f25df2 2953 url_parsed = urllib.parse.urlparse(url)
d05cfe06 2954 return url_parsed._replace(
efbed08d 2955 netloc=url_parsed.netloc.encode('idna').decode('ascii'),
d05cfe06
S
2956 path=escape_rfc3986(url_parsed.path),
2957 params=escape_rfc3986(url_parsed.params),
2958 query=escape_rfc3986(url_parsed.query),
2959 fragment=escape_rfc3986(url_parsed.fragment)
2960 ).geturl()
2961
62e609ab 2962
96b9e9cf 2963def parse_qs(url, **kwargs):
2964 return urllib.parse.parse_qs(urllib.parse.urlparse(url).query, **kwargs)
4dfbf869 2965
2966
62e609ab
PH
2967def read_batch_urls(batch_fd):
2968 def fixup(url):
14f25df2 2969 if not isinstance(url, str):
62e609ab 2970 url = url.decode('utf-8', 'replace')
8c04f0be 2971 BOM_UTF8 = ('\xef\xbb\xbf', '\ufeff')
2972 for bom in BOM_UTF8:
2973 if url.startswith(bom):
2974 url = url[len(bom):]
2975 url = url.lstrip()
2976 if not url or url.startswith(('#', ';', ']')):
62e609ab 2977 return False
8c04f0be 2978 # "#" cannot be stripped out since it is part of the URI
962ffcf8 2979 # However, it can be safely stripped out if it follows whitespace
8c04f0be 2980 return re.split(r'\s#', url, 1)[0].rstrip()
62e609ab
PH
2981
2982 with contextlib.closing(batch_fd) as fd:
2983 return [url for url in map(fixup, fd) if url]
b74fa8cd
JMF
2984
2985
2986def urlencode_postdata(*args, **kargs):
14f25df2 2987 return urllib.parse.urlencode(*args, **kargs).encode('ascii')
bcf89ce6
PH
2988
2989
45b2ee6f 2990def update_url(url, *, query_update=None, **kwargs):
2991 """Replace URL components specified by kwargs
2992 @param url str or parse url tuple
2993 @param query_update update query
2994 @returns str
2995 """
2996 if isinstance(url, str):
2997 if not kwargs and not query_update:
2998 return url
2999 else:
3000 url = urllib.parse.urlparse(url)
3001 if query_update:
3002 assert 'query' not in kwargs, 'query_update and query cannot be specified at the same time'
3003 kwargs['query'] = urllib.parse.urlencode({
3004 **urllib.parse.parse_qs(url.query),
3005 **query_update
3006 }, True)
3007 return urllib.parse.urlunparse(url._replace(**kwargs))
3008
3009
38f9ef31 3010def update_url_query(url, query):
45b2ee6f 3011 return update_url(url, query_update=query)
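# Illustrative usage (editor's annotation, not part of the original source). Existing
# query parameters are kept and merged with the update (output order follows dict order):
#   update_url_query('http://example.com/path?a=1', {'b': '2'})
#   # -> 'http://example.com/path?a=1&b=2'
#   update_url('http://example.com/path', scheme='https')   # -> 'https://example.com/path'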
16392824 3012
8e60dc75 3013
c043c246 3014def update_Request(req, url=None, data=None, headers=None, query=None):
ed0291d1 3015 req_headers = req.headers.copy()
c043c246 3016 req_headers.update(headers or {})
ed0291d1
S
3017 req_data = data or req.data
3018 req_url = update_url_query(url or req.get_full_url(), query)
95cf60e8
S
3019 req_get_method = req.get_method()
3020 if req_get_method == 'HEAD':
3021 req_type = HEADRequest
3022 elif req_get_method == 'PUT':
3023 req_type = PUTRequest
3024 else:
ac668111 3025 req_type = urllib.request.Request
ed0291d1
S
3026 new_req = req_type(
3027 req_url, data=req_data, headers=req_headers,
3028 origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
3029 if hasattr(req, 'timeout'):
3030 new_req.timeout = req.timeout
3031 return new_req
3032
3033
10c87c15 3034def _multipart_encode_impl(data, boundary):
0c265486
YCH
3035 content_type = 'multipart/form-data; boundary=%s' % boundary
3036
3037 out = b''
3038 for k, v in data.items():
3039 out += b'--' + boundary.encode('ascii') + b'\r\n'
14f25df2 3040 if isinstance(k, str):
0f06bcd7 3041 k = k.encode()
14f25df2 3042 if isinstance(v, str):
0f06bcd7 3043 v = v.encode()
0c265486
YCH
3044 # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
3045 # suggests sending UTF-8 directly. Firefox sends UTF-8, too
b2ad479d 3046 content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
0c265486
YCH
3047 if boundary.encode('ascii') in content:
3048 raise ValueError('Boundary overlaps with data')
3049 out += content
3050
3051 out += b'--' + boundary.encode('ascii') + b'--\r\n'
3052
3053 return out, content_type
3054
3055
3056def multipart_encode(data, boundary=None):
3057 '''
3058 Encode a dict to RFC 7578-compliant form-data
3059
3060 data:
3061 A dict where keys and values can be either Unicode or bytes-like
3062 objects.
3063 boundary:
3064 If specified as a Unicode object, it's used as the boundary. Otherwise
3065 a random boundary is generated.
3066
3067 Reference: https://tools.ietf.org/html/rfc7578
3068 '''
3069 has_specified_boundary = boundary is not None
3070
3071 while True:
3072 if boundary is None:
3073 boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
3074
3075 try:
10c87c15 3076 out, content_type = _multipart_encode_impl(data, boundary)
0c265486
YCH
3077 break
3078 except ValueError:
3079 if has_specified_boundary:
3080 raise
3081 boundary = None
3082
3083 return out, content_type
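# Illustrative usage (editor's annotation, not part of the original source). With an
# explicit boundary the output is deterministic:
#   multipart_encode({'field': 'value'}, boundary='AAAA')
#   # -> (b'--AAAA\r\nContent-Disposition: form-data; name="field"\r\n\r\nvalue\r\n--AAAA--\r\n',
#   #     'multipart/form-data; boundary=AAAA')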
3084
3085
b079c26f
SS
3086def is_iterable_like(x, allowed_types=collections.abc.Iterable, blocked_types=NO_DEFAULT):
3087 if blocked_types is NO_DEFAULT:
3088 blocked_types = (str, bytes, collections.abc.Mapping)
3089 return isinstance(x, allowed_types) and not isinstance(x, blocked_types)
3090
3091
3092def variadic(x, allowed_types=NO_DEFAULT):
4823ec9f 3093 if not isinstance(allowed_types, (tuple, type)):
3094 deprecation_warning('allowed_types should be a tuple or a type')
3095 allowed_types = tuple(allowed_types)
6f2287cb 3096 return x if is_iterable_like(x, blocked_types=allowed_types) else (x, )
304ad45a 3097
3098
c4f60dd7 3099def try_call(*funcs, expected_type=None, args=[], kwargs={}):
3100 for f in funcs:
a32a9a7e 3101 try:
c4f60dd7 3102 val = f(*args, **kwargs)
ab029d7e 3103 except (AttributeError, KeyError, TypeError, IndexError, ValueError, ZeroDivisionError):
a32a9a7e
S
3104 pass
3105 else:
c4f60dd7 3106 if expected_type is None or isinstance(val, expected_type):
3107 return val
3108
3109
3110def try_get(src, getter, expected_type=None):
3111 return try_call(*variadic(getter), args=(src,), expected_type=expected_type)
329ca3be
S
3112
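# Example (illustrative sketch): failed lookups are swallowed and None is returned:
#   >>> try_get({'a': {'b': 1}}, lambda x: x['a']['b'], int)
#   1
#   >>> try_get({'a': {}}, lambda x: x['a']['b'], int) is None
#   True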
3113
90137ca4 3114def filter_dict(dct, cndn=lambda _, v: v is not None):
3115 return {k: v for k, v in dct.items() if cndn(k, v)}
3116
3117
6cc62232
S
3118def merge_dicts(*dicts):
3119 merged = {}
3120 for a_dict in dicts:
3121 for k, v in a_dict.items():
90137ca4 3122 if (v is not None and k not in merged
3123 or isinstance(v, str) and merged[k] == ''):
6cc62232
S
3124 merged[k] = v
3125 return merged
3126
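# Example (illustrative sketch): earlier dicts take precedence, but an empty string
# can still be overridden by a later non-empty one:
#   >>> merge_dicts({'a': None, 'b': ''}, {'a': 1, 'b': 'x'})
#   {'b': 'x', 'a': 1}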
3127
8e60dc75 3128def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
14f25df2 3129 return string if isinstance(string, str) else str(string, encoding, errors)
8e60dc75 3130
16392824 3131
a1a530b0
PH
3132US_RATINGS = {
3133 'G': 0,
3134 'PG': 10,
3135 'PG-13': 13,
3136 'R': 16,
3137 'NC': 18,
3138}
fac55558
PH
3139
3140
a8795327 3141TV_PARENTAL_GUIDELINES = {
5a16c9d9
RA
3142 'TV-Y': 0,
3143 'TV-Y7': 7,
3144 'TV-G': 0,
3145 'TV-PG': 0,
3146 'TV-14': 14,
3147 'TV-MA': 17,
a8795327
S
3148}
3149
3150
146c80e2 3151def parse_age_limit(s):
19a03940 3152 # isinstance(False, int) is True. So type() must be used instead
c487cf00 3153 if type(s) is int: # noqa: E721
a8795327 3154 return s if 0 <= s <= 21 else None
19a03940 3155 elif not isinstance(s, str):
d838b1bd 3156 return None
146c80e2 3157 m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
a8795327
S
3158 if m:
3159 return int(m.group('age'))
5c5fae6d 3160 s = s.upper()
a8795327
S
3161 if s in US_RATINGS:
3162 return US_RATINGS[s]
5a16c9d9 3163 m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
b8361187 3164 if m:
5a16c9d9 3165 return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
b8361187 3166 return None
146c80e2
S
3167
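# Example (illustrative sketch):
#   >>> parse_age_limit('PG-13')
#   13
#   >>> parse_age_limit('TV-MA')
#   17
#   >>> parse_age_limit('18+')
#   18
#   >>> parse_age_limit('unknown') is None
#   True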
3168
fac55558 3169def strip_jsonp(code):
609a61e3 3170 return re.sub(
5552c9eb 3171 r'''(?sx)^
e9c671d5 3172 (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
5552c9eb
YCH
3173 (?:\s*&&\s*(?P=func_name))?
3174 \s*\(\s*(?P<callback_data>.*)\);?
3175 \s*?(?://[^\n]*)*$''',
3176 r'\g<callback_data>', code)
478c2c61
PH
3177
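# Example (illustrative sketch): unwraps a JSONP callback so the payload can be fed to json.loads:
#   >>> strip_jsonp('callback({"id": "x"});')
#   '{"id": "x"}'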
3178
8f53dc44 3179def js_to_json(code, vars={}, *, strict=False):
5c610515 3180 # vars is a dict of var, val pairs to substitute
0898c5c8 3181 STRING_QUOTES = '\'"`'
a71b812f 3182 STRING_RE = '|'.join(rf'{q}(?:\\.|[^\\{q}])*{q}' for q in STRING_QUOTES)
c843e685 3183 COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
86e5f3ed 3184 SKIP_RE = fr'\s*(?:{COMMENT_RE})?\s*'
4195096e 3185 INTEGER_TABLE = (
86e5f3ed 3186 (fr'(?s)^(0[xX][0-9a-fA-F]+){SKIP_RE}:?$', 16),
3187 (fr'(?s)^(0+[0-7]+){SKIP_RE}:?$', 8),
4195096e
S
3188 )
3189
a71b812f
SS
3190 def process_escape(match):
3191 JSON_PASSTHROUGH_ESCAPES = R'"\bfnrtu'
3192 escape = match.group(1) or match.group(2)
3193
3194 return (Rf'\{escape}' if escape in JSON_PASSTHROUGH_ESCAPES
3195 else R'\u00' if escape == 'x'
3196 else '' if escape == '\n'
3197 else escape)
3198
0898c5c8
SS
3199 def template_substitute(match):
3200 evaluated = js_to_json(match.group(1), vars, strict=strict)
3201 if evaluated[0] == '"':
3202 return json.loads(evaluated)
3203 return evaluated
3204
e05f6939 3205 def fix_kv(m):
e7b6d122
PH
3206 v = m.group(0)
3207 if v in ('true', 'false', 'null'):
3208 return v
421ddcb8
C
3209 elif v in ('undefined', 'void 0'):
3210 return 'null'
8bdd16b4 3211 elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
a71b812f
SS
3212 return ''
3213
3214 if v[0] in STRING_QUOTES:
0898c5c8
SS
3215 v = re.sub(r'(?s)\${([^}]+)}', template_substitute, v[1:-1]) if v[0] == '`' else v[1:-1]
3216 escaped = re.sub(r'(?s)(")|\\(.)', process_escape, v)
a71b812f
SS
3217 return f'"{escaped}"'
3218
3219 for regex, base in INTEGER_TABLE:
3220 im = re.match(regex, v)
3221 if im:
3222 i = int(im.group(1), base)
3223 return f'"{i}":' if v.endswith(':') else str(i)
3224
3225 if v in vars:
d5f043d1
C
3226 try:
3227 if not strict:
3228 json.loads(vars[v])
08e29b9f 3229 except json.JSONDecodeError:
d5f043d1
C
3230 return json.dumps(vars[v])
3231 else:
3232 return vars[v]
89ac4a19 3233
a71b812f
SS
3234 if not strict:
3235 return f'"{v}"'
5c610515 3236
a71b812f 3237 raise ValueError(f'Unknown value: {v}')
e05f6939 3238
8072ef2b 3239 def create_map(mobj):
3240 return json.dumps(dict(json.loads(js_to_json(mobj.group(1) or '[]', vars=vars))))
3241
8072ef2b 3242 code = re.sub(r'new Map\((\[.*?\])?\)', create_map, code)
8f53dc44 3243 if not strict:
3244 code = re.sub(r'new Date\((".+")\)', r'\g<1>', code)
f55523cf 3245 code = re.sub(r'new \w+\((.*?)\)', lambda m: json.dumps(m.group(0)), code)
389896df 3246 code = re.sub(r'parseInt\([^\d]+(\d+)[^\d]+\)', r'\1', code)
3247 code = re.sub(r'\(function\([^)]*\)\s*\{[^}]*\}\s*\)\s*\(\s*(["\'][^)]*["\'])\s*\)', r'\1', code)
febff4c1 3248
a71b812f
SS
3249 return re.sub(rf'''(?sx)
3250 {STRING_RE}|
3251 {COMMENT_RE}|,(?={SKIP_RE}[\]}}])|
421ddcb8 3252 void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
a71b812f
SS
3253 \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{SKIP_RE}:)?|
3254 [0-9]+(?={SKIP_RE}:)|
8bdd16b4 3255 !+
a71b812f 3256 ''', fix_kv, code)
e05f6939
PH
3257
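# Example (illustrative sketch): unquoted keys, single-quoted strings, hex literals,
# `undefined` and trailing commas are normalized into valid JSON:
#   >>> js_to_json("{a: 'b', c: 0x1A, d: undefined,}")
#   '{"a": "b", "c": 26, "d": null}'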
3258
478c2c61
PH
3259def qualities(quality_ids):
3260 """ Get a numeric quality value out of a list of possible values """
3261 def q(qid):
3262 try:
3263 return quality_ids.index(qid)
3264 except ValueError:
3265 return -1
3266 return q
3267
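# Example (illustrative sketch): unknown values sort below all known ones:
#   >>> q = qualities(['240p', '360p', '720p'])
#   >>> q('720p'), q('240p'), q('4320p')
#   (2, 0, -1)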
acd69589 3268
119e40ef 3269POSTPROCESS_WHEN = ('pre_process', 'after_filter', 'video', 'before_dl', 'post_process', 'after_move', 'after_video', 'playlist')
1e43a6f7 3270
3271
de6000d9 3272DEFAULT_OUTTMPL = {
3273 'default': '%(title)s [%(id)s].%(ext)s',
72755351 3274 'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
de6000d9 3275}
3276OUTTMPL_TYPES = {
72755351 3277 'chapter': None,
de6000d9 3278 'subtitle': None,
3279 'thumbnail': None,
3280 'description': 'description',
3281 'annotation': 'annotations.xml',
3282 'infojson': 'info.json',
08438d2c 3283 'link': None,
3b603dbd 3284 'pl_video': None,
5112f26a 3285 'pl_thumbnail': None,
de6000d9 3286 'pl_description': 'description',
3287 'pl_infojson': 'info.json',
3288}
0a871f68 3289
143db31d 3290# As of [1], the format syntax is:
3291# %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
3292# 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
901130bb 3293STR_FORMAT_RE_TMPL = r'''(?x)
3294 (?<!%)(?P<prefix>(?:%%)*)
143db31d 3295 %
524e2e4f 3296 (?P<has_key>\((?P<key>{0})\))?
752cda38 3297 (?P<format>
524e2e4f 3298 (?P<conversion>[#0\-+ ]+)?
3299 (?P<min_width>\d+)?
3300 (?P<precision>\.\d+)?
3301 (?P<len_mod>[hlL])? # unused in python
901130bb 3302 {1} # conversion type
752cda38 3303 )
143db31d 3304'''
3305
7d1eb38a 3306
ebe1b4e3 3307STR_FORMAT_TYPES = 'diouxXeEfFgGcrsa'
a020a0dc 3308
7d1eb38a 3309
a020a0dc
PH
3310def limit_length(s, length):
3311 """ Add ellipses to overly long strings """
3312 if s is None:
3313 return None
3314 ELLIPSES = '...'
3315 if len(s) > length:
3316 return s[:length - len(ELLIPSES)] + ELLIPSES
3317 return s
48844745
PH
3318
3319
3320def version_tuple(v):
5f9b8394 3321 return tuple(int(e) for e in re.split(r'[-.]', v))
48844745
PH
3322
3323
3324def is_outdated_version(version, limit, assume_new=True):
3325 if not version:
3326 return not assume_new
3327 try:
3328 return version_tuple(version) < version_tuple(limit)
3329 except ValueError:
3330 return not assume_new
732ea2f0
PH
3331
3332
3333def ytdl_is_updateable():
7a5c1cfe 3334 """ Returns if yt-dlp can be updated with -U """
735d865e 3335
69bec673 3336 from ..update import is_non_updateable
732ea2f0 3337
5d535b4a 3338 return not is_non_updateable()
7d4111ed
PH
3339
3340
3341def args_to_str(args):
3342 # Get a short string representation for a subprocess command
702ccf2d 3343 return ' '.join(compat_shlex_quote(a) for a in args)
2ccd1b10
PH
3344
3345
a44ca5a4 3346def error_to_str(err):
3347 return f'{type(err).__name__}: {err}'
3348
3349
2647c933 3350def mimetype2ext(mt, default=NO_DEFAULT):
3351 if not isinstance(mt, str):
3352 if default is not NO_DEFAULT:
3353 return default
eb9ee194
S
3354 return None
3355
2647c933 3356 MAP = {
3357 # video
f6861ec9 3358 '3gpp': '3gp',
2647c933 3359 'mp2t': 'ts',
3360 'mp4': 'mp4',
3361 'mpeg': 'mpeg',
3362 'mpegurl': 'm3u8',
3363 'quicktime': 'mov',
3364 'webm': 'webm',
3365 'vp9': 'vp9',
f6861ec9 3366 'x-flv': 'flv',
2647c933 3367 'x-m4v': 'm4v',
3368 'x-matroska': 'mkv',
3369 'x-mng': 'mng',
a0d8d704 3370 'x-mp4-fragmented': 'mp4',
2647c933 3371 'x-ms-asf': 'asf',
a0d8d704 3372 'x-ms-wmv': 'wmv',
2647c933 3373 'x-msvideo': 'avi',
3374
3375 # application (streaming playlists)
b4173f15 3376 'dash+xml': 'mpd',
b4173f15 3377 'f4m+xml': 'f4m',
f164b971 3378 'hds+xml': 'f4m',
2647c933 3379 'vnd.apple.mpegurl': 'm3u8',
e910fe2f 3380 'vnd.ms-sstr+xml': 'ism',
2647c933 3381 'x-mpegurl': 'm3u8',
3382
3383 # audio
3384 'audio/mp4': 'm4a',
3385 # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3.
3386 # Using .mp3 as it's the most popular one
3387 'audio/mpeg': 'mp3',
d80ca5de 3388 'audio/webm': 'webm',
2647c933 3389 'audio/x-matroska': 'mka',
3390 'audio/x-mpegurl': 'm3u',
3391 'midi': 'mid',
3392 'ogg': 'ogg',
3393 'wav': 'wav',
3394 'wave': 'wav',
3395 'x-aac': 'aac',
3396 'x-flac': 'flac',
3397 'x-m4a': 'm4a',
3398 'x-realaudio': 'ra',
39e7107d 3399 'x-wav': 'wav',
9359f3d4 3400
2647c933 3401 # image
3402 'avif': 'avif',
3403 'bmp': 'bmp',
3404 'gif': 'gif',
3405 'jpeg': 'jpg',
3406 'png': 'png',
3407 'svg+xml': 'svg',
3408 'tiff': 'tif',
3409 'vnd.wap.wbmp': 'wbmp',
3410 'webp': 'webp',
3411 'x-icon': 'ico',
3412 'x-jng': 'jng',
3413 'x-ms-bmp': 'bmp',
3414
3415 # caption
3416 'filmstrip+json': 'fs',
3417 'smptett+xml': 'tt',
3418 'ttaf+xml': 'dfxp',
3419 'ttml+xml': 'ttml',
3420 'x-ms-sami': 'sami',
9359f3d4 3421
2647c933 3422 # misc
3423 'gzip': 'gz',
9359f3d4
F
3424 'json': 'json',
3425 'xml': 'xml',
3426 'zip': 'zip',
9359f3d4
F
3427 }
3428
2647c933 3429 mimetype = mt.partition(';')[0].strip().lower()
3430 _, _, subtype = mimetype.rpartition('/')
9359f3d4 3431
69bec673 3432 ext = traversal.traverse_obj(MAP, mimetype, subtype, subtype.rsplit('+')[-1])
2647c933 3433 if ext:
3434 return ext
3435 elif default is not NO_DEFAULT:
3436 return default
9359f3d4 3437 return subtype.replace('+', '.')
c460bdd5
PH
3438
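# Example (illustrative sketch): parameters after ';' are ignored and known subtypes
# are mapped to conventional extensions:
#   >>> mimetype2ext('video/mp4; codecs="avc1.64001F"')
#   'mp4'
#   >>> mimetype2ext('application/x-mpegURL')
#   'm3u8'
#   >>> mimetype2ext('audio/mpeg')
#   'mp3'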
3439
2814f12b
THD
3440def ext2mimetype(ext_or_url):
3441 if not ext_or_url:
3442 return None
3443 if '.' not in ext_or_url:
3444 ext_or_url = f'file.{ext_or_url}'
3445 return mimetypes.guess_type(ext_or_url)[0]
3446
3447
4f3c5e06 3448def parse_codecs(codecs_str):
3449 # http://tools.ietf.org/html/rfc6381
3450 if not codecs_str:
3451 return {}
a0566bbf 3452 split_codecs = list(filter(None, map(
dbf5416a 3453 str.strip, codecs_str.strip().strip(',').split(','))))
3fe75fdc 3454 vcodec, acodec, scodec, hdr = None, None, None, None
a0566bbf 3455 for full_codec in split_codecs:
d816f61f 3456 parts = re.sub(r'0+(?=\d)', '', full_codec).split('.')
3457 if parts[0] in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
3458 'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
3459 if vcodec:
3460 continue
3461 vcodec = full_codec
3462 if parts[0] in ('dvh1', 'dvhe'):
3463 hdr = 'DV'
69bec673 3464 elif parts[0] == 'av1' and traversal.traverse_obj(parts, 3) == '10':
d816f61f 3465 hdr = 'HDR10'
3466 elif parts[:2] == ['vp9', '2']:
3467 hdr = 'HDR10'
71082216 3468 elif parts[0] in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-4',
d816f61f 3469 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
3470 acodec = acodec or full_codec
3471 elif parts[0] in ('stpp', 'wvtt'):
3472 scodec = scodec or full_codec
4f3c5e06 3473 else:
19a03940 3474 write_string(f'WARNING: Unknown codec {full_codec}\n')
3fe75fdc 3475 if vcodec or acodec or scodec:
4f3c5e06 3476 return {
3477 'vcodec': vcodec or 'none',
3478 'acodec': acodec or 'none',
176f1866 3479 'dynamic_range': hdr,
3fe75fdc 3480 **({'scodec': scodec} if scodec is not None else {}),
4f3c5e06 3481 }
b69fd25c 3482 elif len(split_codecs) == 2:
3483 return {
3484 'vcodec': split_codecs[0],
3485 'acodec': split_codecs[1],
3486 }
4f3c5e06 3487 return {}
3488
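# Example (illustrative sketch):
#   >>> parse_codecs('avc1.64001f, mp4a.40.2')
#   {'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2', 'dynamic_range': None}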
3489
fc61aff4
LL
3490def get_compatible_ext(*, vcodecs, acodecs, vexts, aexts, preferences=None):
3491 assert len(vcodecs) == len(vexts) and len(acodecs) == len(aexts)
3492
3493 allow_mkv = not preferences or 'mkv' in preferences
3494
3495 if allow_mkv and max(len(acodecs), len(vcodecs)) > 1:
3496 return 'mkv' # TODO: any other format allows this?
3497
3498 # TODO: Not all codecs supported by parse_codecs are handled here
3499 COMPATIBLE_CODECS = {
3500 'mp4': {
71082216 3501 'av1', 'hevc', 'avc1', 'mp4a', 'ac-4', # fourcc (m3u8, mpd)
81b6102d 3502 'h264', 'aacl', 'ec-3', # Set in ISM
fc61aff4
LL
3503 },
3504 'webm': {
3505 'av1', 'vp9', 'vp8', 'opus', 'vrbs',
3506 'vp9x', 'vp8x', # in the webm spec
3507 },
3508 }
3509
812cdfa0 3510 sanitize_codec = functools.partial(
3511 try_get, getter=lambda x: x[0].split('.')[0].replace('0', '').lower())
8f84770a 3512 vcodec, acodec = sanitize_codec(vcodecs), sanitize_codec(acodecs)
fc61aff4
LL
3513
3514 for ext in preferences or COMPATIBLE_CODECS.keys():
3515 codec_set = COMPATIBLE_CODECS.get(ext, set())
3516 if ext == 'mkv' or codec_set.issuperset((vcodec, acodec)):
3517 return ext
3518
3519 COMPATIBLE_EXTS = (
3520 {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma', 'mov'},
fbb73833 3521 {'webm', 'weba'},
fc61aff4
LL
3522 )
3523 for ext in preferences or vexts:
3524 current_exts = {ext, *vexts, *aexts}
3525 if ext == 'mkv' or current_exts == {ext} or any(
3526 ext_sets.issuperset(current_exts) for ext_sets in COMPATIBLE_EXTS):
3527 return ext
3528 return 'mkv' if allow_mkv else preferences[-1]
3529
3530
2647c933 3531def urlhandle_detect_ext(url_handle, default=NO_DEFAULT):
79298173 3532 getheader = url_handle.headers.get
2ccd1b10 3533
b55ee18f
PH
3534 cd = getheader('Content-Disposition')
3535 if cd:
3536 m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
3537 if m:
3538 e = determine_ext(m.group('filename'), default_ext=None)
3539 if e:
3540 return e
3541
2647c933 3542 meta_ext = getheader('x-amz-meta-name')
3543 if meta_ext:
3544 e = meta_ext.rpartition('.')[2]
3545 if e:
3546 return e
3547
3548 return mimetype2ext(getheader('Content-Type'), default=default)
05900629
PH
3549
3550
1e399778
YCH
3551def encode_data_uri(data, mime_type):
3552 return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
3553
3554
05900629 3555def age_restricted(content_limit, age_limit):
6ec6cb4e 3556 """ Returns True iff the content should be blocked """
05900629
PH
3557
3558 if age_limit is None: # No limit set
3559 return False
3560 if content_limit is None:
3561 return False # Content available for everyone
3562 return age_limit < content_limit
61ca9a80
PH
3563
3564
88f60feb 3565# List of known byte-order-marks (BOM)
a904a7f8
L
3566BOMS = [
3567 (b'\xef\xbb\xbf', 'utf-8'),
3568 (b'\x00\x00\xfe\xff', 'utf-32-be'),
3569 (b'\xff\xfe\x00\x00', 'utf-32-le'),
3570 (b'\xff\xfe', 'utf-16-le'),
3571 (b'\xfe\xff', 'utf-16-be'),
3572]
a904a7f8
L
3573
3574
61ca9a80
PH
3575def is_html(first_bytes):
3576 """ Detect whether a file contains HTML by examining its first bytes. """
3577
80e8493e 3578 encoding = 'utf-8'
61ca9a80 3579 for bom, enc in BOMS:
80e8493e 3580 while first_bytes.startswith(bom):
3581 encoding, first_bytes = enc, first_bytes[len(bom):]
61ca9a80 3582
80e8493e 3583 return re.match(r'^\s*<', first_bytes.decode(encoding, 'replace'))
a055469f
PH
3584
3585
3586def determine_protocol(info_dict):
3587 protocol = info_dict.get('protocol')
3588 if protocol is not None:
3589 return protocol
3590
7de837a5 3591 url = sanitize_url(info_dict['url'])
a055469f
PH
3592 if url.startswith('rtmp'):
3593 return 'rtmp'
3594 elif url.startswith('mms'):
3595 return 'mms'
3596 elif url.startswith('rtsp'):
3597 return 'rtsp'
3598
3599 ext = determine_ext(url)
3600 if ext == 'm3u8':
deae7c17 3601 return 'm3u8' if info_dict.get('is_live') else 'm3u8_native'
a055469f
PH
3602 elif ext == 'f4m':
3603 return 'f4m'
3604
14f25df2 3605 return urllib.parse.urlparse(url).scheme
cfb56d1a
PH
3606
3607
c5e3f849 3608def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False):
3609 """ Render a list of rows, each as a list of values.
3610 Text after a \t will be right aligned """
ec11a9f4 3611 def width(string):
c5e3f849 3612 return len(remove_terminal_sequences(string).replace('\t', ''))
76d321f6 3613
3614 def get_max_lens(table):
ec11a9f4 3615 return [max(width(str(v)) for v in col) for col in zip(*table)]
76d321f6 3616
3617 def filter_using_list(row, filterArray):
d16df59d 3618 return [col for take, col in itertools.zip_longest(filterArray, row, fillvalue=True) if take]
76d321f6 3619
d16df59d 3620 max_lens = get_max_lens(data) if hide_empty else []
3621 header_row = filter_using_list(header_row, max_lens)
3622 data = [filter_using_list(row, max_lens) for row in data]
76d321f6 3623
cfb56d1a 3624 table = [header_row] + data
76d321f6 3625 max_lens = get_max_lens(table)
c5e3f849 3626 extra_gap += 1
76d321f6 3627 if delim:
c5e3f849 3628 table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data
1ed7953a 3629 table[1][-1] = table[1][-1][:-extra_gap * len(delim)] # Remove extra_gap from end of delimiter
ec11a9f4 3630 for row in table:
3631 for pos, text in enumerate(map(str, row)):
c5e3f849 3632 if '\t' in text:
3633 row[pos] = text.replace('\t', ' ' * (max_lens[pos] - width(text))) + ' ' * extra_gap
3634 else:
3635 row[pos] = text + ' ' * (max_lens[pos] - width(text) + extra_gap)
3636 ret = '\n'.join(''.join(row).rstrip() for row in table)
ec11a9f4 3637 return ret
347de493
PH
3638
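# Example (illustrative sketch): columns are padded to the widest cell plus one space of gap:
#   >>> print(render_table(['ID', 'NAME'], [['1', 'foo'], ['22', 'bar']]))
#   ID NAME
#   1  foo
#   22 bar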
3639
8f18aca8 3640def _match_one(filter_part, dct, incomplete):
77b87f05 3641 # TODO: Generalize code with YoutubeDL._build_format_filter
a047eeb6 3642 STRING_OPERATORS = {
3643 '*=': operator.contains,
3644 '^=': lambda attr, value: attr.startswith(value),
3645 '$=': lambda attr, value: attr.endswith(value),
3646 '~=': lambda attr, value: re.search(value, attr),
3647 }
347de493 3648 COMPARISON_OPERATORS = {
a047eeb6 3649 **STRING_OPERATORS,
3650 '<=': operator.le, # "<=" must be defined above "<"
347de493 3651 '<': operator.lt,
347de493 3652 '>=': operator.ge,
a047eeb6 3653 '>': operator.gt,
347de493 3654 '=': operator.eq,
347de493 3655 }
a047eeb6 3656
6db9c4d5 3657 if isinstance(incomplete, bool):
3658 is_incomplete = lambda _: incomplete
3659 else:
3660 is_incomplete = lambda k: k in incomplete
3661
64fa820c 3662 operator_rex = re.compile(r'''(?x)
347de493 3663 (?P<key>[a-z_]+)
77b87f05 3664 \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
347de493 3665 (?:
a047eeb6 3666 (?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
3667 (?P<strval>.+?)
347de493 3668 )
347de493 3669 ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
64fa820c 3670 m = operator_rex.fullmatch(filter_part.strip())
347de493 3671 if m:
18f96d12 3672 m = m.groupdict()
3673 unnegated_op = COMPARISON_OPERATORS[m['op']]
3674 if m['negation']:
77b87f05
MT
3675 op = lambda attr, value: not unnegated_op(attr, value)
3676 else:
3677 op = unnegated_op
18f96d12 3678 comparison_value = m['quotedstrval'] or m['strval'] or m['intval']
3679 if m['quote']:
3680 comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote'])
3681 actual_value = dct.get(m['key'])
3682 numeric_comparison = None
f9934b96 3683 if isinstance(actual_value, (int, float)):
e5a088dc
S
3684 # If the original field is a string and the matching comparison value is
3685 # a number, we should respect the origin of the original field
3686 # and process the comparison value as a string (see
18f96d12 3687 # https://github.com/ytdl-org/youtube-dl/issues/11082)
347de493 3688 try:
18f96d12 3689 numeric_comparison = int(comparison_value)
347de493 3690 except ValueError:
18f96d12 3691 numeric_comparison = parse_filesize(comparison_value)
3692 if numeric_comparison is None:
3693 numeric_comparison = parse_filesize(f'{comparison_value}B')
3694 if numeric_comparison is None:
3695 numeric_comparison = parse_duration(comparison_value)
3696 if numeric_comparison is not None and m['op'] in STRING_OPERATORS:
3697 raise ValueError('Operator %s only supports string values!' % m['op'])
347de493 3698 if actual_value is None:
6db9c4d5 3699 return is_incomplete(m['key']) or m['none_inclusive']
18f96d12 3700 return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)
347de493
PH
3701
3702 UNARY_OPERATORS = {
1cc47c66
S
3703 '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
3704 '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
347de493 3705 }
64fa820c 3706 operator_rex = re.compile(r'''(?x)
347de493 3707 (?P<op>%s)\s*(?P<key>[a-z_]+)
347de493 3708 ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
64fa820c 3709 m = operator_rex.fullmatch(filter_part.strip())
347de493
PH
3710 if m:
3711 op = UNARY_OPERATORS[m.group('op')]
3712 actual_value = dct.get(m.group('key'))
6db9c4d5 3713 if is_incomplete(m.group('key')) and actual_value is None:
8f18aca8 3714 return True
347de493
PH
3715 return op(actual_value)
3716
3717 raise ValueError('Invalid filter part %r' % filter_part)
3718
3719
8f18aca8 3720def match_str(filter_str, dct, incomplete=False):
6db9c4d5 3721 """ Filter a dictionary with a simple string syntax.
3722 @returns Whether the filter passes
3723 @param incomplete Set of keys that are expected to be missing from dct.
3724 Can be True/False to indicate that all/none of the keys may be missing.
3725 All conditions on incomplete keys pass if the key is missing.
8f18aca8 3726 """
347de493 3727 return all(
8f18aca8 3728 _match_one(filter_part.replace(r'\&', '&'), dct, incomplete)
a047eeb6 3729 for filter_part in re.split(r'(?<!\\)&', filter_str))
347de493
PH
3730
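# Example (illustrative sketch): conditions are separated by '&' and support numeric
# as well as string (regex) comparisons:
#   >>> match_str('like_count > 100 & duration < 600',
#   ...           {'like_count': 500, 'duration': 300})
#   True
#   >>> match_str('title ~= (?i)news', {'title': 'Weather Report'})
#   False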
3731
fe2ce85a 3732def match_filter_func(filters, breaking_filters=None):
3733 if not filters and not breaking_filters:
d1b5f70b 3734 return None
fe2ce85a 3735 breaking_filters = match_filter_func(breaking_filters) or (lambda _, __: None)
3736 filters = set(variadic(filters or []))
d1b5f70b 3737
492272fe 3738 interactive = '-' in filters
3739 if interactive:
3740 filters.remove('-')
3741
3742 def _match_func(info_dict, incomplete=False):
fe2ce85a 3743 ret = breaking_filters(info_dict, incomplete)
3744 if ret is not None:
3745 raise RejectedVideoReached(ret)
3746
492272fe 3747 if not filters or any(match_str(f, info_dict, incomplete) for f in filters):
3748 return NO_DEFAULT if interactive and not incomplete else None
347de493 3749 else:
3bec830a 3750 video_title = info_dict.get('title') or info_dict.get('id') or 'entry'
b1a7cd05 3751 filter_str = ') | ('.join(map(str.strip, filters))
3752 return f'{video_title} does not pass filter ({filter_str}), skipping ..'
347de493 3753 return _match_func
91410c9b
PH
3754
3755
f2df4071 3756class download_range_func:
b4e0d758 3757 def __init__(self, chapters, ranges, from_info=False):
3758 self.chapters, self.ranges, self.from_info = chapters, ranges, from_info
f2df4071 3759
3760 def __call__(self, info_dict, ydl):
0500ee3d 3761
5ec1b6b7 3762 warning = ('There are no chapters matching the regex' if info_dict.get('chapters')
56ba69e4 3763 else 'Cannot match chapters since chapter information is unavailable')
f2df4071 3764 for regex in self.chapters or []:
5ec1b6b7 3765 for i, chapter in enumerate(info_dict.get('chapters') or []):
3766 if re.search(regex, chapter['title']):
3767 warning = None
3768 yield {**chapter, 'index': i}
f2df4071 3769 if self.chapters and warning:
5ec1b6b7 3770 ydl.to_screen(f'[info] {info_dict["id"]}: {warning}')
3771
b4e0d758 3772 for start, end in self.ranges or []:
3773 yield {
3774 'start_time': self._handle_negative_timestamp(start, info_dict),
3775 'end_time': self._handle_negative_timestamp(end, info_dict),
3776 }
3777
3778 if self.from_info and (info_dict.get('start_time') or info_dict.get('end_time')):
3779 yield {
e59e2074 3780 'start_time': info_dict.get('start_time') or 0,
3781 'end_time': info_dict.get('end_time') or float('inf'),
b4e0d758 3782 }
e59e2074 3783 elif not self.ranges and not self.chapters:
3784 yield {}
b4e0d758 3785
3786 @staticmethod
3787 def _handle_negative_timestamp(time, info):
3788 return max(info['duration'] + time, 0) if info.get('duration') and time < 0 else time
5ec1b6b7 3789
f2df4071 3790 def __eq__(self, other):
3791 return (isinstance(other, download_range_func)
3792 and self.chapters == other.chapters and self.ranges == other.ranges)
5ec1b6b7 3793
71df9b7f 3794 def __repr__(self):
a5387729 3795 return f'{__name__}.{type(self).__name__}({self.chapters}, {self.ranges})'
71df9b7f 3796
5ec1b6b7 3797
bf6427d2
YCH
3798def parse_dfxp_time_expr(time_expr):
3799 if not time_expr:
d631d5f9 3800 return
bf6427d2 3801
1d485a1a 3802 mobj = re.match(rf'^(?P<time_offset>{NUMBER_RE})s?$', time_expr)
bf6427d2
YCH
3803 if mobj:
3804 return float(mobj.group('time_offset'))
3805
db2fe38b 3806 mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
bf6427d2 3807 if mobj:
db2fe38b 3808 return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
bf6427d2
YCH
3809
3810
c1c924ab 3811def srt_subtitles_timecode(seconds):
aa7785f8 3812 return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)
3813
3814
3815def ass_subtitles_timecode(seconds):
3816 time = timetuple_from_msec(seconds * 1000)
3817 return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)
bf6427d2
YCH
3818
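# Example (illustrative sketch, assuming timetuple_from_msec yields
# (hours, minutes, seconds, milliseconds)):
#   >>> srt_subtitles_timecode(3661.5)
#   '01:01:01,500'
#   >>> ass_subtitles_timecode(3661.5)
#   '1:01:01.50'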
3819
3820def dfxp2srt(dfxp_data):
3869028f
YCH
3821 '''
3822 @param dfxp_data A bytes-like object containing DFXP data
3823 @returns A unicode object containing converted SRT data
3824 '''
5b995f71 3825 LEGACY_NAMESPACES = (
3869028f
YCH
3826 (b'http://www.w3.org/ns/ttml', [
3827 b'http://www.w3.org/2004/11/ttaf1',
3828 b'http://www.w3.org/2006/04/ttaf1',
3829 b'http://www.w3.org/2006/10/ttaf1',
5b995f71 3830 ]),
3869028f
YCH
3831 (b'http://www.w3.org/ns/ttml#styling', [
3832 b'http://www.w3.org/ns/ttml#style',
5b995f71
RA
3833 ]),
3834 )
3835
3836 SUPPORTED_STYLING = [
3837 'color',
3838 'fontFamily',
3839 'fontSize',
3840 'fontStyle',
3841 'fontWeight',
3842 'textDecoration'
3843 ]
3844
4e335771 3845 _x = functools.partial(xpath_with_ns, ns_map={
261f4730 3846 'xml': 'http://www.w3.org/XML/1998/namespace',
4e335771 3847 'ttml': 'http://www.w3.org/ns/ttml',
5b995f71 3848 'tts': 'http://www.w3.org/ns/ttml#styling',
4e335771 3849 })
bf6427d2 3850
5b995f71
RA
3851 styles = {}
3852 default_style = {}
3853
86e5f3ed 3854 class TTMLPElementParser:
5b995f71
RA
3855 _out = ''
3856 _unclosed_elements = []
3857 _applied_styles = []
bf6427d2 3858
2b14cb56 3859 def start(self, tag, attrib):
5b995f71
RA
3860 if tag in (_x('ttml:br'), 'br'):
3861 self._out += '\n'
3862 else:
3863 unclosed_elements = []
3864 style = {}
3865 element_style_id = attrib.get('style')
3866 if default_style:
3867 style.update(default_style)
3868 if element_style_id:
3869 style.update(styles.get(element_style_id, {}))
3870 for prop in SUPPORTED_STYLING:
3871 prop_val = attrib.get(_x('tts:' + prop))
3872 if prop_val:
3873 style[prop] = prop_val
3874 if style:
3875 font = ''
3876 for k, v in sorted(style.items()):
3877 if self._applied_styles and self._applied_styles[-1].get(k) == v:
3878 continue
3879 if k == 'color':
3880 font += ' color="%s"' % v
3881 elif k == 'fontSize':
3882 font += ' size="%s"' % v
3883 elif k == 'fontFamily':
3884 font += ' face="%s"' % v
3885 elif k == 'fontWeight' and v == 'bold':
3886 self._out += '<b>'
3887 unclosed_elements.append('b')
3888 elif k == 'fontStyle' and v == 'italic':
3889 self._out += '<i>'
3890 unclosed_elements.append('i')
3891 elif k == 'textDecoration' and v == 'underline':
3892 self._out += '<u>'
3893 unclosed_elements.append('u')
3894 if font:
3895 self._out += '<font' + font + '>'
3896 unclosed_elements.append('font')
3897 applied_style = {}
3898 if self._applied_styles:
3899 applied_style.update(self._applied_styles[-1])
3900 applied_style.update(style)
3901 self._applied_styles.append(applied_style)
3902 self._unclosed_elements.append(unclosed_elements)
bf6427d2 3903
2b14cb56 3904 def end(self, tag):
5b995f71
RA
3905 if tag not in (_x('ttml:br'), 'br'):
3906 unclosed_elements = self._unclosed_elements.pop()
3907 for element in reversed(unclosed_elements):
3908 self._out += '</%s>' % element
3909 if unclosed_elements and self._applied_styles:
3910 self._applied_styles.pop()
bf6427d2 3911
2b14cb56 3912 def data(self, data):
5b995f71 3913 self._out += data
2b14cb56 3914
3915 def close(self):
5b995f71 3916 return self._out.strip()
2b14cb56 3917
6a765f13 3918 # Fix UTF-8 encoded file wrongly marked as UTF-16. See https://github.com/yt-dlp/yt-dlp/issues/6543#issuecomment-1477169870
3919 # This will not trigger false positives since only UTF-8 text is being replaced
3920 dfxp_data = dfxp_data.replace(b'encoding=\'UTF-16\'', b'encoding=\'UTF-8\'')
3921
2b14cb56 3922 def parse_node(node):
3923 target = TTMLPElementParser()
3924 parser = xml.etree.ElementTree.XMLParser(target=target)
3925 parser.feed(xml.etree.ElementTree.tostring(node))
3926 return parser.close()
bf6427d2 3927
5b995f71
RA
3928 for k, v in LEGACY_NAMESPACES:
3929 for ns in v:
3930 dfxp_data = dfxp_data.replace(ns, k)
3931
3869028f 3932 dfxp = compat_etree_fromstring(dfxp_data)
bf6427d2 3933 out = []
5b995f71 3934 paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
1b0427e6
YCH
3935
3936 if not paras:
3937 raise ValueError('Invalid dfxp/TTML subtitle')
bf6427d2 3938
5b995f71
RA
3939 repeat = False
3940 while True:
3941 for style in dfxp.findall(_x('.//ttml:style')):
261f4730
RA
3942 style_id = style.get('id') or style.get(_x('xml:id'))
3943 if not style_id:
3944 continue
5b995f71
RA
3945 parent_style_id = style.get('style')
3946 if parent_style_id:
3947 if parent_style_id not in styles:
3948 repeat = True
3949 continue
3950 styles[style_id] = styles[parent_style_id].copy()
3951 for prop in SUPPORTED_STYLING:
3952 prop_val = style.get(_x('tts:' + prop))
3953 if prop_val:
3954 styles.setdefault(style_id, {})[prop] = prop_val
3955 if repeat:
3956 repeat = False
3957 else:
3958 break
3959
3960 for p in ('body', 'div'):
3961 ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
3962 if ele is None:
3963 continue
3964 style = styles.get(ele.get('style'))
3965 if not style:
3966 continue
3967 default_style.update(style)
3968
bf6427d2 3969 for para, index in zip(paras, itertools.count(1)):
d631d5f9 3970 begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
7dff0363 3971 end_time = parse_dfxp_time_expr(para.attrib.get('end'))
d631d5f9
YCH
3972 dur = parse_dfxp_time_expr(para.attrib.get('dur'))
3973 if begin_time is None:
3974 continue
7dff0363 3975 if not end_time:
d631d5f9
YCH
3976 if not dur:
3977 continue
3978 end_time = begin_time + dur
bf6427d2
YCH
3979 out.append('%d\n%s --> %s\n%s\n\n' % (
3980 index,
c1c924ab
YCH
3981 srt_subtitles_timecode(begin_time),
3982 srt_subtitles_timecode(end_time),
bf6427d2
YCH
3983 parse_node(para)))
3984
3985 return ''.join(out)
3986
3987
c487cf00 3988def cli_option(params, command_option, param, separator=None):
66e289ba 3989 param = params.get(param)
c487cf00 3990 return ([] if param is None
3991 else [command_option, str(param)] if separator is None
3992 else [f'{command_option}{separator}{param}'])
66e289ba
S
3993
3994
3995def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
3996 param = params.get(param)
c487cf00 3997 assert param in (True, False, None)
3998 return cli_option({True: true_value, False: false_value}, command_option, param, separator)
66e289ba
S
3999
4000
4001def cli_valueless_option(params, command_option, param, expected_value=True):
c487cf00 4002 return [command_option] if params.get(param) == expected_value else []
66e289ba
S
4003
4004
e92caff5 4005def cli_configuration_args(argdict, keys, default=[], use_compat=True):
eab9b2bc 4006 if isinstance(argdict, (list, tuple)): # for backward compatibility
e92caff5 4007 if use_compat:
5b1ecbb3 4008 return argdict
4009 else:
4010 argdict = None
eab9b2bc 4011 if argdict is None:
5b1ecbb3 4012 return default
eab9b2bc 4013 assert isinstance(argdict, dict)
4014
e92caff5 4015 assert isinstance(keys, (list, tuple))
4016 for key_list in keys:
e92caff5 4017 arg_list = list(filter(
4018 lambda x: x is not None,
6606817a 4019 [argdict.get(key.lower()) for key in variadic(key_list)]))
e92caff5 4020 if arg_list:
4021 return [arg for args in arg_list for arg in args]
4022 return default
66e289ba 4023
6251555f 4024
330690a2 4025def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compat=True):
4026 main_key, exe = main_key.lower(), exe.lower()
4027 root_key = exe if main_key == exe else f'{main_key}+{exe}'
4028 keys = [f'{root_key}{k}' for k in (keys or [''])]
4029 if root_key in keys:
4030 if main_key != exe:
4031 keys.append((main_key, exe))
4032 keys.append('default')
4033 else:
4034 use_compat = False
4035 return cli_configuration_args(argdict, keys, default, use_compat)
4036
66e289ba 4037
86e5f3ed 4038class ISO639Utils:
39672624
YCH
4039 # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
4040 _lang_map = {
4041 'aa': 'aar',
4042 'ab': 'abk',
4043 'ae': 'ave',
4044 'af': 'afr',
4045 'ak': 'aka',
4046 'am': 'amh',
4047 'an': 'arg',
4048 'ar': 'ara',
4049 'as': 'asm',
4050 'av': 'ava',
4051 'ay': 'aym',
4052 'az': 'aze',
4053 'ba': 'bak',
4054 'be': 'bel',
4055 'bg': 'bul',
4056 'bh': 'bih',
4057 'bi': 'bis',
4058 'bm': 'bam',
4059 'bn': 'ben',
4060 'bo': 'bod',
4061 'br': 'bre',
4062 'bs': 'bos',
4063 'ca': 'cat',
4064 'ce': 'che',
4065 'ch': 'cha',
4066 'co': 'cos',
4067 'cr': 'cre',
4068 'cs': 'ces',
4069 'cu': 'chu',
4070 'cv': 'chv',
4071 'cy': 'cym',
4072 'da': 'dan',
4073 'de': 'deu',
4074 'dv': 'div',
4075 'dz': 'dzo',
4076 'ee': 'ewe',
4077 'el': 'ell',
4078 'en': 'eng',
4079 'eo': 'epo',
4080 'es': 'spa',
4081 'et': 'est',
4082 'eu': 'eus',
4083 'fa': 'fas',
4084 'ff': 'ful',
4085 'fi': 'fin',
4086 'fj': 'fij',
4087 'fo': 'fao',
4088 'fr': 'fra',
4089 'fy': 'fry',
4090 'ga': 'gle',
4091 'gd': 'gla',
4092 'gl': 'glg',
4093 'gn': 'grn',
4094 'gu': 'guj',
4095 'gv': 'glv',
4096 'ha': 'hau',
4097 'he': 'heb',
b7acc835 4098 'iw': 'heb', # Replaced by he in 1989 revision
39672624
YCH
4099 'hi': 'hin',
4100 'ho': 'hmo',
4101 'hr': 'hrv',
4102 'ht': 'hat',
4103 'hu': 'hun',
4104 'hy': 'hye',
4105 'hz': 'her',
4106 'ia': 'ina',
4107 'id': 'ind',
b7acc835 4108 'in': 'ind', # Replaced by id in 1989 revision
39672624
YCH
4109 'ie': 'ile',
4110 'ig': 'ibo',
4111 'ii': 'iii',
4112 'ik': 'ipk',
4113 'io': 'ido',
4114 'is': 'isl',
4115 'it': 'ita',
4116 'iu': 'iku',
4117 'ja': 'jpn',
4118 'jv': 'jav',
4119 'ka': 'kat',
4120 'kg': 'kon',
4121 'ki': 'kik',
4122 'kj': 'kua',
4123 'kk': 'kaz',
4124 'kl': 'kal',
4125 'km': 'khm',
4126 'kn': 'kan',
4127 'ko': 'kor',
4128 'kr': 'kau',
4129 'ks': 'kas',
4130 'ku': 'kur',
4131 'kv': 'kom',
4132 'kw': 'cor',
4133 'ky': 'kir',
4134 'la': 'lat',
4135 'lb': 'ltz',
4136 'lg': 'lug',
4137 'li': 'lim',
4138 'ln': 'lin',
4139 'lo': 'lao',
4140 'lt': 'lit',
4141 'lu': 'lub',
4142 'lv': 'lav',
4143 'mg': 'mlg',
4144 'mh': 'mah',
4145 'mi': 'mri',
4146 'mk': 'mkd',
4147 'ml': 'mal',
4148 'mn': 'mon',
4149 'mr': 'mar',
4150 'ms': 'msa',
4151 'mt': 'mlt',
4152 'my': 'mya',
4153 'na': 'nau',
4154 'nb': 'nob',
4155 'nd': 'nde',
4156 'ne': 'nep',
4157 'ng': 'ndo',
4158 'nl': 'nld',
4159 'nn': 'nno',
4160 'no': 'nor',
4161 'nr': 'nbl',
4162 'nv': 'nav',
4163 'ny': 'nya',
4164 'oc': 'oci',
4165 'oj': 'oji',
4166 'om': 'orm',
4167 'or': 'ori',
4168 'os': 'oss',
4169 'pa': 'pan',
7bcd4813 4170 'pe': 'per',
39672624
YCH
4171 'pi': 'pli',
4172 'pl': 'pol',
4173 'ps': 'pus',
4174 'pt': 'por',
4175 'qu': 'que',
4176 'rm': 'roh',
4177 'rn': 'run',
4178 'ro': 'ron',
4179 'ru': 'rus',
4180 'rw': 'kin',
4181 'sa': 'san',
4182 'sc': 'srd',
4183 'sd': 'snd',
4184 'se': 'sme',
4185 'sg': 'sag',
4186 'si': 'sin',
4187 'sk': 'slk',
4188 'sl': 'slv',
4189 'sm': 'smo',
4190 'sn': 'sna',
4191 'so': 'som',
4192 'sq': 'sqi',
4193 'sr': 'srp',
4194 'ss': 'ssw',
4195 'st': 'sot',
4196 'su': 'sun',
4197 'sv': 'swe',
4198 'sw': 'swa',
4199 'ta': 'tam',
4200 'te': 'tel',
4201 'tg': 'tgk',
4202 'th': 'tha',
4203 'ti': 'tir',
4204 'tk': 'tuk',
4205 'tl': 'tgl',
4206 'tn': 'tsn',
4207 'to': 'ton',
4208 'tr': 'tur',
4209 'ts': 'tso',
4210 'tt': 'tat',
4211 'tw': 'twi',
4212 'ty': 'tah',
4213 'ug': 'uig',
4214 'uk': 'ukr',
4215 'ur': 'urd',
4216 'uz': 'uzb',
4217 've': 'ven',
4218 'vi': 'vie',
4219 'vo': 'vol',
4220 'wa': 'wln',
4221 'wo': 'wol',
4222 'xh': 'xho',
4223 'yi': 'yid',
e9a50fba 4224 'ji': 'yid', # Replaced by yi in 1989 revision
39672624
YCH
4225 'yo': 'yor',
4226 'za': 'zha',
4227 'zh': 'zho',
4228 'zu': 'zul',
4229 }
4230
4231 @classmethod
4232 def short2long(cls, code):
4233 """Convert language code from ISO 639-1 to ISO 639-2/T"""
4234 return cls._lang_map.get(code[:2])
4235
4236 @classmethod
4237 def long2short(cls, code):
4238 """Convert language code from ISO 639-2/T to ISO 639-1"""
4239 for short_name, long_name in cls._lang_map.items():
4240 if long_name == code:
4241 return short_name
4242
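# Example (illustrative sketch):
#   >>> ISO639Utils.short2long('en')
#   'eng'
#   >>> ISO639Utils.long2short('fra')
#   'fr'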
4243
86e5f3ed 4244class ISO3166Utils:
4eb10f66
YCH
4245 # From http://data.okfn.org/data/core/country-list
4246 _country_map = {
4247 'AF': 'Afghanistan',
4248 'AX': 'Åland Islands',
4249 'AL': 'Albania',
4250 'DZ': 'Algeria',
4251 'AS': 'American Samoa',
4252 'AD': 'Andorra',
4253 'AO': 'Angola',
4254 'AI': 'Anguilla',
4255 'AQ': 'Antarctica',
4256 'AG': 'Antigua and Barbuda',
4257 'AR': 'Argentina',
4258 'AM': 'Armenia',
4259 'AW': 'Aruba',
4260 'AU': 'Australia',
4261 'AT': 'Austria',
4262 'AZ': 'Azerbaijan',
4263 'BS': 'Bahamas',
4264 'BH': 'Bahrain',
4265 'BD': 'Bangladesh',
4266 'BB': 'Barbados',
4267 'BY': 'Belarus',
4268 'BE': 'Belgium',
4269 'BZ': 'Belize',
4270 'BJ': 'Benin',
4271 'BM': 'Bermuda',
4272 'BT': 'Bhutan',
4273 'BO': 'Bolivia, Plurinational State of',
4274 'BQ': 'Bonaire, Sint Eustatius and Saba',
4275 'BA': 'Bosnia and Herzegovina',
4276 'BW': 'Botswana',
4277 'BV': 'Bouvet Island',
4278 'BR': 'Brazil',
4279 'IO': 'British Indian Ocean Territory',
4280 'BN': 'Brunei Darussalam',
4281 'BG': 'Bulgaria',
4282 'BF': 'Burkina Faso',
4283 'BI': 'Burundi',
4284 'KH': 'Cambodia',
4285 'CM': 'Cameroon',
4286 'CA': 'Canada',
4287 'CV': 'Cape Verde',
4288 'KY': 'Cayman Islands',
4289 'CF': 'Central African Republic',
4290 'TD': 'Chad',
4291 'CL': 'Chile',
4292 'CN': 'China',
4293 'CX': 'Christmas Island',
4294 'CC': 'Cocos (Keeling) Islands',
4295 'CO': 'Colombia',
4296 'KM': 'Comoros',
4297 'CG': 'Congo',
4298 'CD': 'Congo, the Democratic Republic of the',
4299 'CK': 'Cook Islands',
4300 'CR': 'Costa Rica',
4301 'CI': 'Côte d\'Ivoire',
4302 'HR': 'Croatia',
4303 'CU': 'Cuba',
4304 'CW': 'Curaçao',
4305 'CY': 'Cyprus',
4306 'CZ': 'Czech Republic',
4307 'DK': 'Denmark',
4308 'DJ': 'Djibouti',
4309 'DM': 'Dominica',
4310 'DO': 'Dominican Republic',
4311 'EC': 'Ecuador',
4312 'EG': 'Egypt',
4313 'SV': 'El Salvador',
4314 'GQ': 'Equatorial Guinea',
4315 'ER': 'Eritrea',
4316 'EE': 'Estonia',
4317 'ET': 'Ethiopia',
4318 'FK': 'Falkland Islands (Malvinas)',
4319 'FO': 'Faroe Islands',
4320 'FJ': 'Fiji',
4321 'FI': 'Finland',
4322 'FR': 'France',
4323 'GF': 'French Guiana',
4324 'PF': 'French Polynesia',
4325 'TF': 'French Southern Territories',
4326 'GA': 'Gabon',
4327 'GM': 'Gambia',
4328 'GE': 'Georgia',
4329 'DE': 'Germany',
4330 'GH': 'Ghana',
4331 'GI': 'Gibraltar',
4332 'GR': 'Greece',
4333 'GL': 'Greenland',
4334 'GD': 'Grenada',
4335 'GP': 'Guadeloupe',
4336 'GU': 'Guam',
4337 'GT': 'Guatemala',
4338 'GG': 'Guernsey',
4339 'GN': 'Guinea',
4340 'GW': 'Guinea-Bissau',
4341 'GY': 'Guyana',
4342 'HT': 'Haiti',
4343 'HM': 'Heard Island and McDonald Islands',
4344 'VA': 'Holy See (Vatican City State)',
4345 'HN': 'Honduras',
4346 'HK': 'Hong Kong',
4347 'HU': 'Hungary',
4348 'IS': 'Iceland',
4349 'IN': 'India',
4350 'ID': 'Indonesia',
4351 'IR': 'Iran, Islamic Republic of',
4352 'IQ': 'Iraq',
4353 'IE': 'Ireland',
4354 'IM': 'Isle of Man',
4355 'IL': 'Israel',
4356 'IT': 'Italy',
4357 'JM': 'Jamaica',
4358 'JP': 'Japan',
4359 'JE': 'Jersey',
4360 'JO': 'Jordan',
4361 'KZ': 'Kazakhstan',
4362 'KE': 'Kenya',
4363 'KI': 'Kiribati',
4364 'KP': 'Korea, Democratic People\'s Republic of',
4365 'KR': 'Korea, Republic of',
4366 'KW': 'Kuwait',
4367 'KG': 'Kyrgyzstan',
4368 'LA': 'Lao People\'s Democratic Republic',
4369 'LV': 'Latvia',
4370 'LB': 'Lebanon',
4371 'LS': 'Lesotho',
4372 'LR': 'Liberia',
4373 'LY': 'Libya',
4374 'LI': 'Liechtenstein',
4375 'LT': 'Lithuania',
4376 'LU': 'Luxembourg',
4377 'MO': 'Macao',
4378 'MK': 'Macedonia, the Former Yugoslav Republic of',
4379 'MG': 'Madagascar',
4380 'MW': 'Malawi',
4381 'MY': 'Malaysia',
4382 'MV': 'Maldives',
4383 'ML': 'Mali',
4384 'MT': 'Malta',
4385 'MH': 'Marshall Islands',
4386 'MQ': 'Martinique',
4387 'MR': 'Mauritania',
4388 'MU': 'Mauritius',
4389 'YT': 'Mayotte',
4390 'MX': 'Mexico',
4391 'FM': 'Micronesia, Federated States of',
4392 'MD': 'Moldova, Republic of',
4393 'MC': 'Monaco',
4394 'MN': 'Mongolia',
4395 'ME': 'Montenegro',
4396 'MS': 'Montserrat',
4397 'MA': 'Morocco',
4398 'MZ': 'Mozambique',
4399 'MM': 'Myanmar',
4400 'NA': 'Namibia',
4401 'NR': 'Nauru',
4402 'NP': 'Nepal',
4403 'NL': 'Netherlands',
4404 'NC': 'New Caledonia',
4405 'NZ': 'New Zealand',
4406 'NI': 'Nicaragua',
4407 'NE': 'Niger',
4408 'NG': 'Nigeria',
4409 'NU': 'Niue',
4410 'NF': 'Norfolk Island',
4411 'MP': 'Northern Mariana Islands',
4412 'NO': 'Norway',
4413 'OM': 'Oman',
4414 'PK': 'Pakistan',
4415 'PW': 'Palau',
4416 'PS': 'Palestine, State of',
4417 'PA': 'Panama',
4418 'PG': 'Papua New Guinea',
4419 'PY': 'Paraguay',
4420 'PE': 'Peru',
4421 'PH': 'Philippines',
4422 'PN': 'Pitcairn',
4423 'PL': 'Poland',
4424 'PT': 'Portugal',
4425 'PR': 'Puerto Rico',
4426 'QA': 'Qatar',
4427 'RE': 'Réunion',
4428 'RO': 'Romania',
4429 'RU': 'Russian Federation',
4430 'RW': 'Rwanda',
4431 'BL': 'Saint Barthélemy',
4432 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
4433 'KN': 'Saint Kitts and Nevis',
4434 'LC': 'Saint Lucia',
4435 'MF': 'Saint Martin (French part)',
4436 'PM': 'Saint Pierre and Miquelon',
4437 'VC': 'Saint Vincent and the Grenadines',
4438 'WS': 'Samoa',
4439 'SM': 'San Marino',
4440 'ST': 'Sao Tome and Principe',
4441 'SA': 'Saudi Arabia',
4442 'SN': 'Senegal',
4443 'RS': 'Serbia',
4444 'SC': 'Seychelles',
4445 'SL': 'Sierra Leone',
4446 'SG': 'Singapore',
4447 'SX': 'Sint Maarten (Dutch part)',
4448 'SK': 'Slovakia',
4449 'SI': 'Slovenia',
4450 'SB': 'Solomon Islands',
4451 'SO': 'Somalia',
4452 'ZA': 'South Africa',
4453 'GS': 'South Georgia and the South Sandwich Islands',
4454 'SS': 'South Sudan',
4455 'ES': 'Spain',
4456 'LK': 'Sri Lanka',
4457 'SD': 'Sudan',
4458 'SR': 'Suriname',
4459 'SJ': 'Svalbard and Jan Mayen',
4460 'SZ': 'Swaziland',
4461 'SE': 'Sweden',
4462 'CH': 'Switzerland',
4463 'SY': 'Syrian Arab Republic',
4464 'TW': 'Taiwan, Province of China',
4465 'TJ': 'Tajikistan',
4466 'TZ': 'Tanzania, United Republic of',
4467 'TH': 'Thailand',
4468 'TL': 'Timor-Leste',
4469 'TG': 'Togo',
4470 'TK': 'Tokelau',
4471 'TO': 'Tonga',
4472 'TT': 'Trinidad and Tobago',
4473 'TN': 'Tunisia',
4474 'TR': 'Turkey',
4475 'TM': 'Turkmenistan',
4476 'TC': 'Turks and Caicos Islands',
4477 'TV': 'Tuvalu',
4478 'UG': 'Uganda',
4479 'UA': 'Ukraine',
4480 'AE': 'United Arab Emirates',
4481 'GB': 'United Kingdom',
4482 'US': 'United States',
4483 'UM': 'United States Minor Outlying Islands',
4484 'UY': 'Uruguay',
4485 'UZ': 'Uzbekistan',
4486 'VU': 'Vanuatu',
4487 'VE': 'Venezuela, Bolivarian Republic of',
4488 'VN': 'Viet Nam',
4489 'VG': 'Virgin Islands, British',
4490 'VI': 'Virgin Islands, U.S.',
4491 'WF': 'Wallis and Futuna',
4492 'EH': 'Western Sahara',
4493 'YE': 'Yemen',
4494 'ZM': 'Zambia',
4495 'ZW': 'Zimbabwe',
2f97cc61 4496 # Not ISO 3166 codes, but used for IP blocks
4497 'AP': 'Asia/Pacific Region',
4498 'EU': 'Europe',
4eb10f66
YCH
4499 }
4500
4501 @classmethod
4502 def short2full(cls, code):
4503 """Convert an ISO 3166-2 country code to the corresponding full name"""
4504 return cls._country_map.get(code.upper())
4505
4506
86e5f3ed 4507class GeoUtils:
773f291d
S
4508 # Major IPv4 address blocks per country
4509 _country_ip_map = {
53896ca5 4510 'AD': '46.172.224.0/19',
773f291d
S
4511 'AE': '94.200.0.0/13',
4512 'AF': '149.54.0.0/17',
4513 'AG': '209.59.64.0/18',
4514 'AI': '204.14.248.0/21',
4515 'AL': '46.99.0.0/16',
4516 'AM': '46.70.0.0/15',
4517 'AO': '105.168.0.0/13',
53896ca5
S
4518 'AP': '182.50.184.0/21',
4519 'AQ': '23.154.160.0/24',
773f291d
S
4520 'AR': '181.0.0.0/12',
4521 'AS': '202.70.112.0/20',
53896ca5 4522 'AT': '77.116.0.0/14',
773f291d
S
4523 'AU': '1.128.0.0/11',
4524 'AW': '181.41.0.0/18',
53896ca5
S
4525 'AX': '185.217.4.0/22',
4526 'AZ': '5.197.0.0/16',
773f291d
S
4527 'BA': '31.176.128.0/17',
4528 'BB': '65.48.128.0/17',
4529 'BD': '114.130.0.0/16',
4530 'BE': '57.0.0.0/8',
53896ca5 4531 'BF': '102.178.0.0/15',
773f291d
S
4532 'BG': '95.42.0.0/15',
4533 'BH': '37.131.0.0/17',
4534 'BI': '154.117.192.0/18',
4535 'BJ': '137.255.0.0/16',
53896ca5 4536 'BL': '185.212.72.0/23',
773f291d
S
4537 'BM': '196.12.64.0/18',
4538 'BN': '156.31.0.0/16',
4539 'BO': '161.56.0.0/16',
4540 'BQ': '161.0.80.0/20',
53896ca5 4541 'BR': '191.128.0.0/12',
773f291d
S
4542 'BS': '24.51.64.0/18',
4543 'BT': '119.2.96.0/19',
4544 'BW': '168.167.0.0/16',
4545 'BY': '178.120.0.0/13',
4546 'BZ': '179.42.192.0/18',
4547 'CA': '99.224.0.0/11',
4548 'CD': '41.243.0.0/16',
53896ca5
S
4549 'CF': '197.242.176.0/21',
4550 'CG': '160.113.0.0/16',
773f291d 4551 'CH': '85.0.0.0/13',
53896ca5 4552 'CI': '102.136.0.0/14',
773f291d
S
4553 'CK': '202.65.32.0/19',
4554 'CL': '152.172.0.0/14',
53896ca5 4555 'CM': '102.244.0.0/14',
773f291d
S
4556 'CN': '36.128.0.0/10',
4557 'CO': '181.240.0.0/12',
4558 'CR': '201.192.0.0/12',
4559 'CU': '152.206.0.0/15',
4560 'CV': '165.90.96.0/19',
4561 'CW': '190.88.128.0/17',
53896ca5 4562 'CY': '31.153.0.0/16',
773f291d
S
4563 'CZ': '88.100.0.0/14',
4564 'DE': '53.0.0.0/8',
4565 'DJ': '197.241.0.0/17',
4566 'DK': '87.48.0.0/12',
4567 'DM': '192.243.48.0/20',
4568 'DO': '152.166.0.0/15',
4569 'DZ': '41.96.0.0/12',
4570 'EC': '186.68.0.0/15',
4571 'EE': '90.190.0.0/15',
4572 'EG': '156.160.0.0/11',
4573 'ER': '196.200.96.0/20',
4574 'ES': '88.0.0.0/11',
4575 'ET': '196.188.0.0/14',
4576 'EU': '2.16.0.0/13',
4577 'FI': '91.152.0.0/13',
4578 'FJ': '144.120.0.0/16',
53896ca5 4579 'FK': '80.73.208.0/21',
773f291d
S
4580 'FM': '119.252.112.0/20',
4581 'FO': '88.85.32.0/19',
4582 'FR': '90.0.0.0/9',
4583 'GA': '41.158.0.0/15',
4584 'GB': '25.0.0.0/8',
4585 'GD': '74.122.88.0/21',
4586 'GE': '31.146.0.0/16',
4587 'GF': '161.22.64.0/18',
4588 'GG': '62.68.160.0/19',
53896ca5
S
4589 'GH': '154.160.0.0/12',
4590 'GI': '95.164.0.0/16',
773f291d
S
4591 'GL': '88.83.0.0/19',
4592 'GM': '160.182.0.0/15',
4593 'GN': '197.149.192.0/18',
4594 'GP': '104.250.0.0/19',
4595 'GQ': '105.235.224.0/20',
4596 'GR': '94.64.0.0/13',
4597 'GT': '168.234.0.0/16',
4598 'GU': '168.123.0.0/16',
4599 'GW': '197.214.80.0/20',
4600 'GY': '181.41.64.0/18',
4601 'HK': '113.252.0.0/14',
4602 'HN': '181.210.0.0/16',
4603 'HR': '93.136.0.0/13',
4604 'HT': '148.102.128.0/17',
4605 'HU': '84.0.0.0/14',
4606 'ID': '39.192.0.0/10',
4607 'IE': '87.32.0.0/12',
4608 'IL': '79.176.0.0/13',
4609 'IM': '5.62.80.0/20',
4610 'IN': '117.192.0.0/10',
4611 'IO': '203.83.48.0/21',
4612 'IQ': '37.236.0.0/14',
4613 'IR': '2.176.0.0/12',
4614 'IS': '82.221.0.0/16',
4615 'IT': '79.0.0.0/10',
4616 'JE': '87.244.64.0/18',
4617 'JM': '72.27.0.0/17',
4618 'JO': '176.29.0.0/16',
53896ca5 4619 'JP': '133.0.0.0/8',
773f291d
S
4620 'KE': '105.48.0.0/12',
4621 'KG': '158.181.128.0/17',
4622 'KH': '36.37.128.0/17',
4623 'KI': '103.25.140.0/22',
4624 'KM': '197.255.224.0/20',
53896ca5 4625 'KN': '198.167.192.0/19',
773f291d
S
4626 'KP': '175.45.176.0/22',
4627 'KR': '175.192.0.0/10',
4628 'KW': '37.36.0.0/14',
4629 'KY': '64.96.0.0/15',
4630 'KZ': '2.72.0.0/13',
4631 'LA': '115.84.64.0/18',
4632 'LB': '178.135.0.0/16',
53896ca5 4633 'LC': '24.92.144.0/20',
773f291d
S
4634 'LI': '82.117.0.0/19',
4635 'LK': '112.134.0.0/15',
53896ca5 4636 'LR': '102.183.0.0/16',
773f291d
S
4637 'LS': '129.232.0.0/17',
4638 'LT': '78.56.0.0/13',
4639 'LU': '188.42.0.0/16',
4640 'LV': '46.109.0.0/16',
4641 'LY': '41.252.0.0/14',
4642 'MA': '105.128.0.0/11',
4643 'MC': '88.209.64.0/18',
4644 'MD': '37.246.0.0/16',
4645 'ME': '178.175.0.0/17',
4646 'MF': '74.112.232.0/21',
4647 'MG': '154.126.0.0/17',
4648 'MH': '117.103.88.0/21',
4649 'MK': '77.28.0.0/15',
4650 'ML': '154.118.128.0/18',
4651 'MM': '37.111.0.0/17',
4652 'MN': '49.0.128.0/17',
4653 'MO': '60.246.0.0/16',
4654 'MP': '202.88.64.0/20',
4655 'MQ': '109.203.224.0/19',
4656 'MR': '41.188.64.0/18',
4657 'MS': '208.90.112.0/22',
4658 'MT': '46.11.0.0/16',
4659 'MU': '105.16.0.0/12',
4660 'MV': '27.114.128.0/18',
53896ca5 4661 'MW': '102.70.0.0/15',
773f291d
S
4662 'MX': '187.192.0.0/11',
4663 'MY': '175.136.0.0/13',
4664 'MZ': '197.218.0.0/15',
4665 'NA': '41.182.0.0/16',
4666 'NC': '101.101.0.0/18',
4667 'NE': '197.214.0.0/18',
4668 'NF': '203.17.240.0/22',
4669 'NG': '105.112.0.0/12',
4670 'NI': '186.76.0.0/15',
4671 'NL': '145.96.0.0/11',
4672 'NO': '84.208.0.0/13',
4673 'NP': '36.252.0.0/15',
4674 'NR': '203.98.224.0/19',
4675 'NU': '49.156.48.0/22',
4676 'NZ': '49.224.0.0/14',
4677 'OM': '5.36.0.0/15',
4678 'PA': '186.72.0.0/15',
4679 'PE': '186.160.0.0/14',
4680 'PF': '123.50.64.0/18',
4681 'PG': '124.240.192.0/19',
4682 'PH': '49.144.0.0/13',
4683 'PK': '39.32.0.0/11',
4684 'PL': '83.0.0.0/11',
4685 'PM': '70.36.0.0/20',
4686 'PR': '66.50.0.0/16',
4687 'PS': '188.161.0.0/16',
4688 'PT': '85.240.0.0/13',
4689 'PW': '202.124.224.0/20',
4690 'PY': '181.120.0.0/14',
4691 'QA': '37.210.0.0/15',
53896ca5 4692 'RE': '102.35.0.0/16',
773f291d 4693 'RO': '79.112.0.0/13',
53896ca5 4694 'RS': '93.86.0.0/15',
773f291d 4695 'RU': '5.136.0.0/13',
53896ca5 4696 'RW': '41.186.0.0/16',
773f291d
S
4697 'SA': '188.48.0.0/13',
4698 'SB': '202.1.160.0/19',
4699 'SC': '154.192.0.0/11',
53896ca5 4700 'SD': '102.120.0.0/13',
773f291d 4701 'SE': '78.64.0.0/12',
53896ca5 4702 'SG': '8.128.0.0/10',
773f291d
S
4703 'SI': '188.196.0.0/14',
4704 'SK': '78.98.0.0/15',
53896ca5 4705 'SL': '102.143.0.0/17',
773f291d
S
4706 'SM': '89.186.32.0/19',
4707 'SN': '41.82.0.0/15',
53896ca5 4708 'SO': '154.115.192.0/18',
773f291d
S
4709 'SR': '186.179.128.0/17',
4710 'SS': '105.235.208.0/21',
4711 'ST': '197.159.160.0/19',
4712 'SV': '168.243.0.0/16',
4713 'SX': '190.102.0.0/20',
4714 'SY': '5.0.0.0/16',
4715 'SZ': '41.84.224.0/19',
4716 'TC': '65.255.48.0/20',
4717 'TD': '154.68.128.0/19',
4718 'TG': '196.168.0.0/14',
4719 'TH': '171.96.0.0/13',
4720 'TJ': '85.9.128.0/18',
4721 'TK': '27.96.24.0/21',
4722 'TL': '180.189.160.0/20',
4723 'TM': '95.85.96.0/19',
4724 'TN': '197.0.0.0/11',
4725 'TO': '175.176.144.0/21',
4726 'TR': '78.160.0.0/11',
4727 'TT': '186.44.0.0/15',
4728 'TV': '202.2.96.0/19',
4729 'TW': '120.96.0.0/11',
4730 'TZ': '156.156.0.0/14',
53896ca5
S
4731 'UA': '37.52.0.0/14',
4732 'UG': '102.80.0.0/13',
4733 'US': '6.0.0.0/8',
773f291d 4734 'UY': '167.56.0.0/13',
53896ca5 4735 'UZ': '84.54.64.0/18',
773f291d 4736 'VA': '212.77.0.0/19',
53896ca5 4737 'VC': '207.191.240.0/21',
773f291d 4738 'VE': '186.88.0.0/13',
53896ca5 4739 'VG': '66.81.192.0/20',
773f291d
S
4740 'VI': '146.226.0.0/16',
4741 'VN': '14.160.0.0/11',
4742 'VU': '202.80.32.0/20',
4743 'WF': '117.20.32.0/21',
4744 'WS': '202.4.32.0/19',
4745 'YE': '134.35.0.0/16',
4746 'YT': '41.242.116.0/22',
4747 'ZA': '41.0.0.0/11',
53896ca5
S
4748 'ZM': '102.144.0.0/13',
4749 'ZW': '102.177.192.0/18',
773f291d
S
4750 }
4751
4752 @classmethod
5f95927a
S
4753 def random_ipv4(cls, code_or_block):
4754 if len(code_or_block) == 2:
4755 block = cls._country_ip_map.get(code_or_block.upper())
4756 if not block:
4757 return None
4758 else:
4759 block = code_or_block
773f291d 4760 addr, preflen = block.split('/')
ac668111 4761 addr_min = struct.unpack('!L', socket.inet_aton(addr))[0]
773f291d 4762 addr_max = addr_min | (0xffffffff >> int(preflen))
14f25df2 4763 return str(socket.inet_ntoa(
ac668111 4764 struct.pack('!L', random.randint(addr_min, addr_max))))
773f291d
S
4765
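# Example (illustrative sketch): either a two-letter country code or an explicit CIDR
# block can be passed; the result is a random address inside that block:
#   random_de_ip = GeoUtils.random_ipv4('DE')          # somewhere in 53.0.0.0/8
#   random_lan_ip = GeoUtils.random_ipv4('10.0.0.0/8')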
4766
ac668111 4767class PerRequestProxyHandler(urllib.request.ProxyHandler):
2461f79d
PH
4768 def __init__(self, proxies=None):
4769 # Set default handlers
4770 for type in ('http', 'https'):
4771 setattr(self, '%s_open' % type,
4772 lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
4773 meth(r, proxy, type))
ac668111 4774 urllib.request.ProxyHandler.__init__(self, proxies)
2461f79d 4775
91410c9b 4776 def proxy_open(self, req, proxy, type):
2461f79d 4777 req_proxy = req.headers.get('Ytdl-request-proxy')
91410c9b
PH
4778 if req_proxy is not None:
4779 proxy = req_proxy
2461f79d
PH
4780 del req.headers['Ytdl-request-proxy']
4781
4782 if proxy == '__noproxy__':
4783 return None # No Proxy
14f25df2 4784 if urllib.parse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
71aff188 4785 req.add_header('Ytdl-socks-proxy', proxy)
7a5c1cfe 4786 # yt-dlp's http/https handlers handle wrapping the socket with SOCKS
71aff188 4787 return None
ac668111 4788 return urllib.request.ProxyHandler.proxy_open(
91410c9b 4789 self, req, proxy, type)
5bc880b9
YCH
4790
4791
0a5445dd
YCH
4792# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
4793# released into Public Domain
4794# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
4795
4796def long_to_bytes(n, blocksize=0):
4797 """long_to_bytes(n:long, blocksize:int) : string
4798 Convert a long integer to a byte string.
4799
4800 If optional blocksize is given and greater than zero, pad the front of the
4801 byte string with binary zeros so that the length is a multiple of
4802 blocksize.
4803 """
4804 # after much testing, this algorithm was deemed to be the fastest
4805 s = b''
4806 n = int(n)
4807 while n > 0:
ac668111 4808 s = struct.pack('>I', n & 0xffffffff) + s
0a5445dd
YCH
4809 n = n >> 32
4810 # strip off leading zeros
4811 for i in range(len(s)):
4812 if s[i] != b'\000'[0]:
4813 break
4814 else:
4815 # only happens when n == 0
4816 s = b'\000'
4817 i = 0
4818 s = s[i:]
4819 # add back some pad bytes. this could be done more efficiently w.r.t. the
4820 # de-padding being done above, but sigh...
4821 if blocksize > 0 and len(s) % blocksize:
4822 s = (blocksize - len(s) % blocksize) * b'\000' + s
4823 return s
4824
4825
4826def bytes_to_long(s):
4827 """bytes_to_long(string) : long
4828 Convert a byte string to a long integer.
4829
4830 This is (essentially) the inverse of long_to_bytes().
4831 """
4832 acc = 0
4833 length = len(s)
4834 if length % 4:
4835 extra = (4 - length % 4)
4836 s = b'\000' * extra + s
4837 length = length + extra
4838 for i in range(0, length, 4):
ac668111 4839 acc = (acc << 32) + struct.unpack('>I', s[i:i + 4])[0]
0a5445dd
YCH
4840 return acc
4841
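# Example (illustrative sketch): the two functions are inverses of each other:
#   >>> long_to_bytes(256)
#   b'\x01\x00'
#   >>> long_to_bytes(256, blocksize=4)
#   b'\x00\x00\x01\x00'
#   >>> bytes_to_long(b'\x01\x00')
#   256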
4842
5bc880b9
YCH
4843def ohdave_rsa_encrypt(data, exponent, modulus):
4844 '''
4845 Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
4846
4847 Input:
4848 data: data to encrypt, bytes-like object
4849 exponent, modulus: parameter e and N of RSA algorithm, both integer
4850 Output: hex string of encrypted data
4851
4852 Limitation: supports one block encryption only
4853 '''
4854
4855 payload = int(binascii.hexlify(data[::-1]), 16)
4856 encrypted = pow(payload, exponent, modulus)
4857 return '%x' % encrypted
81bdc8fd
YCH
4858
4859
f48409c7
YCH
4860def pkcs1pad(data, length):
4861 """
4862 Padding input data with PKCS#1 scheme
4863
4864 @param {int[]} data input data
4865 @param {int} length target length
4866 @returns {int[]} padded data
4867 """
4868 if len(data) > length - 11:
4869 raise ValueError('Input data too long for PKCS#1 padding')
4870
4871 pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
4872 return [0, 2] + pseudo_random + [0] + data
4873
4874
7b2c3f47 4875def _base_n_table(n, table):
4876 if not table and not n:
4877 raise ValueError('Either table or n must be specified')
612f2be5 4878 table = (table or '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')[:n]
4879
44f14eb4 4880 if n and n != len(table):
612f2be5 4881 raise ValueError(f'base {n} exceeds table length {len(table)}')
4882 return table
59f898b7 4883
5eb6bdce 4884
7b2c3f47 4885def encode_base_n(num, n=None, table=None):
4886 """Convert given int to a base-n string"""
612f2be5 4887 table = _base_n_table(n, table)
7b2c3f47 4888 if not num:
5eb6bdce
YCH
4889 return table[0]
4890
7b2c3f47 4891 result, base = '', len(table)
81bdc8fd 4892 while num:
7b2c3f47 4893 result = table[num % base] + result
612f2be5 4894 num = num // base
7b2c3f47 4895 return result
4896
4897
4898def decode_base_n(string, n=None, table=None):
4899 """Convert given base-n string to int"""
4900 table = {char: index for index, char in enumerate(_base_n_table(n, table))}
4901 result, base = 0, len(table)
4902 for char in string:
4903 result = result * base + table[char]
4904 return result
4905
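# Illustrative round-trip (not part of the module), using the default table:
#   >>> encode_base_n(255, 16)
#   'ff'
#   >>> decode_base_n('ff', 16)
#   255
#   >>> encode_base_n(0, 36)
#   '0'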
4906
f52354a8 4907def decode_packed_codes(code):
06b3fe29 4908 mobj = re.search(PACKED_CODES_RE, code)
a0566bbf 4909 obfuscated_code, base, count, symbols = mobj.groups()
f52354a8
YCH
4910 base = int(base)
4911 count = int(count)
4912 symbols = symbols.split('|')
4913 symbol_table = {}
4914
4915 while count:
4916 count -= 1
5eb6bdce 4917 base_n_count = encode_base_n(count, base)
f52354a8
YCH
4918 symbol_table[base_n_count] = symbols[count] or base_n_count
4919
4920 return re.sub(
4921 r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
a0566bbf 4922 obfuscated_code)
e154c651 4923
4924
1ced2221
S
4925def caesar(s, alphabet, shift):
4926 if shift == 0:
4927 return s
4928 l = len(alphabet)
4929 return ''.join(
4930 alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
4931 for c in s)
4932
4933
4934def rot47(s):
4935 return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)
4936
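# Illustrative example (not part of the module): rot47 shifts every printable
# ASCII character (except space) by 47 positions and is its own inverse.
#   >>> rot47('Hello')
#   'w6==@'
#   >>> rot47('w6==@')
#   'Hello'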
4937
e154c651 4938def parse_m3u8_attributes(attrib):
4939 info = {}
4940 for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
4941 if val.startswith('"'):
4942 val = val[1:-1]
4943 info[key] = val
4944 return info
1143535d
YCH
4945
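# Illustrative example (not part of the module): parsing an EXT-X-STREAM-INF
# style attribute list; quoted values keep their inner commas.
#   >>> parse_m3u8_attributes('BANDWIDTH=1280000,CODECS="avc1.4d401e,mp4a.40.2"')
#   {'BANDWIDTH': '1280000', 'CODECS': 'avc1.4d401e,mp4a.40.2'}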
4946
4947def urshift(val, n):
4948 return val >> n if val >= 0 else (val + 0x100000000) >> n
d3f8e038
YCH
4949
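# Illustrative example (not part of the module): emulates JavaScript's
# unsigned right shift (>>>) for negative 32-bit values.
#   >>> urshift(-1, 1)
#   2147483647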
4950
efa97bdc 4951def write_xattr(path, key, value):
6f7563be 4952 # Windows: Write xattrs to NTFS Alternate Data Streams:
4953 # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
4954 if compat_os_name == 'nt':
4955 assert ':' not in key
4956 assert os.path.exists(path)
efa97bdc
YCH
4957
4958 try:
6f7563be 4959 with open(f'{path}:{key}', 'wb') as f:
4960 f.write(value)
86e5f3ed 4961 except OSError as e:
efa97bdc 4962 raise XAttrMetadataError(e.errno, e.strerror)
6f7563be 4963 return
efa97bdc 4964
6f7563be 4965 # UNIX Method 1. Use xattrs/pyxattrs modules
efa97bdc 4966
6f7563be 4967 setxattr = None
4968 if getattr(xattr, '_yt_dlp__identifier', None) == 'pyxattr':
4969 # Unicode arguments are not supported in pyxattr until version 0.5.0
4970 # See https://github.com/ytdl-org/youtube-dl/issues/5498
4971 if version_tuple(xattr.__version__) >= (0, 5, 0):
4972 setxattr = xattr.set
4973 elif xattr:
4974 setxattr = xattr.setxattr
efa97bdc 4975
6f7563be 4976 if setxattr:
4977 try:
4978 setxattr(path, key, value)
4979 except OSError as e:
4980 raise XAttrMetadataError(e.errno, e.strerror)
4981 return
efa97bdc 4982
6f7563be 4983 # UNIX Method 2. Use setfattr/xattr executables
4984 exe = ('setfattr' if check_executable('setfattr', ['--version'])
4985 else 'xattr' if check_executable('xattr', ['-h']) else None)
4986 if not exe:
4987 raise XAttrUnavailableError(
4988 'Couldn\'t find a tool to set the xattrs. Install either the python "xattr" or "pyxattr" modules or the '
4989 + ('"xattr" binary' if sys.platform != 'linux' else 'GNU "attr" package (which contains the "setfattr" tool)'))
efa97bdc 4990
0f06bcd7 4991 value = value.decode()
6f7563be 4992 try:
f0c9fb96 4993 _, stderr, returncode = Popen.run(
6f7563be 4994 [exe, '-w', key, value, path] if exe == 'xattr' else [exe, '-n', key, '-v', value, path],
e121e3ce 4995 text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
6f7563be 4996 except OSError as e:
4997 raise XAttrMetadataError(e.errno, e.strerror)
f0c9fb96 4998 if returncode:
4999 raise XAttrMetadataError(returncode, stderr)
0c265486
YCH
5000
5001
5002def random_birthday(year_field, month_field, day_field):
aa374bc7
AS
5003 start_date = datetime.date(1950, 1, 1)
5004 end_date = datetime.date(1995, 12, 31)
5005 offset = random.randint(0, (end_date - start_date).days)
5006 random_date = start_date + datetime.timedelta(offset)
0c265486 5007 return {
aa374bc7
AS
5008 year_field: str(random_date.year),
5009 month_field: str(random_date.month),
5010 day_field: str(random_date.day),
0c265486 5011 }
732044af 5012
c76eb41b 5013
8c53322c
L
5014def find_available_port(interface=''):
5015 try:
5016 with socket.socket() as sock:
5017 sock.bind((interface, 0))
5018 return sock.getsockname()[1]
5019 except OSError:
5020 return None
5021
5022
732044af 5023# Templates for internet shortcut files, which are plain text files.
e5a998f3 5024DOT_URL_LINK_TEMPLATE = '''\
732044af 5025[InternetShortcut]
5026URL=%(url)s
e5a998f3 5027'''
732044af 5028
e5a998f3 5029DOT_WEBLOC_LINK_TEMPLATE = '''\
732044af 5030<?xml version="1.0" encoding="UTF-8"?>
5031<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
5032<plist version="1.0">
5033<dict>
5034\t<key>URL</key>
5035\t<string>%(url)s</string>
5036</dict>
5037</plist>
e5a998f3 5038'''
732044af 5039
e5a998f3 5040DOT_DESKTOP_LINK_TEMPLATE = '''\
732044af 5041[Desktop Entry]
5042Encoding=UTF-8
5043Name=%(filename)s
5044Type=Link
5045URL=%(url)s
5046Icon=text-html
e5a998f3 5047'''
732044af 5048
08438d2c 5049LINK_TEMPLATES = {
5050 'url': DOT_URL_LINK_TEMPLATE,
5051 'desktop': DOT_DESKTOP_LINK_TEMPLATE,
5052 'webloc': DOT_WEBLOC_LINK_TEMPLATE,
5053}
5054
732044af 5055
5056def iri_to_uri(iri):
5057 """
5058 Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).
5059
5060 The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact.
5061 """
5062
14f25df2 5063 iri_parts = urllib.parse.urlparse(iri)
732044af 5064
5065 if '[' in iri_parts.netloc:
5066 raise ValueError('IPv6 URIs are not yet supported.')
5067 # Querying `.netloc`, when there's only one bracket, also raises a ValueError.
5068
5069 # The `safe` argument values, that the following code uses, contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.
5070
5071 net_location = ''
5072 if iri_parts.username:
f9934b96 5073 net_location += urllib.parse.quote(iri_parts.username, safe=r"!$%&'()*+,~")
732044af 5074 if iri_parts.password is not None:
f9934b96 5075 net_location += ':' + urllib.parse.quote(iri_parts.password, safe=r"!$%&'()*+,~")
732044af 5076 net_location += '@'
5077
0f06bcd7 5078 net_location += iri_parts.hostname.encode('idna').decode() # Punycode for Unicode hostnames.
732044af 5079 # The 'idna' encoding produces ASCII text.
5080 if iri_parts.port is not None and iri_parts.port != 80:
5081 net_location += ':' + str(iri_parts.port)
5082
f9934b96 5083 return urllib.parse.urlunparse(
732044af 5084 (iri_parts.scheme,
5085 net_location,
5086
f9934b96 5087 urllib.parse.quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),
732044af 5088
5089 # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
f9934b96 5090 urllib.parse.quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),
732044af 5091
5092 # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
f9934b96 5093 urllib.parse.quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),
732044af 5094
f9934b96 5095 urllib.parse.quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))
732044af 5096
5097 # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
5098
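# Illustrative example (not part of the module, hypothetical URL): non-ASCII
# path and query characters are percent-encoded as UTF-8, while existing
# escapes and the ASCII hostname are left untouched.
#   >>> iri_to_uri('http://example.com/été?q=naïve')
#   'http://example.com/%C3%A9t%C3%A9?q=na%C3%AFve'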
5099
5100def to_high_limit_path(path):
5101 if sys.platform in ['win32', 'cygwin']:
5102 # Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited.
e5a998f3 5103 return '\\\\?\\' + os.path.abspath(path)
732044af 5104
5105 return path
76d321f6 5106
c76eb41b 5107
7b2c3f47 5108def format_field(obj, field=None, template='%s', ignore=NO_DEFAULT, default='', func=IDENTITY):
69bec673 5109 val = traversal.traverse_obj(obj, *variadic(field))
6f2287cb 5110 if not val if ignore is NO_DEFAULT else val in variadic(ignore):
e0ddbd02 5111 return default
7b2c3f47 5112 return template % func(val)
00dd0cd5 5113
5114
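# Illustrative examples (not part of the module): the template is applied to
# the traversed value, and `default` is returned when the field is missing.
#   >>> format_field({'width': 1280}, 'width', '%dpx')
#   '1280px'
#   >>> format_field({}, 'width', '%dpx', default='unknown')
#   'unknown'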
5115def clean_podcast_url(url):
91302ed3 5116 url = re.sub(r'''(?x)
00dd0cd5 5117 (?:
5118 (?:
5119 chtbl\.com/track|
5120 media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
5121 play\.podtrac\.com
5122 )/[^/]+|
5123 (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
5124 flex\.acast\.com|
5125 pd(?:
5126 cn\.co| # https://podcorn.com/analytics-prefix/
5127 st\.fm # https://podsights.com/docs/
5128 )/e
5129 )/''', '', url)
91302ed3 5130 return re.sub(r'^\w+://(\w+://)', r'\1', url)
ffcb8191
THD
5131
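# Illustrative examples (not part of the module, hypothetical URLs): tracking
# prefixes are stripped, and a redirect target that carries its own protocol
# keeps that protocol.
#   >>> clean_podcast_url('https://chtbl.com/track/12345/traffic.example.com/ep.mp3')
#   'https://traffic.example.com/ep.mp3'
#   >>> clean_podcast_url('https://pdst.fm/e/https://cdn.example.com/ep.mp3')
#   'https://cdn.example.com/ep.mp3'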
5132
5133_HEX_TABLE = '0123456789abcdef'
5134
5135
5136def random_uuidv4():
5137 return re.sub(r'[xy]', lambda x: _HEX_TABLE[random.randint(0, 15)], 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')
0202b52a 5138
5139
5140def make_dir(path, to_screen=None):
5141 try:
5142 dn = os.path.dirname(path)
b25d6cb9
AI
5143 if dn:
5144 os.makedirs(dn, exist_ok=True)
0202b52a 5145 return True
86e5f3ed 5146 except OSError as err:
0202b52a 5147 if callable(to_screen):
69bec673 5148 to_screen(f'unable to create directory {err}')
0202b52a 5149 return False
f74980cb 5150
5151
5152def get_executable_path():
69bec673 5153 from ..update import _get_variant_and_executable_path
c487cf00 5154
b5899f4f 5155 return os.path.dirname(os.path.abspath(_get_variant_and_executable_path()[1]))
f74980cb 5156
5157
8e40b9d1 5158def get_user_config_dirs(package_name):
8e40b9d1
M
5159 # .config (e.g. ~/.config/package_name)
5160 xdg_config_home = os.getenv('XDG_CONFIG_HOME') or compat_expanduser('~/.config')
773c272d 5161 yield os.path.join(xdg_config_home, package_name)
8e40b9d1
M
5162
5163 # appdata (%APPDATA%/package_name)
5164 appdata_dir = os.getenv('appdata')
5165 if appdata_dir:
773c272d 5166 yield os.path.join(appdata_dir, package_name)
8e40b9d1
M
5167
5168 # home (~/.package_name)
773c272d 5169 yield os.path.join(compat_expanduser('~'), f'.{package_name}')
8e40b9d1
M
5170
5171
5172def get_system_config_dirs(package_name):
8e40b9d1 5173 # /etc/package_name
773c272d 5174 yield os.path.join('/etc', package_name)
06167fbb 5175
5176
3e9b66d7 5177def time_seconds(**kwargs):
83c4970e
L
5178 """
5179 Returns TZ-aware time in seconds since the epoch (1970-01-01T00:00:00Z)
5180 """
5181 return time.time() + datetime.timedelta(**kwargs).total_seconds()
3e9b66d7
LNO
5182
5183
49fa4d9a
N
5184# create a JSON Web Signature (jws) with HS256 algorithm
5185# the resulting format is in JWS Compact Serialization
5186# implemented following JWT https://www.rfc-editor.org/rfc/rfc7519.html
5187# implemented following JWS https://www.rfc-editor.org/rfc/rfc7515.html
5188def jwt_encode_hs256(payload_data, key, headers={}):
5189 header_data = {
5190 'alg': 'HS256',
5191 'typ': 'JWT',
5192 }
5193 if headers:
5194 header_data.update(headers)
0f06bcd7 5195 header_b64 = base64.b64encode(json.dumps(header_data).encode())
5196 payload_b64 = base64.b64encode(json.dumps(payload_data).encode())
5197 h = hmac.new(key.encode(), header_b64 + b'.' + payload_b64, hashlib.sha256)
49fa4d9a
N
5198 signature_b64 = base64.b64encode(h.digest())
5199 token = header_b64 + b'.' + payload_b64 + b'.' + signature_b64
5200 return token
819e0531 5201
5202
16b0d7e6 5203# can be extended in the future to verify the signature, parse the header and return the algorithm used if it's not HS256
5204def jwt_decode_hs256(jwt):
5205 header_b64, payload_b64, signature_b64 = jwt.split('.')
2c98d998 5206 # add trailing ='s that may have been stripped, superfluous ='s are ignored
5207 payload_data = json.loads(base64.urlsafe_b64decode(f'{payload_b64}==='))
16b0d7e6 5208 return payload_data
5209
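# Illustrative round-trip (not part of the module, hypothetical key): decoding
# a token produced by jwt_encode_hs256 recovers the payload; the signature is
# not verified.
#   >>> token = jwt_encode_hs256({'uid': 123}, 'secret')
#   >>> jwt_decode_hs256(token.decode())
#   {'uid': 123}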
5210
53973b4d 5211WINDOWS_VT_MODE = False if compat_os_name == 'nt' else None
5212
5213
7a32c70d 5214@functools.cache
819e0531 5215def supports_terminal_sequences(stream):
5216 if compat_os_name == 'nt':
8a82af35 5217 if not WINDOWS_VT_MODE:
819e0531 5218 return False
5219 elif not os.getenv('TERM'):
5220 return False
5221 try:
5222 return stream.isatty()
5223 except BaseException:
5224 return False
5225
5226
c53a18f0 5227def windows_enable_vt_mode():
5228 """Ref: https://bugs.python.org/issue30075 """
8a82af35 5229 if get_windows_version() < (10, 0, 10586):
53973b4d 5230 return
53973b4d 5231
c53a18f0 5232 import ctypes
5233 import ctypes.wintypes
5234 import msvcrt
5235
5236 ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
5237
5238 dll = ctypes.WinDLL('kernel32', use_last_error=False)
5239 handle = os.open('CONOUT$', os.O_RDWR)
c53a18f0 5240 try:
5241 h_out = ctypes.wintypes.HANDLE(msvcrt.get_osfhandle(handle))
5242 dw_original_mode = ctypes.wintypes.DWORD()
5243 success = dll.GetConsoleMode(h_out, ctypes.byref(dw_original_mode))
5244 if not success:
5245 raise Exception('GetConsoleMode failed')
5246
5247 success = dll.SetConsoleMode(h_out, ctypes.wintypes.DWORD(
5248 dw_original_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING))
5249 if not success:
5250 raise Exception('SetConsoleMode failed')
c53a18f0 5251 finally:
5252 os.close(handle)
53973b4d 5253
f0795149 5254 global WINDOWS_VT_MODE
5255 WINDOWS_VT_MODE = True
5256 supports_terminal_sequences.cache_clear()
5257
53973b4d 5258
ec11a9f4 5259_terminal_sequences_re = re.compile('\033\\[[^m]+m')
5260
5261
5262def remove_terminal_sequences(string):
5263 return _terminal_sequences_re.sub('', string)
5264
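# Illustrative example (not part of the module): strips ANSI SGR color codes.
#   >>> remove_terminal_sequences('\033[0;31merror\033[0m')
#   'error'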
5265
5266def number_of_digits(number):
5267 return len('%d' % number)
34921b43 5268
5269
5270def join_nonempty(*values, delim='-', from_dict=None):
5271 if from_dict is not None:
69bec673 5272 values = (traversal.traverse_obj(from_dict, variadic(v)) for v in values)
34921b43 5273 return delim.join(map(str, filter(None, values)))
06e57990 5274
5275
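# Illustrative examples (not part of the module): falsy values are dropped,
# the rest are stringified and joined with the delimiter.
#   >>> join_nonempty('mp4', None, '', 'dash')
#   'mp4-dash'
#   >>> join_nonempty(1080, 'p', delim='')
#   '1080p'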
27231526
ZM
5276def scale_thumbnails_to_max_format_width(formats, thumbnails, url_width_re):
5277 """
5278 Find the largest format dimensions in terms of video width and, for each thumbnail:
5279 * Modify the URL: Match the width with the provided regex and replace with the former width
5280 * Update dimensions
5281
5282 This function is useful with video services that scale the provided thumbnails on demand
5283 """
5284 _keys = ('width', 'height')
5285 max_dimensions = max(
86e5f3ed 5286 (tuple(format.get(k) or 0 for k in _keys) for format in formats),
27231526
ZM
5287 default=(0, 0))
5288 if not max_dimensions[0]:
5289 return thumbnails
5290 return [
5291 merge_dicts(
5292 {'url': re.sub(url_width_re, str(max_dimensions[0]), thumbnail['url'])},
5293 dict(zip(_keys, max_dimensions)), thumbnail)
5294 for thumbnail in thumbnails
5295 ]
5296
5297
93c8410d
LNO
5298def parse_http_range(range):
5299 """ Parse value of "Range" or "Content-Range" HTTP header into tuple. """
5300 if not range:
5301 return None, None, None
5302 crg = re.search(r'bytes[ =](\d+)-(\d+)?(?:/(\d+))?', range)
5303 if not crg:
5304 return None, None, None
5305 return int(crg.group(1)), int_or_none(crg.group(2)), int_or_none(crg.group(3))
5306
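# Illustrative examples (not part of the module): both "Range" and
# "Content-Range" style values are accepted; missing parts come back as None.
#   >>> parse_http_range('bytes 0-499/1234')
#   (0, 499, 1234)
#   >>> parse_http_range('bytes=500-')
#   (500, None, None)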
5307
6b9e832d 5308def read_stdin(what):
5309 eof = 'Ctrl+Z' if compat_os_name == 'nt' else 'Ctrl+D'
5310 write_string(f'Reading {what} from STDIN - EOF ({eof}) to end:\n')
5311 return sys.stdin
5312
5313
a904a7f8
L
5314def determine_file_encoding(data):
5315 """
88f60feb 5316 Detect the text encoding used
a904a7f8
L
5317 @returns (encoding, bytes to skip)
5318 """
5319
88f60feb 5320 # BOM marks are given priority over declarations
a904a7f8 5321 for bom, enc in BOMS:
a904a7f8
L
5322 if data.startswith(bom):
5323 return enc, len(bom)
5324
88f60feb 5325 # Strip off all null bytes to match even when UTF-16 or UTF-32 is used.
5326 # We ignore the endianness to get a good enough match
a904a7f8 5327 data = data.replace(b'\0', b'')
88f60feb 5328 mobj = re.match(rb'(?m)^#\s*coding\s*:\s*(\S+)\s*$', data)
5329 return mobj.group(1).decode() if mobj else None, 0
a904a7f8
L
5330
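# Illustrative example (not part of the module): a PEP 263 style coding
# declaration is picked up when no BOM is present; the second value is the
# number of BOM bytes to skip (0 here).
#   >>> determine_file_encoding(b'# coding: utf-8\n-f best\n')
#   ('utf-8', 0)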
5331
06e57990 5332class Config:
5333 own_args = None
9e491463 5334 parsed_args = None
06e57990 5335 filename = None
5336 __initialized = False
5337
5338 def __init__(self, parser, label=None):
9e491463 5339 self.parser, self.label = parser, label
06e57990 5340 self._loaded_paths, self.configs = set(), []
5341
5342 def init(self, args=None, filename=None):
5343 assert not self.__initialized
284a60c5 5344 self.own_args, self.filename = args, filename
5345 return self.load_configs()
5346
5347 def load_configs(self):
65662dff 5348 directory = ''
284a60c5 5349 if self.filename:
5350 location = os.path.realpath(self.filename)
65662dff 5351 directory = os.path.dirname(location)
06e57990 5352 if location in self._loaded_paths:
5353 return False
5354 self._loaded_paths.add(location)
5355
284a60c5 5356 self.__initialized = True
5357 opts, _ = self.parser.parse_known_args(self.own_args)
5358 self.parsed_args = self.own_args
9e491463 5359 for location in opts.config_locations or []:
6b9e832d 5360 if location == '-':
1060f82f 5361 if location in self._loaded_paths:
5362 continue
5363 self._loaded_paths.add(location)
6b9e832d 5364 self.append_config(shlex.split(read_stdin('options'), comments=True), label='stdin')
5365 continue
65662dff 5366 location = os.path.join(directory, expand_path(location))
06e57990 5367 if os.path.isdir(location):
5368 location = os.path.join(location, 'yt-dlp.conf')
5369 if not os.path.exists(location):
9e491463 5370 self.parser.error(f'config location {location} does not exist')
06e57990 5371 self.append_config(self.read_file(location), location)
5372 return True
5373
5374 def __str__(self):
5375 label = join_nonempty(
5376 self.label, 'config', f'"{self.filename}"' if self.filename else '',
5377 delim=' ')
5378 return join_nonempty(
5379 self.own_args is not None and f'{label[0].upper()}{label[1:]}: {self.hide_login_info(self.own_args)}',
5380 *(f'\n{c}'.replace('\n', '\n| ')[1:] for c in self.configs),
5381 delim='\n')
5382
7a32c70d 5383 @staticmethod
06e57990 5384 def read_file(filename, default=[]):
5385 try:
a904a7f8 5386 optionf = open(filename, 'rb')
86e5f3ed 5387 except OSError:
06e57990 5388 return default # silently skip if file is not present
a904a7f8
L
5389 try:
5390 enc, skip = determine_file_encoding(optionf.read(512))
5391 optionf.seek(skip, io.SEEK_SET)
5392 except OSError:
5393 enc = None # silently skip read errors
06e57990 5394 try:
5395 # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
a904a7f8 5396 contents = optionf.read().decode(enc or preferredencoding())
f9934b96 5397 res = shlex.split(contents, comments=True)
44a6fcff 5398 except Exception as err:
5399 raise ValueError(f'Unable to parse "{filename}": {err}')
06e57990 5400 finally:
5401 optionf.close()
5402 return res
5403
7a32c70d 5404 @staticmethod
06e57990 5405 def hide_login_info(opts):
86e5f3ed 5406 PRIVATE_OPTS = {'-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'}
06e57990 5407 eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')
5408
5409 def _scrub_eq(o):
5410 m = eqre.match(o)
5411 if m:
5412 return m.group('key') + '=PRIVATE'
5413 else:
5414 return o
5415
5416 opts = list(map(_scrub_eq, opts))
5417 for idx, opt in enumerate(opts):
5418 if opt in PRIVATE_OPTS and idx + 1 < len(opts):
5419 opts[idx + 1] = 'PRIVATE'
5420 return opts
5421
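    # Illustrative example (not part of the module, hypothetical values):
    # credentials are masked both in separate-argument and --opt=value forms.
    #   >>> Config.hide_login_info(['-u', 'me@example.com', '--password=hunter2'])
    #   ['-u', 'PRIVATE', '--password=PRIVATE']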
5422 def append_config(self, *args, label=None):
9e491463 5423 config = type(self)(self.parser, label)
06e57990 5424 config._loaded_paths = self._loaded_paths
5425 if config.init(*args):
5426 self.configs.append(config)
5427
7a32c70d 5428 @property
06e57990 5429 def all_args(self):
5430 for config in reversed(self.configs):
5431 yield from config.all_args
9e491463 5432 yield from self.parsed_args or []
5433
5434 def parse_known_args(self, **kwargs):
5435 return self.parser.parse_known_args(self.all_args, **kwargs)
06e57990 5436
5437 def parse_args(self):
9e491463 5438 return self.parser.parse_args(self.all_args)
da42679b
LNO
5439
5440
d5d1df8a 5441class WebSocketsWrapper:
da42679b 5442 """Wraps websockets module to use in non-async scopes"""
abfecb7b 5443 pool = None
da42679b 5444
3cea3edd 5445 def __init__(self, url, headers=None, connect=True):
059bc4db 5446 self.loop = asyncio.new_event_loop()
9cd08050 5447 # XXX: "loop" is deprecated
5448 self.conn = websockets.connect(
5449 url, extra_headers=headers, ping_interval=None,
5450 close_timeout=float('inf'), loop=self.loop, ping_timeout=float('inf'))
3cea3edd
LNO
5451 if connect:
5452 self.__enter__()
15dfb392 5453 atexit.register(self.__exit__, None, None, None)
da42679b
LNO
5454
5455 def __enter__(self):
3cea3edd 5456 if not self.pool:
9cd08050 5457 self.pool = self.run_with_loop(self.conn.__aenter__(), self.loop)
da42679b
LNO
5458 return self
5459
5460 def send(self, *args):
5461 self.run_with_loop(self.pool.send(*args), self.loop)
5462
5463 def recv(self, *args):
5464 return self.run_with_loop(self.pool.recv(*args), self.loop)
5465
5466 def __exit__(self, type, value, traceback):
5467 try:
5468 return self.run_with_loop(self.conn.__aexit__(type, value, traceback), self.loop)
5469 finally:
5470 self.loop.close()
15dfb392 5471 self._cancel_all_tasks(self.loop)
da42679b
LNO
5472
5473 # taken from https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py with modifications
5474 # for contributors: if any new library that uses asyncio needs to run in non-async code, move these functions out of this class
7a32c70d 5475 @staticmethod
da42679b 5476 def run_with_loop(main, loop):
059bc4db 5477 if not asyncio.iscoroutine(main):
da42679b
LNO
5478 raise ValueError(f'a coroutine was expected, got {main!r}')
5479
5480 try:
5481 return loop.run_until_complete(main)
5482 finally:
5483 loop.run_until_complete(loop.shutdown_asyncgens())
5484 if hasattr(loop, 'shutdown_default_executor'):
5485 loop.run_until_complete(loop.shutdown_default_executor())
5486
7a32c70d 5487 @staticmethod
da42679b 5488 def _cancel_all_tasks(loop):
059bc4db 5489 to_cancel = asyncio.all_tasks(loop)
da42679b
LNO
5490
5491 if not to_cancel:
5492 return
5493
5494 for task in to_cancel:
5495 task.cancel()
5496
9cd08050 5497 # XXX: "loop" is removed in python 3.10+
da42679b 5498 loop.run_until_complete(
059bc4db 5499 asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))
da42679b
LNO
5500
5501 for task in to_cancel:
5502 if task.cancelled():
5503 continue
5504 if task.exception() is not None:
5505 loop.call_exception_handler({
5506 'message': 'unhandled exception during asyncio.run() shutdown',
5507 'exception': task.exception(),
5508 'task': task,
5509 })
5510
5511
8b7539d2 5512def merge_headers(*dicts):
08d30158 5513 """Merge dicts of http headers case insensitively, prioritizing the latter ones"""
76aa9913 5514 return {k.title(): v for k, v in itertools.chain.from_iterable(map(dict.items, dicts))}
28787f16 5515
5516
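# Illustrative example (not part of the module): keys are title-cased so that
# differently-cased duplicates collapse, with later dicts taking precedence.
#   >>> merge_headers({'user-agent': 'UA1', 'Accept': '*/*'}, {'USER-AGENT': 'UA2'})
#   {'User-Agent': 'UA2', 'Accept': '*/*'}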
b1f94422 5517def cached_method(f):
5518 """Cache a method"""
5519 signature = inspect.signature(f)
5520
7a32c70d 5521 @functools.wraps(f)
b1f94422 5522 def wrapper(self, *args, **kwargs):
5523 bound_args = signature.bind(self, *args, **kwargs)
5524 bound_args.apply_defaults()
d5d1df8a 5525 key = tuple(bound_args.arguments.values())[1:]
b1f94422 5526
6368e2e6 5527 cache = vars(self).setdefault('_cached_method__cache', {}).setdefault(f.__name__, {})
b1f94422 5528 if key not in cache:
5529 cache[key] = f(self, *args, **kwargs)
5530 return cache[key]
5531 return wrapper
5532
5533
28787f16 5534class classproperty:
83cc7b8a 5535 """property access for class methods with optional caching"""
5536 def __new__(cls, func=None, *args, **kwargs):
5537 if not func:
5538 return functools.partial(cls, *args, **kwargs)
5539 return super().__new__(cls)
c487cf00 5540
83cc7b8a 5541 def __init__(self, func, *, cache=False):
c487cf00 5542 functools.update_wrapper(self, func)
5543 self.func = func
83cc7b8a 5544 self._cache = {} if cache else None
28787f16 5545
5546 def __get__(self, _, cls):
83cc7b8a 5547 if self._cache is None:
5548 return self.func(cls)
5549 elif cls not in self._cache:
5550 self._cache[cls] = self.func(cls)
5551 return self._cache[cls]
19a03940 5552
5553
a5387729 5554class function_with_repr:
b2e0343b 5555 def __init__(self, func, repr_=None):
a5387729 5556 functools.update_wrapper(self, func)
b2e0343b 5557 self.func, self.__repr = func, repr_
a5387729 5558
5559 def __call__(self, *args, **kwargs):
5560 return self.func(*args, **kwargs)
5561
5562 def __repr__(self):
b2e0343b 5563 if self.__repr:
5564 return self.__repr
a5387729 5565 return f'{self.func.__module__}.{self.func.__qualname__}'
5566
5567
64fa820c 5568class Namespace(types.SimpleNamespace):
591bb9d3 5569 """Immutable namespace"""
591bb9d3 5570
7896214c 5571 def __iter__(self):
64fa820c 5572 return iter(self.__dict__.values())
7896214c 5573
7a32c70d 5574 @property
64fa820c 5575 def items_(self):
5576 return self.__dict__.items()
9b8ee23b 5577
5578
8dc59305 5579MEDIA_EXTENSIONS = Namespace(
5580 common_video=('avi', 'flv', 'mkv', 'mov', 'mp4', 'webm'),
5581 video=('3g2', '3gp', 'f4v', 'mk3d', 'divx', 'mpg', 'ogv', 'm4v', 'wmv'),
5582 common_audio=('aiff', 'alac', 'flac', 'm4a', 'mka', 'mp3', 'ogg', 'opus', 'wav'),
fbb73833 5583 audio=('aac', 'ape', 'asf', 'f4a', 'f4b', 'm4b', 'm4p', 'm4r', 'oga', 'ogx', 'spx', 'vorbis', 'wma', 'weba'),
8dc59305 5584 thumbnails=('jpg', 'png', 'webp'),
5585 storyboards=('mhtml', ),
5586 subtitles=('srt', 'vtt', 'ass', 'lrc'),
5587 manifests=('f4f', 'f4m', 'm3u8', 'smil', 'mpd'),
5588)
5589MEDIA_EXTENSIONS.video += MEDIA_EXTENSIONS.common_video
5590MEDIA_EXTENSIONS.audio += MEDIA_EXTENSIONS.common_audio
5591
5592KNOWN_EXTENSIONS = (*MEDIA_EXTENSIONS.video, *MEDIA_EXTENSIONS.audio, *MEDIA_EXTENSIONS.manifests)
5593
5594
be5c1ae8 5595class RetryManager:
5596 """Usage:
5597 for retry in RetryManager(...):
5598 try:
5599 ...
5600 except SomeException as err:
5601 retry.error = err
5602 continue
5603 """
5604 attempt, _error = 0, None
5605
5606 def __init__(self, _retries, _error_callback, **kwargs):
5607 self.retries = _retries or 0
5608 self.error_callback = functools.partial(_error_callback, **kwargs)
5609
5610 def _should_retry(self):
5611 return self._error is not NO_DEFAULT and self.attempt <= self.retries
5612
7a32c70d 5613 @property
be5c1ae8 5614 def error(self):
5615 if self._error is NO_DEFAULT:
5616 return None
5617 return self._error
5618
7a32c70d 5619 @error.setter
be5c1ae8 5620 def error(self, value):
5621 self._error = value
5622
5623 def __iter__(self):
5624 while self._should_retry():
5625 self.error = NO_DEFAULT
5626 self.attempt += 1
5627 yield self
5628 if self.error:
5629 self.error_callback(self.error, self.attempt, self.retries)
5630
7a32c70d 5631 @staticmethod
be5c1ae8 5632 def report_retry(e, count, retries, *, sleep_func, info, warn, error=None, suffix=None):
5633 """Utility function for reporting retries"""
5634 if count > retries:
5635 if error:
5636 return error(f'{e}. Giving up after {count - 1} retries') if count > 1 else error(str(e))
5637 raise e
5638
5639 if not count:
5640 return warn(e)
5641 elif isinstance(e, ExtractorError):
3ce29336 5642 e = remove_end(str_or_none(e.cause) or e.orig_msg, '.')
be5c1ae8 5643 warn(f'{e}. Retrying{format_field(suffix, None, " %s")} ({count}/{retries})...')
5644
5645 delay = float_or_none(sleep_func(n=count - 1)) if callable(sleep_func) else sleep_func
5646 if delay:
5647 info(f'Sleeping {delay:.2f} seconds ...')
5648 time.sleep(delay)
5649
5650
0647d925 5651def make_archive_id(ie, video_id):
5652 ie_key = ie if isinstance(ie, str) else ie.ie_key()
5653 return f'{ie_key.lower()} {video_id}'
5654
5655
a1c5bd82 5656def truncate_string(s, left, right=0):
5657 assert left > 3 and right >= 0
5658 if s is None or len(s) <= left + right:
5659 return s
71df9b7f 5660 return f'{s[:left-3]}...{s[-right:] if right else ""}'
a1c5bd82 5661
5662
5314b521 5663def orderedSet_from_options(options, alias_dict, *, use_regex=False, start=None):
5664 assert 'all' in alias_dict, '"all" alias is required'
5665 requested = list(start or [])
5666 for val in options:
5667 discard = val.startswith('-')
5668 if discard:
5669 val = val[1:]
5670
5671 if val in alias_dict:
5672 val = alias_dict[val] if not discard else [
5673 i[1:] if i.startswith('-') else f'-{i}' for i in alias_dict[val]]
5674 # NB: Do not allow regex in aliases for performance
5675 requested = orderedSet_from_options(val, alias_dict, start=requested)
5676 continue
5677
5678 current = (filter(re.compile(val, re.I).fullmatch, alias_dict['all']) if use_regex
5679 else [val] if val in alias_dict['all'] else None)
5680 if current is None:
5681 raise ValueError(val)
5682
5683 if discard:
5684 for item in current:
5685 while item in requested:
5686 requested.remove(item)
5687 else:
5688 requested.extend(current)
5689
5690 return orderedSet(requested)
5691
5692
eedda525 5693# TODO: Rewrite
d0d74b71 5694class FormatSorter:
5695 regex = r' *((?P<reverse>\+)?(?P<field>[a-zA-Z0-9_]+)((?P<separator>[~:])(?P<limit>.*?))?)? *$'
5696
5697 default = ('hidden', 'aud_or_vid', 'hasvid', 'ie_pref', 'lang', 'quality',
5698 'res', 'fps', 'hdr:12', 'vcodec:vp9.2', 'channels', 'acodec',
5699 'size', 'br', 'asr', 'proto', 'ext', 'hasaud', 'source', 'id') # These must not be aliases
5700 ytdl_default = ('hasaud', 'lang', 'quality', 'tbr', 'filesize', 'vbr',
5701 'height', 'width', 'proto', 'vext', 'abr', 'aext',
5702 'fps', 'fs_approx', 'source', 'id')
5703
5704 settings = {
5705 'vcodec': {'type': 'ordered', 'regex': True,
5706 'order': ['av0?1', 'vp0?9.2', 'vp0?9', '[hx]265|he?vc?', '[hx]264|avc', 'vp0?8', 'mp4v|h263', 'theora', '', None, 'none']},
5707 'acodec': {'type': 'ordered', 'regex': True,
71082216 5708 'order': ['[af]lac', 'wav|aiff', 'opus', 'vorbis|ogg', 'aac', 'mp?4a?', 'mp3', 'ac-?4', 'e-?a?c-?3', 'ac-?3', 'dts', '', None, 'none']},
d0d74b71 5709 'hdr': {'type': 'ordered', 'regex': True, 'field': 'dynamic_range',
5710 'order': ['dv', '(hdr)?12', r'(hdr)?10\+', '(hdr)?10', 'hlg', '', 'sdr', None]},
5711 'proto': {'type': 'ordered', 'regex': True, 'field': 'protocol',
5712 'order': ['(ht|f)tps', '(ht|f)tp$', 'm3u8.*', '.*dash', 'websocket_frag', 'rtmpe?', '', 'mms|rtsp', 'ws|websocket', 'f4']},
5713 'vext': {'type': 'ordered', 'field': 'video_ext',
29ca4082 5714 'order': ('mp4', 'mov', 'webm', 'flv', '', 'none'),
5715 'order_free': ('webm', 'mp4', 'mov', 'flv', '', 'none')},
fbb73833 5716 'aext': {'type': 'ordered', 'regex': True, 'field': 'audio_ext',
5717 'order': ('m4a', 'aac', 'mp3', 'ogg', 'opus', 'web[am]', '', 'none'),
5718 'order_free': ('ogg', 'opus', 'web[am]', 'mp3', 'm4a', 'aac', '', 'none')},
d0d74b71 5719 'hidden': {'visible': False, 'forced': True, 'type': 'extractor', 'max': -1000},
5720 'aud_or_vid': {'visible': False, 'forced': True, 'type': 'multiple',
5721 'field': ('vcodec', 'acodec'),
5722 'function': lambda it: int(any(v != 'none' for v in it))},
5723 'ie_pref': {'priority': True, 'type': 'extractor'},
5724 'hasvid': {'priority': True, 'field': 'vcodec', 'type': 'boolean', 'not_in_list': ('none',)},
5725 'hasaud': {'field': 'acodec', 'type': 'boolean', 'not_in_list': ('none',)},
5726 'lang': {'convert': 'float', 'field': 'language_preference', 'default': -1},
5727 'quality': {'convert': 'float', 'default': -1},
5728 'filesize': {'convert': 'bytes'},
5729 'fs_approx': {'convert': 'bytes', 'field': 'filesize_approx'},
5730 'id': {'convert': 'string', 'field': 'format_id'},
5731 'height': {'convert': 'float_none'},
5732 'width': {'convert': 'float_none'},
5733 'fps': {'convert': 'float_none'},
5734 'channels': {'convert': 'float_none', 'field': 'audio_channels'},
5735 'tbr': {'convert': 'float_none'},
5736 'vbr': {'convert': 'float_none'},
5737 'abr': {'convert': 'float_none'},
5738 'asr': {'convert': 'float_none'},
5739 'source': {'convert': 'float', 'field': 'source_preference', 'default': -1},
5740
5741 'codec': {'type': 'combined', 'field': ('vcodec', 'acodec')},
812cdfa0 5742 'br': {'type': 'multiple', 'field': ('tbr', 'vbr', 'abr'), 'convert': 'float_none',
eedda525 5743 'function': lambda it: next(filter(None, it), None)},
812cdfa0 5744 'size': {'type': 'multiple', 'field': ('filesize', 'fs_approx'), 'convert': 'bytes',
eedda525 5745 'function': lambda it: next(filter(None, it), None)},
d0d74b71 5746 'ext': {'type': 'combined', 'field': ('vext', 'aext')},
5747 'res': {'type': 'multiple', 'field': ('height', 'width'),
5748 'function': lambda it: (lambda l: min(l) if l else 0)(tuple(filter(None, it)))},
5749
5750 # Actual field names
5751 'format_id': {'type': 'alias', 'field': 'id'},
5752 'preference': {'type': 'alias', 'field': 'ie_pref'},
5753 'language_preference': {'type': 'alias', 'field': 'lang'},
5754 'source_preference': {'type': 'alias', 'field': 'source'},
5755 'protocol': {'type': 'alias', 'field': 'proto'},
5756 'filesize_approx': {'type': 'alias', 'field': 'fs_approx'},
5757 'audio_channels': {'type': 'alias', 'field': 'channels'},
5758
5759 # Deprecated
5760 'dimension': {'type': 'alias', 'field': 'res', 'deprecated': True},
5761 'resolution': {'type': 'alias', 'field': 'res', 'deprecated': True},
5762 'extension': {'type': 'alias', 'field': 'ext', 'deprecated': True},
5763 'bitrate': {'type': 'alias', 'field': 'br', 'deprecated': True},
5764 'total_bitrate': {'type': 'alias', 'field': 'tbr', 'deprecated': True},
5765 'video_bitrate': {'type': 'alias', 'field': 'vbr', 'deprecated': True},
5766 'audio_bitrate': {'type': 'alias', 'field': 'abr', 'deprecated': True},
5767 'framerate': {'type': 'alias', 'field': 'fps', 'deprecated': True},
5768 'filesize_estimate': {'type': 'alias', 'field': 'size', 'deprecated': True},
5769 'samplerate': {'type': 'alias', 'field': 'asr', 'deprecated': True},
5770 'video_ext': {'type': 'alias', 'field': 'vext', 'deprecated': True},
5771 'audio_ext': {'type': 'alias', 'field': 'aext', 'deprecated': True},
5772 'video_codec': {'type': 'alias', 'field': 'vcodec', 'deprecated': True},
5773 'audio_codec': {'type': 'alias', 'field': 'acodec', 'deprecated': True},
5774 'video': {'type': 'alias', 'field': 'hasvid', 'deprecated': True},
5775 'has_video': {'type': 'alias', 'field': 'hasvid', 'deprecated': True},
5776 'audio': {'type': 'alias', 'field': 'hasaud', 'deprecated': True},
5777 'has_audio': {'type': 'alias', 'field': 'hasaud', 'deprecated': True},
5778 'extractor': {'type': 'alias', 'field': 'ie_pref', 'deprecated': True},
5779 'extractor_preference': {'type': 'alias', 'field': 'ie_pref', 'deprecated': True},
5780 }
5781
5782 def __init__(self, ydl, field_preference):
5783 self.ydl = ydl
5784 self._order = []
5785 self.evaluate_params(self.ydl.params, field_preference)
5786 if ydl.params.get('verbose'):
5787 self.print_verbose_info(self.ydl.write_debug)
5788
5789 def _get_field_setting(self, field, key):
5790 if field not in self.settings:
5791 if key in ('forced', 'priority'):
5792 return False
5793 self.ydl.deprecated_feature(f'Using arbitrary fields ({field}) for format sorting is '
5794 'deprecated and may be removed in a future version')
5795 self.settings[field] = {}
5796 propObj = self.settings[field]
5797 if key not in propObj:
5798 type = propObj.get('type')
5799 if key == 'field':
5800 default = 'preference' if type == 'extractor' else (field,) if type in ('combined', 'multiple') else field
5801 elif key == 'convert':
5802 default = 'order' if type == 'ordered' else 'float_string' if field else 'ignore'
5803 else:
5804 default = {'type': 'field', 'visible': True, 'order': [], 'not_in_list': (None,)}.get(key, None)
5805 propObj[key] = default
5806 return propObj[key]
5807
5808 def _resolve_field_value(self, field, value, convertNone=False):
5809 if value is None:
5810 if not convertNone:
5811 return None
5812 else:
5813 value = value.lower()
5814 conversion = self._get_field_setting(field, 'convert')
5815 if conversion == 'ignore':
5816 return None
5817 if conversion == 'string':
5818 return value
5819 elif conversion == 'float_none':
5820 return float_or_none(value)
5821 elif conversion == 'bytes':
5822 return parse_bytes(value)
5823 elif conversion == 'order':
5824 order_list = (self._use_free_order and self._get_field_setting(field, 'order_free')) or self._get_field_setting(field, 'order')
5825 use_regex = self._get_field_setting(field, 'regex')
5826 list_length = len(order_list)
5827 empty_pos = order_list.index('') if '' in order_list else list_length + 1
5828 if use_regex and value is not None:
5829 for i, regex in enumerate(order_list):
5830 if regex and re.match(regex, value):
5831 return list_length - i
5832 return list_length - empty_pos # not in list
5833 else: # not regex or value = None
5834 return list_length - (order_list.index(value) if value in order_list else empty_pos)
5835 else:
5836 if value.isnumeric():
5837 return float(value)
5838 else:
5839 self.settings[field]['convert'] = 'string'
5840 return value
5841
5842 def evaluate_params(self, params, sort_extractor):
5843 self._use_free_order = params.get('prefer_free_formats', False)
5844 self._sort_user = params.get('format_sort', [])
5845 self._sort_extractor = sort_extractor
5846
5847 def add_item(field, reverse, closest, limit_text):
5848 field = field.lower()
5849 if field in self._order:
5850 return
5851 self._order.append(field)
5852 limit = self._resolve_field_value(field, limit_text)
5853 data = {
5854 'reverse': reverse,
5855 'closest': False if limit is None else closest,
5856 'limit_text': limit_text,
5857 'limit': limit}
5858 if field in self.settings:
5859 self.settings[field].update(data)
5860 else:
5861 self.settings[field] = data
5862
5863 sort_list = (
5864 tuple(field for field in self.default if self._get_field_setting(field, 'forced'))
5865 + (tuple() if params.get('format_sort_force', False)
5866 else tuple(field for field in self.default if self._get_field_setting(field, 'priority')))
5867 + tuple(self._sort_user) + tuple(sort_extractor) + self.default)
5868
5869 for item in sort_list:
5870 match = re.match(self.regex, item)
5871 if match is None:
5872 raise ExtractorError('Invalid format sort string "%s" given by extractor' % item)
5873 field = match.group('field')
5874 if field is None:
5875 continue
5876 if self._get_field_setting(field, 'type') == 'alias':
5877 alias, field = field, self._get_field_setting(field, 'field')
5878 if self._get_field_setting(alias, 'deprecated'):
5879 self.ydl.deprecated_feature(f'Format sorting alias {alias} is deprecated and may '
5880 f'be removed in a future version. Please use {field} instead')
5881 reverse = match.group('reverse') is not None
5882 closest = match.group('separator') == '~'
5883 limit_text = match.group('limit')
5884
5885 has_limit = limit_text is not None
5886 has_multiple_fields = self._get_field_setting(field, 'type') == 'combined'
5887 has_multiple_limits = has_limit and has_multiple_fields and not self._get_field_setting(field, 'same_limit')
5888
5889 fields = self._get_field_setting(field, 'field') if has_multiple_fields else (field,)
5890 limits = limit_text.split(':') if has_multiple_limits else (limit_text,) if has_limit else tuple()
5891 limit_count = len(limits)
5892 for (i, f) in enumerate(fields):
5893 add_item(f, reverse, closest,
5894 limits[i] if i < limit_count
5895 else limits[0] if has_limit and not has_multiple_limits
5896 else None)
5897
5898 def print_verbose_info(self, write_debug):
5899 if self._sort_user:
5900 write_debug('Sort order given by user: %s' % ', '.join(self._sort_user))
5901 if self._sort_extractor:
5902 write_debug('Sort order given by extractor: %s' % ', '.join(self._sort_extractor))
5903 write_debug('Formats sorted by: %s' % ', '.join(['%s%s%s' % (
5904 '+' if self._get_field_setting(field, 'reverse') else '', field,
5905 '%s%s(%s)' % ('~' if self._get_field_setting(field, 'closest') else ':',
5906 self._get_field_setting(field, 'limit_text'),
5907 self._get_field_setting(field, 'limit'))
5908 if self._get_field_setting(field, 'limit_text') is not None else '')
5909 for field in self._order if self._get_field_setting(field, 'visible')]))
5910
5911 def _calculate_field_preference_from_value(self, format, field, type, value):
5912 reverse = self._get_field_setting(field, 'reverse')
5913 closest = self._get_field_setting(field, 'closest')
5914 limit = self._get_field_setting(field, 'limit')
5915
5916 if type == 'extractor':
5917 maximum = self._get_field_setting(field, 'max')
5918 if value is None or (maximum is not None and value >= maximum):
5919 value = -1
5920 elif type == 'boolean':
5921 in_list = self._get_field_setting(field, 'in_list')
5922 not_in_list = self._get_field_setting(field, 'not_in_list')
5923 value = 0 if ((in_list is None or value in in_list) and (not_in_list is None or value not in not_in_list)) else -1
5924 elif type == 'ordered':
5925 value = self._resolve_field_value(field, value, True)
5926
5927 # try to convert to number
5928 val_num = float_or_none(value, default=self._get_field_setting(field, 'default'))
5929 is_num = self._get_field_setting(field, 'convert') != 'string' and val_num is not None
5930 if is_num:
5931 value = val_num
5932
5933 return ((-10, 0) if value is None
5934 else (1, value, 0) if not is_num # if a field has mixed strings and numbers, strings are sorted higher
5935 else (0, -abs(value - limit), value - limit if reverse else limit - value) if closest
5936 else (0, value, 0) if not reverse and (limit is None or value <= limit)
5937 else (0, -value, 0) if limit is None or (reverse and value == limit) or value > limit
5938 else (-1, value, 0))
5939
5940 def _calculate_field_preference(self, format, field):
5941 type = self._get_field_setting(field, 'type') # extractor, boolean, ordered, field, multiple
5942 get_value = lambda f: format.get(self._get_field_setting(f, 'field'))
5943 if type == 'multiple':
5944 type = 'field' # Only 'field' is allowed in multiple for now
5945 actual_fields = self._get_field_setting(field, 'field')
5946
5947 value = self._get_field_setting(field, 'function')(get_value(f) for f in actual_fields)
5948 else:
5949 value = get_value(field)
5950 return self._calculate_field_preference_from_value(format, field, type, value)
5951
5952 def calculate_preference(self, format):
5953 # Determine missing protocol
5954 if not format.get('protocol'):
5955 format['protocol'] = determine_protocol(format)
5956
5957 # Determine missing ext
5958 if not format.get('ext') and 'url' in format:
5959 format['ext'] = determine_ext(format['url'])
5960 if format.get('vcodec') == 'none':
5961 format['audio_ext'] = format['ext'] if format.get('acodec') != 'none' else 'none'
5962 format['video_ext'] = 'none'
5963 else:
5964 format['video_ext'] = format['ext']
5965 format['audio_ext'] = 'none'
5966 # if format.get('preference') is None and format.get('ext') in ('f4f', 'f4m'): # Not supported?
5967 # format['preference'] = -1000
5968
5424dbaf
L
5969 if format.get('preference') is None and format.get('ext') == 'flv' and re.match('[hx]265|he?vc?', format.get('vcodec') or ''):
5970 # HEVC-over-FLV is out of spec per FLV's original specification
5971 # ref. https://trac.ffmpeg.org/ticket/6389
5972 # ref. https://github.com/yt-dlp/yt-dlp/pull/5821
5973 format['preference'] = -100
5974
d0d74b71 5975 # Determine missing bitrates
eedda525 5976 if format.get('vcodec') == 'none':
5977 format['vbr'] = 0
5978 if format.get('acodec') == 'none':
5979 format['abr'] = 0
5980 if not format.get('vbr') and format.get('vcodec') != 'none':
5981 format['vbr'] = try_call(lambda: format['tbr'] - format['abr']) or None
5982 if not format.get('abr') and format.get('acodec') != 'none':
5983 format['abr'] = try_call(lambda: format['tbr'] - format['vbr']) or None
5984 if not format.get('tbr'):
5985 format['tbr'] = try_call(lambda: format['vbr'] + format['abr']) or None
d0d74b71 5986
5987 return tuple(self._calculate_field_preference(format, field) for field in self._order)