import asyncio
import atexit
import base64
import binascii
import calendar
import codecs
import collections
import collections.abc
import contextlib
import datetime
import email.header
import email.utils
import errno
import gzip
import hashlib
import hmac
import html.entities
import html.parser
import http.client
import http.cookiejar
import inspect
import io
import itertools
import json
import locale
import math
import mimetypes
import operator
import os
import platform
import random
import re
import shlex
import socket
import ssl
import struct
import subprocess
import sys
import tempfile
import time
import traceback
import types
import unicodedata
import urllib.error
import urllib.parse
import urllib.request
import xml.etree.ElementTree
import zlib

from . import traversal

from ..compat import functools  # isort: split
from ..compat import (
    compat_etree_fromstring,
    compat_expanduser,
    compat_HTMLParseError,
    compat_os_name,
    compat_shlex_quote,
)
from ..dependencies import brotli, certifi, websockets, xattr
from ..socks import ProxyType, sockssocket

__name__ = __name__.rsplit('.', 1)[0]  # Pretend to be the parent module

# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))


def random_user_agent():
    _USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
    _CHROME_VERSIONS = (
        '90.0.4430.212',
        '90.0.4430.24',
        '90.0.4430.70',
        '90.0.4430.72',
        '90.0.4430.85',
        '90.0.4430.93',
        '91.0.4472.101',
        '91.0.4472.106',
        '91.0.4472.114',
        '91.0.4472.124',
        '91.0.4472.164',
        '91.0.4472.19',
        '91.0.4472.77',
        '92.0.4515.107',
        '92.0.4515.115',
        '92.0.4515.131',
        '92.0.4515.159',
        '92.0.4515.43',
        '93.0.4556.0',
        '93.0.4577.15',
        '93.0.4577.63',
        '93.0.4577.82',
        '94.0.4606.41',
        '94.0.4606.54',
        '94.0.4606.61',
        '94.0.4606.71',
        '94.0.4606.81',
        '94.0.4606.85',
        '95.0.4638.17',
        '95.0.4638.50',
        '95.0.4638.54',
        '95.0.4638.69',
        '95.0.4638.74',
        '96.0.4664.18',
        '96.0.4664.45',
        '96.0.4664.55',
        '96.0.4664.93',
        '97.0.4692.20',
    )
    return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)


SUPPORTED_ENCODINGS = [
    'gzip', 'deflate'
]
if brotli:
    SUPPORTED_ENCODINGS.append('br')

std_headers = {
    'User-Agent': random_user_agent(),
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-us,en;q=0.5',
    'Sec-Fetch-Mode': 'navigate',
}


USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}


class NO_DEFAULT:
    pass


def IDENTITY(x):
    return x


ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
    # these follow the genitive grammatical case (dopełniacz)
    # some websites might be using nominative, which will require another month list
    # https://en.wikibooks.org/wiki/Polish/Noun_cases
    'pl': ['stycznia', 'lutego', 'marca', 'kwietnia', 'maja', 'czerwca',
           'lipca', 'sierpnia', 'września', 'października', 'listopada', 'grudnia'],
}

# From https://github.com/python/cpython/blob/3.11/Lib/email/_parseaddr.py#L36-L42
TIMEZONE_NAMES = {
    'UT': 0, 'UTC': 0, 'GMT': 0, 'Z': 0,
    'AST': -4, 'ADT': -3,  # Atlantic (used in Canada)
    'EST': -5, 'EDT': -4,  # Eastern
    'CST': -6, 'CDT': -5,  # Central
    'MST': -7, 'MDT': -6,  # Mountain
    'PST': -8, 'PDT': -7   # Pacific
}

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))

DATE_FORMATS = (
    '%d %B %Y',
    '%d %b %Y',
    '%B %d %Y',
    '%B %dst %Y',
    '%B %dnd %Y',
    '%B %drd %Y',
    '%B %dth %Y',
    '%b %d %Y',
    '%b %dst %Y',
    '%b %dnd %Y',
    '%b %drd %Y',
    '%b %dth %Y',
    '%b %dst %Y %I:%M',
    '%b %dnd %Y %I:%M',
    '%b %drd %Y %I:%M',
    '%b %dth %Y %I:%M',
    '%Y %m %d',
    '%Y-%m-%d',
    '%Y.%m.%d.',
    '%Y/%m/%d',
    '%Y/%m/%d %H:%M',
    '%Y/%m/%d %H:%M:%S',
    '%Y%m%d%H%M',
    '%Y%m%d%H%M%S',
    '%Y%m%d',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S.%f',
    '%Y-%m-%d %H:%M:%S:%f',
    '%d.%m.%Y %H:%M',
    '%d.%m.%Y %H.%M',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%Y-%m-%dT%H:%M',
    '%b %d %Y at %H:%M',
    '%b %d %Y at %H:%M:%S',
    '%B %d %Y at %H:%M',
    '%B %d %Y at %H:%M:%S',
    '%H:%M %d-%b-%Y',
)

DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
    '%d-%m-%Y %H:%M',
])

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])

PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>\s*(?P<json_ld>{.+?}|\[.+?\])\s*</script>'

NUMBER_RE = r'\d+(?:\.\d+)?'


@functools.cache
def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref


def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    tf = tempfile.NamedTemporaryFile(
        prefix=f'{os.path.basename(fn)}.', dir=os.path.dirname(fn),
        suffix='.tmp', delete=False, mode='w', encoding='utf-8')

    try:
        with tf:
            json.dump(obj, tf, ensure_ascii=False)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            with contextlib.suppress(OSError):
                os.unlink(fn)
        with contextlib.suppress(OSError):
            mask = os.umask(0)
            os.umask(mask)
            os.chmod(tf.name, 0o666 & ~mask)
        os.rename(tf.name, fn)
    except Exception:
        with contextlib.suppress(OSError):
            os.remove(tf.name)
        raise


def find_xpath_attr(node, xpath, key, val=None):
    """ Find the xpath xpath[@key=val] """
    assert re.match(r'^[a-zA-Z_-]+$', key)
    expr = xpath + ('[@%s]' % key if val is None else f"[@{key}='{val}']")
    return node.find(expr)

# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter


def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)
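
# Editor's illustration, not part of the upstream source; expected behaviour:
#   >>> xpath_with_ns('ns:media/ns:title', {'ns': 'http://example.com'})
#   '{http://example.com}media/{http://example.com}title'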


def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(xpath)

    if isinstance(xpath, str):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n


def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text


def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = f'{xpath}[@{key}]' if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]
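
# Editor's illustration, not part of the upstream source; expected behaviour:
#   >>> doc = xml.etree.ElementTree.fromstring('<root><a x="1">t</a></root>')
#   >>> xpath_text(doc, 'a'), xpath_attr(doc, 'a', 'x')
#   ('t', '1')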


def get_element_by_id(id, html, **kwargs):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html, **kwargs)


def get_element_html_by_id(id, html, **kwargs):
    """Return the html of the tag with the specified ID in the passed HTML document"""
    return get_element_html_by_attribute('id', id, html, **kwargs)


def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None
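
# Editor's illustration, not part of the upstream source; expected behaviour:
#   >>> get_element_by_class('foo', '<div class="foo bar">text</div>')
#   'text'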


def get_element_html_by_class(class_name, html):
    """Return the html of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_html_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_by_attribute(attribute, value, html, **kwargs):
    retval = get_elements_by_attribute(attribute, value, html, **kwargs)
    return retval[0] if retval else None


def get_element_html_by_attribute(attribute, value, html, **kargs):
    retval = get_elements_html_by_attribute(attribute, value, html, **kargs)
    return retval[0] if retval else None


def get_elements_by_class(class_name, html, **kargs):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_by_attribute(
        'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_html_by_class(class_name, html):
    """Return the html of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_html_by_attribute(
        'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_by_attribute(*args, **kwargs):
    """Return the content of the tag with the specified attribute in the passed HTML document"""
    return [content for content, _ in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_html_by_attribute(*args, **kwargs):
    """Return the html of the tag with the specified attribute in the passed HTML document"""
    return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_text_and_html_by_attribute(attribute, value, html, *, tag=r'[\w:.-]+', escape_value=True):
    """
    Return the text (content) and the html (whole) of the tag with the specified
    attribute in the passed HTML document
    """
    if not value:
        return

    quote = '' if re.match(r'''[\s"'`=<>]''', value) else '?'

    value = re.escape(value) if escape_value else value

    partial_element_re = rf'''(?x)
        <(?P<tag>{tag})
         (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
         \s{re.escape(attribute)}\s*=\s*(?P<_q>['"]{quote})(?-x:{value})(?P=_q)
        '''

    for m in re.finditer(partial_element_re, html):
        content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():])

        yield (
            unescapeHTML(re.sub(r'^(?P<q>["\'])(?P<content>.*)(?P=q)$', r'\g<content>', content, flags=re.DOTALL)),
            whole
        )


class HTMLBreakOnClosingTagParser(html.parser.HTMLParser):
    """
    HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
    closing tag for the first opening tag it has encountered, and can be used
    as a context manager
    """

    class HTMLBreakOnClosingTagException(Exception):
        pass

    def __init__(self):
        self.tagstack = collections.deque()
        html.parser.HTMLParser.__init__(self)

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def close(self):
        # handle_endtag does not return upon raising HTMLBreakOnClosingTagException,
        # so data remains buffered; we no longer have any interest in it, thus
        # override this method to discard it
        pass

    def handle_starttag(self, tag, _):
        self.tagstack.append(tag)

    def handle_endtag(self, tag):
        if not self.tagstack:
            raise compat_HTMLParseError('no tags in the stack')
        while self.tagstack:
            inner_tag = self.tagstack.pop()
            if inner_tag == tag:
                break
        else:
            raise compat_HTMLParseError(f'matching opening tag for closing {tag} tag not found')
        if not self.tagstack:
            raise self.HTMLBreakOnClosingTagException()


# XXX: This should be far less strict
def get_element_text_and_html_by_tag(tag, html):
    """
    For the first element with the specified tag in the passed HTML document
    return its content (text) and the whole element (html)
    """
    def find_or_raise(haystack, needle, exc):
        try:
            return haystack.index(needle)
        except ValueError:
            raise exc
    closing_tag = f'</{tag}>'
    whole_start = find_or_raise(
        html, f'<{tag}', compat_HTMLParseError(f'opening {tag} tag not found'))
    content_start = find_or_raise(
        html[whole_start:], '>', compat_HTMLParseError(f'malformed opening {tag} tag'))
    content_start += whole_start + 1
    with HTMLBreakOnClosingTagParser() as parser:
        parser.feed(html[whole_start:content_start])
        if not parser.tagstack or parser.tagstack[0] != tag:
            raise compat_HTMLParseError(f'parser did not match opening {tag} tag')
        offset = content_start
        while offset < len(html):
            next_closing_tag_start = find_or_raise(
                html[offset:], closing_tag,
                compat_HTMLParseError(f'closing {tag} tag not found'))
            next_closing_tag_end = next_closing_tag_start + len(closing_tag)
            try:
                parser.feed(html[offset:offset + next_closing_tag_end])
                offset += next_closing_tag_end
            except HTMLBreakOnClosingTagParser.HTMLBreakOnClosingTagException:
                return html[content_start:offset + next_closing_tag_start], \
                    html[whole_start:offset + next_closing_tag_end]
        raise compat_HTMLParseError('unexpected end of html')
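
# Editor's illustration, not part of the upstream source; expected behaviour:
#   >>> get_element_text_and_html_by_tag('b', 'x <b>bold</b> y')
#   ('bold', '<b>bold</b>')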


class HTMLAttributeParser(html.parser.HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""

    def __init__(self):
        self.attrs = {}
        html.parser.HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)
        raise compat_HTMLParseError('done')


class HTMLListAttrsParser(html.parser.HTMLParser):
    """HTML parser to gather the attributes for the elements of a list"""

    def __init__(self):
        html.parser.HTMLParser.__init__(self)
        self.items = []
        self._level = 0

    def handle_starttag(self, tag, attrs):
        if tag == 'li' and self._level == 0:
            self.items.append(dict(attrs))
        self._level += 1

    def handle_endtag(self, tag):
        self._level -= 1


def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    """
    parser = HTMLAttributeParser()
    with contextlib.suppress(compat_HTMLParseError):
        parser.feed(html_element)
        parser.close()
    return parser.attrs


def parse_list(webpage):
    """Given a string for a series of HTML <li> elements,
    return a list of dictionaries of their attributes"""
    parser = HTMLListAttrsParser()
    parser.feed(webpage)
    parser.close()
    return parser.items


def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    html = re.sub(r'\s+', ' ', html)
    html = re.sub(r'(?u)\s?<\s?br\s?/?\s?>\s?', '\n', html)
    html = re.sub(r'(?u)<\s?/\s?p\s?>\s?<\s?p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()


class LenientJSONDecoder(json.JSONDecoder):
    # TODO: Write tests
    def __init__(self, *args, transform_source=None, ignore_extra=False, close_objects=0, **kwargs):
        self.transform_source, self.ignore_extra = transform_source, ignore_extra
        self._close_attempts = 2 * close_objects
        super().__init__(*args, **kwargs)

    @staticmethod
    def _close_object(err):
        doc = err.doc[:err.pos]
        # We need to add comma first to get the correct error message
        if err.msg.startswith('Expecting \',\''):
            return doc + ','
        elif not doc.endswith(','):
            return

        if err.msg.startswith('Expecting property name'):
            return doc[:-1] + '}'
        elif err.msg.startswith('Expecting value'):
            return doc[:-1] + ']'

    def decode(self, s):
        if self.transform_source:
            s = self.transform_source(s)
        for attempt in range(self._close_attempts + 1):
            try:
                if self.ignore_extra:
                    return self.raw_decode(s.lstrip())[0]
                return super().decode(s)
            except json.JSONDecodeError as e:
                if e.pos is None:
                    raise
                elif attempt < self._close_attempts:
                    s = self._close_object(e)
                    if s is not None:
                        continue
                raise type(e)(f'{e.msg} in {s[e.pos-10:e.pos+10]!r}', s, e.pos)
        assert False, 'Too many attempts to decode JSON'
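
# Editor's illustration, not part of the upstream source; with close_objects set,
# truncated JSON may be repaired by closing the open containers, e.g.:
#   >>> LenientJSONDecoder(close_objects=1).decode('{"a": 1')
#   {'a': 1}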


def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    if filename == '-':
        if sys.platform == 'win32':
            import msvcrt

            # stdout may be any IO stream, e.g. when using contextlib.redirect_stdout
            with contextlib.suppress(io.UnsupportedOperation):
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)

    for attempt in range(2):
        try:
            try:
                if sys.platform == 'win32':
                    # FIXME: An exclusive lock also locks the file from being read.
                    # Since windows locks are mandatory, don't lock the file on windows (for now).
                    # Ref: https://github.com/yt-dlp/yt-dlp/issues/3124
                    raise LockingUnsupportedError()
                stream = locked_file(filename, open_mode, block=False).__enter__()
            except OSError:
                stream = open(filename, open_mode)
            return stream, filename
        except OSError as err:
            if attempt or err.errno in (errno.EACCES,):
                raise
            old_filename, filename = filename, sanitize_path(filename)
            if old_filename == filename:
                raise


def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp


def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
    """Sanitizes a string so it could be used as part of a filename.
    @param restricted   Use a stricter subset of allowed characters
    @param is_id        Whether this is an ID that should be kept unchanged if possible.
                        If unset, yt-dlp's new sanitization rules are in effect
    """
    if s == '':
        return ''

    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        elif not restricted and char == '\n':
            return '\0 '
        elif is_id is NO_DEFAULT and not restricted and char in '"*:<>?|/\\':
            # Replace with their full-width unicode counterparts
            return {'/': '\u29F8', '\\': '\u29f9'}.get(char, chr(ord(char) + 0xfee0))
        elif char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '\0_\0-' if restricted else '\0 \0-'
        elif char in '\\/|*<>':
            return '\0_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace() or ord(char) > 127):
            return '\0_'
        return char

    # Replace look-alike Unicode glyphs
    if restricted and (is_id is NO_DEFAULT or not is_id):
        s = unicodedata.normalize('NFKC', s)
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)  # Handle timestamps
    result = ''.join(map(replace_insane, s))
    if is_id is NO_DEFAULT:
        result = re.sub(r'(\0.)(?:(?=\1)..)+', r'\1', result)  # Remove repeated substitute chars
        STRIP_RE = r'(?:\0.|[ _-])*'
        result = re.sub(f'^\0.{STRIP_RE}|{STRIP_RE}\0.$', '', result)  # Remove substitute chars from start/end
    result = result.replace('\0', '') or '_'

    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
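
# Editor's illustration, not part of the upstream source; expected behaviour:
#   >>> sanitize_filename('café: menu', restricted=True)
#   'cafe_-_menu'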


def sanitize_path(s, force=False):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform == 'win32':
        force = False
        drive_or_unc, _ = os.path.splitdrive(s)
    elif force:
        drive_or_unc = ''
    else:
        return s

    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    elif force and s and s[0] == os.path.sep:
        sanitized_path.insert(0, os.path.sep)
    return os.path.join(*sanitized_path)


def sanitize_url(url, *, scheme='http'):
    # Prepend protocol-less URLs with `http:` scheme in order to mitigate
    # the number of unwanted failures due to missing protocol
    if url is None:
        return
    elif url.startswith('//'):
        return f'{scheme}:{url}'
    # Fix some common typos seen so far
    COMMON_TYPOS = (
        # https://github.com/ytdl-org/youtube-dl/issues/15649
        (r'^httpss://', r'https://'),
        # https://bx1.be/lives/direct-tv/
        (r'^rmtp([es]?)://', r'rtmp\1://'),
    )
    for mistake, fixup in COMMON_TYPOS:
        if re.match(mistake, url):
            return re.sub(mistake, fixup, url)
    return url
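
# Editor's illustration, not part of the upstream source; expected behaviour:
#   >>> sanitize_url('//example.com/video')
#   'http://example.com/video'
#   >>> sanitize_url('httpss://example.com')
#   'https://example.com'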


def extract_basic_auth(url):
    parts = urllib.parse.urlsplit(url)
    if parts.username is None:
        return url, None
    url = urllib.parse.urlunsplit(parts._replace(netloc=(
        parts.hostname if parts.port is None
        else '%s:%d' % (parts.hostname, parts.port))))
    auth_payload = base64.b64encode(
        ('%s:%s' % (parts.username, parts.password or '')).encode())
    return url, f'Basic {auth_payload.decode()}'
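
# Editor's illustration, not part of the upstream source; expected behaviour:
#   >>> extract_basic_auth('http://user:pass@example.com/x')
#   ('http://example.com/x', 'Basic dXNlcjpwYXNz')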


def sanitized_Request(url, *args, **kwargs):
    url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
    if auth_header is not None:
        headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
        headers['Authorization'] = auth_header
    return urllib.request.Request(url, *args, **kwargs)


def expand_path(s):
    """Expand shell variables and ~"""
    return os.path.expandvars(compat_expanduser(s))


def orderedSet(iterable, *, lazy=False):
    """Remove all duplicates from the input iterable"""
    def _iter():
        seen = []  # Do not use set since the items can be unhashable
        for x in iterable:
            if x not in seen:
                seen.append(x)
                yield x

    return _iter() if lazy else list(_iter())
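
# Editor's illustration, not part of the upstream source; expected behaviour:
#   >>> orderedSet([1, 2, 1, 3, 2])
#   [1, 2, 3]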


def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in html.entities.name2codepoint:
        return chr(html.entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon.
    # E.g. '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in html.entities.html5:
        return html.entities.html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/ytdl-org/youtube-dl/issues/7518
        with contextlib.suppress(ValueError):
            return chr(int(numstr, base))

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert isinstance(s, str)

    return re.sub(
        r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
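
# Editor's illustration, not part of the upstream source; expected behaviour:
#   >>> unescapeHTML('&eacute;t&#233;')
#   'été'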


def escapeHTML(text):
    return (
        text
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;')
        .replace('"', '&quot;')
        .replace("'", '&#39;')
    )


def process_communicate_or_kill(p, *args, **kwargs):
    deprecation_warning(f'"{__name__}.process_communicate_or_kill" is deprecated and may be removed '
                        f'in a future version. Use "{__name__}.Popen.communicate_or_kill" instead')
    return Popen.communicate_or_kill(p, *args, **kwargs)


class Popen(subprocess.Popen):
    if sys.platform == 'win32':
        _startupinfo = subprocess.STARTUPINFO()
        _startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    else:
        _startupinfo = None

    @staticmethod
    def _fix_pyinstaller_ld_path(env):
        """Restore LD_LIBRARY_PATH when using PyInstaller
            Ref: https://github.com/pyinstaller/pyinstaller/blob/develop/doc/runtime-information.rst#ld_library_path--libpath-considerations
                 https://github.com/yt-dlp/yt-dlp/issues/4573
        """
        if not hasattr(sys, '_MEIPASS'):
            return

        def _fix(key):
            orig = env.get(f'{key}_ORIG')
            if orig is None:
                env.pop(key, None)
            else:
                env[key] = orig

        _fix('LD_LIBRARY_PATH')  # Linux
        _fix('DYLD_LIBRARY_PATH')  # macOS

    def __init__(self, *args, env=None, text=False, **kwargs):
        if env is None:
            env = os.environ.copy()
        self._fix_pyinstaller_ld_path(env)

        self.__text_mode = kwargs.get('encoding') or kwargs.get('errors') or text or kwargs.get('universal_newlines')
        if text is True:
            kwargs['universal_newlines'] = True  # For 3.6 compatibility
            kwargs.setdefault('encoding', 'utf-8')
            kwargs.setdefault('errors', 'replace')
        super().__init__(*args, env=env, **kwargs, startupinfo=self._startupinfo)

    def communicate_or_kill(self, *args, **kwargs):
        try:
            return self.communicate(*args, **kwargs)
        except BaseException:  # Including KeyboardInterrupt
            self.kill(timeout=None)
            raise

    def kill(self, *, timeout=0):
        super().kill()
        if timeout != 0:
            self.wait(timeout=timeout)

    @classmethod
    def run(cls, *args, timeout=None, **kwargs):
        with cls(*args, **kwargs) as proc:
            default = '' if proc.__text_mode else b''
            stdout, stderr = proc.communicate_or_kill(timeout=timeout)
            return stdout or default, stderr or default, proc.returncode
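
    # Editor's illustration, not part of the upstream source; a typical call
    # (assumes an `ffprobe` binary is available on PATH):
    #   stdout, stderr, returncode = Popen.run(
    #       ['ffprobe', '-version'], text=True,
    #       stdout=subprocess.PIPE, stderr=subprocess.PIPE)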


def encodeArgument(s):
    # Legacy code that uses byte strings
    # Uncomment the following line after fixing all post processors
    # assert isinstance(s, str), 'Internal error: %r should be of type %r, is %r' % (s, str, type(s))
    return s if isinstance(s, str) else s.decode('ascii')


_timetuple = collections.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))


def timetuple_from_msec(msec):
    secs, msec = divmod(msec, 1000)
    mins, secs = divmod(secs, 60)
    hrs, mins = divmod(mins, 60)
    return _timetuple(hrs, mins, secs, msec)


def formatSeconds(secs, delim=':', msec=False):
    time = timetuple_from_msec(secs * 1000)
    if time.hours:
        ret = '%d%s%02d%s%02d' % (time.hours, delim, time.minutes, delim, time.seconds)
    elif time.minutes:
        ret = '%d%s%02d' % (time.minutes, delim, time.seconds)
    else:
        ret = '%d' % time.seconds
    return '%s.%03d' % (ret, time.milliseconds) if msec else ret
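
# Editor's illustration, not part of the upstream source; expected behaviour:
#   >>> timetuple_from_msec(90061001)
#   Time(hours=25, minutes=1, seconds=1, milliseconds=1)
#   >>> formatSeconds(3661)
#   '1:01:01'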


def _ssl_load_windows_store_certs(ssl_context, storename):
    # Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
    try:
        certs = [cert for cert, encoding, trust in ssl.enum_certificates(storename)
                 if encoding == 'x509_asn' and (
                     trust is True or ssl.Purpose.SERVER_AUTH.oid in trust)]
    except PermissionError:
        return
    for cert in certs:
        with contextlib.suppress(ssl.SSLError):
            ssl_context.load_verify_locations(cadata=cert)


def make_HTTPS_handler(params, **kwargs):
    opts_check_certificate = not params.get('nocheckcertificate')
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = opts_check_certificate
    if params.get('legacyserverconnect'):
        context.options |= 4  # SSL_OP_LEGACY_SERVER_CONNECT
        # Allow use of weaker ciphers in Python 3.10+. See https://bugs.python.org/issue43998
        context.set_ciphers('DEFAULT')
    elif (
        sys.version_info < (3, 10)
        and ssl.OPENSSL_VERSION_INFO >= (1, 1, 1)
        and not ssl.OPENSSL_VERSION.startswith('LibreSSL')
    ):
        # Backport the default SSL ciphers and minimum TLS version settings from Python 3.10 [1].
        # This is to ensure consistent behavior across Python versions, and help avoid fingerprinting
        # in some situations [2][3].
        # Python 3.10 only supports OpenSSL 1.1.1+ [4]. Because this change is likely
        # untested on older versions, we only apply this to OpenSSL 1.1.1+ to be safe.
        # LibreSSL is excluded until further investigation due to cipher support issues [5][6].
        # 1. https://github.com/python/cpython/commit/e983252b516edb15d4338b0a47631b59ef1e2536
        # 2. https://github.com/yt-dlp/yt-dlp/issues/4627
        # 3. https://github.com/yt-dlp/yt-dlp/pull/5294
        # 4. https://peps.python.org/pep-0644/
        # 5. https://peps.python.org/pep-0644/#libressl-support
        # 6. https://github.com/yt-dlp/yt-dlp/commit/5b9f253fa0aee996cf1ed30185d4b502e00609c4#commitcomment-89054368
        context.set_ciphers('@SECLEVEL=2:ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES:DHE+AES:!aNULL:!eNULL:!aDSS:!SHA1:!AESCCM')
        context.minimum_version = ssl.TLSVersion.TLSv1_2

    context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
    if opts_check_certificate:
        if certifi and 'no-certifi' not in params.get('compat_opts', []):
            context.load_verify_locations(cafile=certifi.where())
        else:
            try:
                context.load_default_certs()
                # Work around the issue in load_default_certs when there are bad certificates. See:
                # https://github.com/yt-dlp/yt-dlp/issues/1060,
                # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
            except ssl.SSLError:
                # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
                if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
                    for storename in ('CA', 'ROOT'):
                        _ssl_load_windows_store_certs(context, storename)
                context.set_default_verify_paths()

    client_certfile = params.get('client_certificate')
    if client_certfile:
        try:
            context.load_cert_chain(
                client_certfile, keyfile=params.get('client_certificate_key'),
                password=params.get('client_certificate_password'))
        except ssl.SSLError:
            raise YoutubeDLError('Unable to load client certificate')

    # Some servers may reject requests if ALPN extension is not sent. See:
    # https://github.com/python/cpython/issues/85140
    # https://github.com/yt-dlp/yt-dlp/issues/3878
    with contextlib.suppress(NotImplementedError):
        context.set_alpn_protocols(['http/1.1'])

    return YoutubeDLHTTPSHandler(params, context=context, **kwargs)


def bug_reports_message(before=';'):
    from ..update import REPOSITORY

    msg = (f'please report this issue on https://github.com/{REPOSITORY}/issues?q= , '
           'filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U')

    before = before.rstrip()
    if not before or before.endswith(('.', '!', '?')):
        msg = msg[0].title() + msg[1:]

    return (before + ' ' if before else '') + msg


class YoutubeDLError(Exception):
    """Base exception for YoutubeDL errors."""
    msg = None

    def __init__(self, msg=None):
        if msg is not None:
            self.msg = msg
        elif self.msg is None:
            self.msg = type(self).__name__
        super().__init__(self.msg)


network_exceptions = [urllib.error.URLError, http.client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
    network_exceptions.append(ssl.CertificateError)
network_exceptions = tuple(network_exceptions)


class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
        """
        if sys.exc_info()[0] in network_exceptions:
            expected = True

        self.orig_msg = str(msg)
        self.traceback = tb
        self.expected = expected
        self.cause = cause
        self.video_id = video_id
        self.ie = ie
        self.exc_info = sys.exc_info()  # preserve original exception
        if isinstance(self.exc_info[1], ExtractorError):
            self.exc_info = self.exc_info[1].exc_info
        super().__init__(self.__msg)

    @property
    def __msg(self):
        return ''.join((
            format_field(self.ie, None, '[%s] '),
            format_field(self.video_id, None, '%s: '),
            self.orig_msg,
            format_field(self.cause, None, ' (caused by %r)'),
            '' if self.expected else bug_reports_message()))

    def format_traceback(self):
        return join_nonempty(
            self.traceback and ''.join(traceback.format_tb(self.traceback)),
            self.cause and ''.join(traceback.format_exception(None, self.cause, self.cause.__traceback__)[1:]),
            delim='\n') or None

    def __setattr__(self, name, value):
        super().__setattr__(name, value)
        if getattr(self, 'msg', None) and name not in ('msg', 'args'):
            self.msg = self.__msg or type(self).__name__
            self.args = (self.msg, )  # Cannot be property


class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super().__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class GeoRestrictedError(ExtractorError):
    """Geographic restriction Error exception.

    This exception may be thrown when a video is not available from your
    geographic location due to geographic restrictions imposed by a website.
    """

    def __init__(self, msg, countries=None, **kwargs):
        kwargs['expected'] = True
        super().__init__(msg, **kwargs)
        self.countries = countries


class UserNotLive(ExtractorError):
    """Error when a channel/user is not live"""

    def __init__(self, msg=None, **kwargs):
        kwargs['expected'] = True
        super().__init__(msg or 'The channel is not currently live', **kwargs)


class DownloadError(YoutubeDLError):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super().__init__(msg)
        self.exc_info = exc_info


class EntryNotInPlaylist(YoutubeDLError):
    """Entry not in playlist exception.

    This exception will be thrown by YoutubeDL when a requested entry
    is not found in the playlist info_dict
    """
    msg = 'Entry not found in info'


class SameFileError(YoutubeDLError):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    msg = 'Fixed output name but more than one file to download'

    def __init__(self, filename=None):
        if filename is not None:
            self.msg += f': {filename}'
        super().__init__(self.msg)


class PostProcessingError(YoutubeDLError):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """


class DownloadCancelled(YoutubeDLError):
    """ Exception raised when the download queue should be interrupted """
    msg = 'The download was cancelled'


class ExistingVideoReached(DownloadCancelled):
    """ --break-on-existing triggered """
    msg = 'Encountered a video that is already in the archive, stopping due to --break-on-existing'


class RejectedVideoReached(DownloadCancelled):
    """ --break-match-filter triggered """
    msg = 'Encountered a video that did not match filter, stopping due to --break-match-filter'


class MaxDownloadsReached(DownloadCancelled):
    """ --max-downloads limit has been reached. """
    msg = 'Maximum number of downloads reached, stopping due to --max-downloads'


class ReExtractInfo(YoutubeDLError):
    """ Video info needs to be re-extracted. """

    def __init__(self, msg, expected=False):
        super().__init__(msg)
        self.expected = expected


class ThrottledDownload(ReExtractInfo):
    """ Download speed below --throttled-rate. """
    msg = 'The download speed is below throttle limit'

    def __init__(self):
        super().__init__(self.msg, expected=False)


class UnavailableVideoError(YoutubeDLError):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    msg = 'Unable to download video'

    def __init__(self, err=None):
        if err is not None:
            self.msg += f': {err}'
        super().__init__(self.msg)


class ContentTooShortError(YoutubeDLError):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        super().__init__(f'Downloaded {downloaded} bytes, expected {expected} bytes')
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected


class XAttrMetadataError(YoutubeDLError):
    def __init__(self, code=None, msg='Unknown error'):
        super().__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        if (self.code in (errno.ENOSPC, errno.EDQUOT)
                or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'


class XAttrUnavailableError(YoutubeDLError):
    pass


def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    hc = http_class(*args, **kwargs)
    source_address = ydl_handler._params.get('source_address')

    if source_address is not None:
        # This is to work around _create_connection() from socket where it will try all
        # address data from getaddrinfo() including IPv6. This filters the result from
        # getaddrinfo() based on the source_address value.
        # This is based on the cpython socket.create_connection() function.
        # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
        def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
            host, port = address
            err = None
            addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
            af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
            ip_addrs = [addr for addr in addrs if addr[0] == af]
            if addrs and not ip_addrs:
                ip_version = 'v4' if af == socket.AF_INET else 'v6'
                raise OSError(
                    "No remote IP%s addresses available for connect, can't use '%s' as source address"
                    % (ip_version, source_address[0]))
            for res in ip_addrs:
                af, socktype, proto, canonname, sa = res
                sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                        sock.settimeout(timeout)
                    sock.bind(source_address)
                    sock.connect(sa)
                    err = None  # Explicitly break reference cycle
                    return sock
                except OSError as _:
                    err = _
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            else:
                raise OSError('getaddrinfo returns an empty list')
        if hasattr(hc, '_create_connection'):
            hc._create_connection = _create_connection
        hc.source_address = (source_address, 0)

    return hc


class YoutubeDLHandler(urllib.request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped, deflated and
    brotli responses from web servers.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        urllib.request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        conn_class = http.client.HTTPConnection

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        if not data:
            return data
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def brotli(data):
        if not data:
            return data
        return brotli.decompress(data)

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in self._params.get('http_headers', std_headers).items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        if 'Youtubedl-no-compression' in req.headers:  # deprecated
            req.headers.pop('Youtubedl-no-compression', None)
            req.add_header('Accept-encoding', 'identity')

        if 'Accept-encoding' not in req.headers:
            req.add_header('Accept-encoding', ', '.join(SUPPORTED_ENCODINGS))

        return super().do_request_(req)

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except OSError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except OSError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = urllib.request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = urllib.request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        # brotli
        if resp.headers.get('Content-encoding', '') == 'br':
            resp = urllib.request.addinfourl(
                io.BytesIO(self.brotli(resp.read())), old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/ytdl-org/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                location = location.encode('iso-8859-1').decode()
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response


def make_socks_conn_class(base_class, socks_proxy):
    assert issubclass(base_class, (
        http.client.HTTPConnection, http.client.HTTPSConnection))

    url_components = urllib.parse.urlparse(socks_proxy)
    if url_components.scheme.lower() == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif url_components.scheme.lower() in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif url_components.scheme.lower() == 'socks4a':
        socks_type = ProxyType.SOCKS4A

    def unquote_if_non_empty(s):
        if not s:
            return s
        return urllib.parse.unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if isinstance(self.timeout, (int, float)):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            if isinstance(self, http.client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection


class YoutubeDLHTTPSHandler(urllib.request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        urllib.request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or http.client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        conn_class = self._https_conn_class

        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        try:
            return self.do_open(
                functools.partial(_create_http_connection, self, conn_class, True), req, **kwargs)
        except urllib.error.URLError as e:
            if (isinstance(e.reason, ssl.SSLError)
                    and getattr(e.reason, 'reason', None) == 'SSLV3_ALERT_HANDSHAKE_FAILURE'):
                raise YoutubeDLError('SSLV3_ALERT_HANDSHAKE_FAILURE: Try using --legacy-server-connect')
            raise


def is_path_like(f):
    return isinstance(f, (str, bytes, os.PathLike))
ac668111 1521class YoutubeDLCookieProcessor(urllib.request.HTTPCookieProcessor):
a6420bf5 1522 def __init__(self, cookiejar=None):
ac668111 1523 urllib.request.HTTPCookieProcessor.__init__(self, cookiejar)
a6420bf5
S
1524
1525 def http_response(self, request, response):
ac668111 1526 return urllib.request.HTTPCookieProcessor.http_response(self, request, response)
a6420bf5 1527
ac668111 1528 https_request = urllib.request.HTTPCookieProcessor.http_request
a6420bf5
S
1529 https_response = http_response
1530
1531
ac668111 1532class YoutubeDLRedirectHandler(urllib.request.HTTPRedirectHandler):
201c1459 1533 """YoutubeDL redirect handler
1534
1535 The code is based on HTTPRedirectHandler implementation from CPython [1].
1536
08916a49 1537 This redirect handler fixes and improves the logic to better align with RFC 7231
1538 and what browsers tend to do [2][3]
201c1459 1539
1540 1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
08916a49 1541 2. https://datatracker.ietf.org/doc/html/rfc7231
1542 3. https://github.com/python/cpython/issues/91306
201c1459 1543 """
1544
ac668111 1545 http_error_301 = http_error_303 = http_error_307 = http_error_308 = urllib.request.HTTPRedirectHandler.http_error_302
201c1459 1546
1547 def redirect_request(self, req, fp, code, msg, headers, newurl):
08916a49 1548 if code not in (301, 302, 303, 307, 308):
14f25df2 1549 raise urllib.error.HTTPError(req.full_url, code, msg, headers, fp)
afac4caa 1550
08916a49 1551 new_method = req.get_method()
1552 new_data = req.data
1553 remove_headers = []
afac4caa 1554 # A 303 must either use GET or HEAD for the subsequent request
1555 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.4
08916a49 1556 if code == 303 and req.get_method() != 'HEAD':
1557 new_method = 'GET'
afac4caa 1558 # 301 and 302 redirects are commonly turned into a GET from a POST
1559 # for subsequent requests by browsers, so we'll do the same.
1560 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.2
1561 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.3
08916a49 1562 elif code in (301, 302) and req.get_method() == 'POST':
1563 new_method = 'GET'
1564
1565 # only remove payload if method changed (e.g. POST to GET)
1566 if new_method != req.get_method():
1567 new_data = None
1568 remove_headers.extend(['Content-Length', 'Content-Type'])
1569
1570 new_headers = {k: v for k, v in req.headers.items() if k.title() not in remove_headers}
afac4caa 1571
ac668111 1572 return urllib.request.Request(
08916a49 1573 newurl, headers=new_headers, origin_req_host=req.origin_req_host,
1574 unverifiable=True, method=new_method, data=new_data)
fca6dba8
S
1575
1576
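
# Usage sketch (hypothetical `_example_redirect_request` helper, not part of the
# module API): a POST answered with a 303 (or 301/302) is retried as a GET with
# the payload dropped. The URLs are placeholders; nothing is fetched.
def _example_redirect_request():
    handler = YoutubeDLRedirectHandler()
    req = urllib.request.Request('http://example.com/submit', data=b'a=1', method='POST')
    new_req = handler.redirect_request(
        req, None, 303, 'See Other', {}, 'http://example.com/result')
    assert new_req.get_method() == 'GET'
    assert new_req.data is None
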
46f59e89
S
1577def extract_timezone(date_str):
1578 m = re.search(
f137e4c2 1579 r'''(?x)
1580 ^.{8,}? # >=8 char non-TZ prefix, if present
1581 (?P<tz>Z| # just the UTC Z, or
1582 (?:(?<=.\b\d{4}|\b\d{2}:\d\d)| # preceded by 4 digits or hh:mm or
1583 (?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d)) # not preceded by 3 alpha word or >= 4 alpha or 2 digits
1584 [ ]? # optional space
1585 (?P<sign>\+|-) # +/-
1586 (?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2}) # hh[:]mm
1587 $)
1588 ''', date_str)
46f59e89 1589 if not m:
8f53dc44 1590 m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
1591 timezone = TIMEZONE_NAMES.get(m and m.group('tz').strip())
1592 if timezone is not None:
1593 date_str = date_str[:-len(m.group('tz'))]
1594 timezone = datetime.timedelta(hours=timezone or 0)
46f59e89
S
1595 else:
1596 date_str = date_str[:-len(m.group('tz'))]
1597 if not m.group('sign'):
1598 timezone = datetime.timedelta()
1599 else:
1600 sign = 1 if m.group('sign') == '+' else -1
1601 timezone = datetime.timedelta(
1602 hours=sign * int(m.group('hours')),
1603 minutes=sign * int(m.group('minutes')))
1604 return timezone, date_str
1605
1606
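
# Usage sketch (hypothetical `_example_extract_timezone` helper, not part of the
# module API): the trailing offset is returned as a datetime.timedelta together
# with the remaining date string.
def _example_extract_timezone():
    tz, rest = extract_timezone('2023-01-02T03:04:05+05:30')
    assert tz == datetime.timedelta(hours=5, minutes=30)
    assert rest == '2023-01-02T03:04:05'
    tz, rest = extract_timezone('2023-01-02T03:04:05Z')
    assert tz == datetime.timedelta(0)
    assert rest == '2023-01-02T03:04:05'
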
08b38d54 1607def parse_iso8601(date_str, delimiter='T', timezone=None):
912b38b4
PH
1608 """ Return a UNIX timestamp from the given date """
1609
1610 if date_str is None:
1611 return None
1612
52c3a6e4
S
1613 date_str = re.sub(r'\.[0-9]+', '', date_str)
1614
08b38d54 1615 if timezone is None:
46f59e89
S
1616 timezone, date_str = extract_timezone(date_str)
1617
19a03940 1618 with contextlib.suppress(ValueError):
86e5f3ed 1619 date_format = f'%Y-%m-%d{delimiter}%H:%M:%S'
52c3a6e4
S
1620 dt = datetime.datetime.strptime(date_str, date_format) - timezone
1621 return calendar.timegm(dt.timetuple())
912b38b4
PH
1622
1623
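
# Usage sketch (hypothetical `_example_parse_iso8601` helper, not part of the
# module API): explicit offsets are folded into UTC and fractional seconds are
# ignored.
def _example_parse_iso8601():
    assert parse_iso8601('1970-01-02T00:00:00Z') == 86400
    assert parse_iso8601('1970-01-02T01:00:00+01:00') == 86400
    assert parse_iso8601('1970-01-02T00:00:00.123Z') == 86400
    assert parse_iso8601(None) is None
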
46f59e89
S
1624def date_formats(day_first=True):
1625 return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST
1626
1627
42bdd9d0 1628def unified_strdate(date_str, day_first=True):
bf50b038 1629 """Return a string with the date in the format YYYYMMDD"""
64e7ad60
PH
1630
1631 if date_str is None:
1632 return None
bf50b038 1633 upload_date = None
5f6a1245 1634 # Replace commas
026fcc04 1635 date_str = date_str.replace(',', ' ')
42bdd9d0 1636 # Remove AM/PM + timezone
9bb8e0a3 1637 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
46f59e89 1638 _, date_str = extract_timezone(date_str)
42bdd9d0 1639
46f59e89 1640 for expression in date_formats(day_first):
19a03940 1641 with contextlib.suppress(ValueError):
bf50b038 1642 upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
42393ce2
PH
1643 if upload_date is None:
1644 timetuple = email.utils.parsedate_tz(date_str)
1645 if timetuple:
19a03940 1646 with contextlib.suppress(ValueError):
c6b9cf05 1647 upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
6a750402 1648 if upload_date is not None:
14f25df2 1649 return str(upload_date)
bf50b038 1650
5f6a1245 1651
46f59e89
S
1652def unified_timestamp(date_str, day_first=True):
1653 if date_str is None:
1654 return None
1655
8f53dc44 1656 date_str = re.sub(r'\s+', ' ', re.sub(
1657 r'(?i)[,|]|(mon|tues?|wed(nes)?|thu(rs)?|fri|sat(ur)?)(day)?', '', date_str))
46f59e89 1658
7dc2a74e 1659 pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
46f59e89
S
1660 timezone, date_str = extract_timezone(date_str)
1661
1662 # Remove AM/PM + timezone
1663 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
1664
deef3195
S
1665 # Remove unrecognized timezones from ISO 8601 alike timestamps
1666 m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
1667 if m:
1668 date_str = date_str[:-len(m.group('tz'))]
1669
f226880c
PH
1670 # Python only supports microseconds, so remove nanoseconds
1671 m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
1672 if m:
1673 date_str = m.group(1)
1674
46f59e89 1675 for expression in date_formats(day_first):
19a03940 1676 with contextlib.suppress(ValueError):
7dc2a74e 1677 dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
46f59e89 1678 return calendar.timegm(dt.timetuple())
8f53dc44 1679
46f59e89
S
1680 timetuple = email.utils.parsedate_tz(date_str)
1681 if timetuple:
8f53dc44 1682 return calendar.timegm(timetuple) + pm_delta * 3600 - timezone.total_seconds()
46f59e89
S
1683
1684
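
# Usage sketch (hypothetical `_example_unified_dates` helper, not part of the
# module API): the accepted spellings depend on the DATE_FORMATS tables defined
# elsewhere in this file, so the inputs below assume the usual table contents.
def _example_unified_dates():
    assert unified_strdate('2014-12-21') == '20141221'
    assert unified_strdate('December 21, 2014') == '20141221'
    assert unified_timestamp('1970-01-02T00:00:00Z') == 86400
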
28e614de 1685def determine_ext(url, default_ext='unknown_video'):
85750f89 1686 if url is None or '.' not in url:
f4776371 1687 return default_ext
9cb9a5df 1688 guess = url.partition('?')[0].rpartition('.')[2]
73e79f2a
PH
1689 if re.match(r'^[A-Za-z0-9]+$', guess):
1690 return guess
a7aaa398
S
1691 # Try to extract ext from URLs like http://example.com/foo/bar.mp4/?download
1692 elif guess.rstrip('/') in KNOWN_EXTENSIONS:
9cb9a5df 1693 return guess.rstrip('/')
73e79f2a 1694 else:
cbdbb766 1695 return default_ext
73e79f2a 1696
5f6a1245 1697
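
# Usage sketch (hypothetical `_example_determine_ext` helper, not part of the
# module API): the query string is ignored and a fallback is returned when no
# plausible extension is found.
def _example_determine_ext():
    assert determine_ext('http://example.com/video.mp4?download=1') == 'mp4'
    assert determine_ext(None) == 'unknown_video'
    assert determine_ext('no_extension_here', default_ext='bin') == 'bin'
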
824fa511
S
1698def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
1699 return replace_extension(filename, sub_lang + '.' + sub_format, expected_real_ext)
d4051a8e 1700
5f6a1245 1701
9e62f283 1702def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
3d38b2d6 1703 R"""
1704 Return a datetime object from a string.
1705 Supported format:
1706 (now|today|yesterday|DATE)([+-]\d+(microsecond|second|minute|hour|day|week|month|year)s?)?
1707
1708 @param format strftime format of DATE
1709 @param precision Round the datetime object: auto|microsecond|second|minute|hour|day
1710 auto: round to the unit provided in date_str (if applicable).
9e62f283 1711 """
1712 auto_precision = False
1713 if precision == 'auto':
1714 auto_precision = True
1715 precision = 'microsecond'
396a76f7 1716 today = datetime_round(datetime.datetime.utcnow(), precision)
f8795e10 1717 if date_str in ('now', 'today'):
37254abc 1718 return today
f8795e10
PH
1719 if date_str == 'yesterday':
1720 return today - datetime.timedelta(days=1)
9e62f283 1721 match = re.match(
3d38b2d6 1722 r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?',
9e62f283 1723 date_str)
37254abc 1724 if match is not None:
9e62f283 1725 start_time = datetime_from_str(match.group('start'), precision, format)
1726 time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1)
37254abc 1727 unit = match.group('unit')
9e62f283 1728 if unit == 'month' or unit == 'year':
1729 new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time)
37254abc 1730 unit = 'day'
9e62f283 1731 else:
1732 if unit == 'week':
1733 unit = 'day'
1734 time *= 7
1735 delta = datetime.timedelta(**{unit + 's': time})
1736 new_date = start_time + delta
1737 if auto_precision:
1738 return datetime_round(new_date, unit)
1739 return new_date
1740
1741 return datetime_round(datetime.datetime.strptime(date_str, format), precision)
1742
1743
d49f8db3 1744def date_from_str(date_str, format='%Y%m%d', strict=False):
3d38b2d6 1745 R"""
1746 Return a date object from a string using datetime_from_str
9e62f283 1747
3d38b2d6 1748 @param strict Restrict allowed patterns to "YYYYMMDD" and
1749 (now|today|yesterday)(-\d+(day|week|month|year)s?)?
9e62f283 1750 """
3d38b2d6 1751 if strict and not re.fullmatch(r'\d{8}|(now|today|yesterday)(-\d+(day|week|month|year)s?)?', date_str):
1752 raise ValueError(f'Invalid date format "{date_str}"')
9e62f283 1753 return datetime_from_str(date_str, precision='microsecond', format=format).date()
1754
1755
1756def datetime_add_months(dt, months):
1757 """Increment/Decrement a datetime object by months."""
1758 month = dt.month + months - 1
1759 year = dt.year + month // 12
1760 month = month % 12 + 1
1761 day = min(dt.day, calendar.monthrange(year, month)[1])
1762 return dt.replace(year, month, day)
1763
1764
1765def datetime_round(dt, precision='day'):
1766 """
1767 Round a datetime object's time to a specific precision
1768 """
1769 if precision == 'microsecond':
1770 return dt
1771
1772 unit_seconds = {
1773 'day': 86400,
1774 'hour': 3600,
1775 'minute': 60,
1776 'second': 1,
1777 }
1778 roundto = lambda x, n: ((x + n / 2) // n) * n
1779 timestamp = calendar.timegm(dt.timetuple())
1780 return datetime.datetime.utcfromtimestamp(roundto(timestamp, unit_seconds[precision]))
5f6a1245
JW
1781
1782
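
# Usage sketch (hypothetical `_example_date_helpers` helper, not part of the
# module API): month arithmetic clamps to the last valid day, date_from_str()
# accepts absolute YYYYMMDD strings, and datetime_round() rounds to the nearest
# unit boundary.
def _example_date_helpers():
    assert datetime_add_months(datetime.datetime(2020, 1, 31), 1) == datetime.datetime(2020, 2, 29)
    assert date_from_str('20200229') == datetime.date(2020, 2, 29)
    assert datetime_round(datetime.datetime(2020, 1, 1, 13, 0), 'day') == datetime.datetime(2020, 1, 2)
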
e63fc1be 1783def hyphenate_date(date_str):
1784 """
1785 Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
1786 match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
1787 if match is not None:
1788 return '-'.join(match.groups())
1789 else:
1790 return date_str
1791
5f6a1245 1792
86e5f3ed 1793class DateRange:
bd558525 1794 """Represents a time interval between two dates"""
5f6a1245 1795
bd558525
JMF
1796 def __init__(self, start=None, end=None):
1797 """start and end must be strings in the format accepted by date"""
1798 if start is not None:
d49f8db3 1799 self.start = date_from_str(start, strict=True)
bd558525
JMF
1800 else:
1801 self.start = datetime.datetime.min.date()
1802 if end is not None:
d49f8db3 1803 self.end = date_from_str(end, strict=True)
bd558525
JMF
1804 else:
1805 self.end = datetime.datetime.max.date()
37254abc 1806 if self.start > self.end:
bd558525 1807 raise ValueError('Date range: "%s" , the start date must be before the end date' % self)
5f6a1245 1808
bd558525
JMF
1809 @classmethod
1810 def day(cls, day):
1811 """Returns a range that only contains the given day"""
5f6a1245
JW
1812 return cls(day, day)
1813
bd558525
JMF
1814 def __contains__(self, date):
1815 """Check if the date is in the range"""
37254abc
JMF
1816 if not isinstance(date, datetime.date):
1817 date = date_from_str(date)
1818 return self.start <= date <= self.end
5f6a1245 1819
46f1370e 1820 def __repr__(self):
1821 return f'{__name__}.{type(self).__name__}({self.start.isoformat()!r}, {self.end.isoformat()!r})'
c496ca96 1822
f2df4071 1823 def __eq__(self, other):
1824 return (isinstance(other, DateRange)
1825 and self.start == other.start and self.end == other.end)
1826
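
# Usage sketch (hypothetical `_example_daterange` helper, not part of the module
# API): DateRange takes the strict form accepted by date_from_str and supports
# containment checks with strings or datetime.date objects.
def _example_daterange():
    span = DateRange('20200101', '20200131')
    assert '20200115' in span
    assert datetime.date(2020, 2, 1) not in span
    assert DateRange.day('20200101') == DateRange('20200101', '20200101')
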
c496ca96 1827
b1f94422 1828@functools.cache
1829def system_identifier():
1830 python_implementation = platform.python_implementation()
1831 if python_implementation == 'PyPy' and hasattr(sys, 'pypy_version_info'):
1832 python_implementation += ' version %d.%d.%d' % sys.pypy_version_info[:3]
dab284f8 1833 libc_ver = []
1834 with contextlib.suppress(OSError): # We may not have access to the executable
1835 libc_ver = platform.libc_ver()
b1f94422 1836
17fc3dc4 1837 return 'Python %s (%s %s %s) - %s (%s%s)' % (
b1f94422 1838 platform.python_version(),
1839 python_implementation,
17fc3dc4 1840 platform.machine(),
b1f94422 1841 platform.architecture()[0],
1842 platform.platform(),
5b9f253f
M
1843 ssl.OPENSSL_VERSION,
1844 format_field(join_nonempty(*libc_ver, delim=' '), None, ', %s'),
b1f94422 1845 )
c257baff
PH
1846
1847
0b9c08b4 1848@functools.cache
49fa4d9a 1849def get_windows_version():
8a82af35 1850 ''' Get Windows version. Returns () if it's not running on Windows '''
49fa4d9a
N
1851 if compat_os_name == 'nt':
1852 return version_tuple(platform.win32_ver()[1])
1853 else:
8a82af35 1854 return ()
49fa4d9a
N
1855
1856
734f90bb 1857def write_string(s, out=None, encoding=None):
19a03940 1858 assert isinstance(s, str)
1859 out = out or sys.stderr
3b479100
SS
1860 # `sys.stderr` might be `None` (Ref: https://github.com/pyinstaller/pyinstaller/pull/7217)
1861 if not out:
1862 return
7459e3a2 1863
fe1daad3 1864 if compat_os_name == 'nt' and supports_terminal_sequences(out):
3fe75fdc 1865 s = re.sub(r'([\r\n]+)', r' \1', s)
59f943cd 1866
8a82af35 1867 enc, buffer = None, out
cfb0511d 1868 if 'b' in getattr(out, 'mode', ''):
c487cf00 1869 enc = encoding or preferredencoding()
104aa738 1870 elif hasattr(out, 'buffer'):
8a82af35 1871 buffer = out.buffer
104aa738 1872 enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
c487cf00 1873
8a82af35 1874 buffer.write(s.encode(enc, 'ignore') if enc else s)
7459e3a2
PH
1875 out.flush()
1876
1877
da4db748 1878def deprecation_warning(msg, *, printer=None, stacklevel=0, **kwargs):
69bec673 1879 from .. import _IN_CLI
da4db748 1880 if _IN_CLI:
1881 if msg in deprecation_warning._cache:
1882 return
1883 deprecation_warning._cache.add(msg)
1884 if printer:
1885 return printer(f'{msg}{bug_reports_message()}', **kwargs)
1886 return write_string(f'ERROR: {msg}{bug_reports_message()}\n', **kwargs)
1887 else:
1888 import warnings
1889 warnings.warn(DeprecationWarning(msg), stacklevel=stacklevel + 3)
1890
1891
1892deprecation_warning._cache = set()
1893
1894
48ea9cea
PH
1895def bytes_to_intlist(bs):
1896 if not bs:
1897 return []
1898 if isinstance(bs[0], int): # Python 3
1899 return list(bs)
1900 else:
1901 return [ord(c) for c in bs]
1902
c257baff 1903
cba892fa 1904def intlist_to_bytes(xs):
1905 if not xs:
1906 return b''
ac668111 1907 return struct.pack('%dB' % len(xs), *xs)
c38b1e77
PH
1908
1909
8a82af35 1910class LockingUnsupportedError(OSError):
1890fc63 1911 msg = 'File locking is not supported'
0edb3e33 1912
1913 def __init__(self):
1914 super().__init__(self.msg)
1915
1916
c1c9a79c
PH
1917# Cross-platform file locking
1918if sys.platform == 'win32':
fe0918bb 1919 import ctypes
c1c9a79c
PH
1920 import ctypes.wintypes
1921 import msvcrt
1922
1923 class OVERLAPPED(ctypes.Structure):
1924 _fields_ = [
1925 ('Internal', ctypes.wintypes.LPVOID),
1926 ('InternalHigh', ctypes.wintypes.LPVOID),
1927 ('Offset', ctypes.wintypes.DWORD),
1928 ('OffsetHigh', ctypes.wintypes.DWORD),
1929 ('hEvent', ctypes.wintypes.HANDLE),
1930 ]
1931
37e325b9 1932 kernel32 = ctypes.WinDLL('kernel32')
c1c9a79c
PH
1933 LockFileEx = kernel32.LockFileEx
1934 LockFileEx.argtypes = [
1935 ctypes.wintypes.HANDLE, # hFile
1936 ctypes.wintypes.DWORD, # dwFlags
1937 ctypes.wintypes.DWORD, # dwReserved
1938 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
1939 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
1940 ctypes.POINTER(OVERLAPPED) # Overlapped
1941 ]
1942 LockFileEx.restype = ctypes.wintypes.BOOL
1943 UnlockFileEx = kernel32.UnlockFileEx
1944 UnlockFileEx.argtypes = [
1945 ctypes.wintypes.HANDLE, # hFile
1946 ctypes.wintypes.DWORD, # dwReserved
1947 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
1948 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
1949 ctypes.POINTER(OVERLAPPED) # Overlapped
1950 ]
1951 UnlockFileEx.restype = ctypes.wintypes.BOOL
1952 whole_low = 0xffffffff
1953 whole_high = 0x7fffffff
1954
747c0bd1 1955 def _lock_file(f, exclusive, block):
c1c9a79c
PH
1956 overlapped = OVERLAPPED()
1957 overlapped.Offset = 0
1958 overlapped.OffsetHigh = 0
1959 overlapped.hEvent = 0
1960 f._lock_file_overlapped_p = ctypes.pointer(overlapped)
747c0bd1 1961
1962 if not LockFileEx(msvcrt.get_osfhandle(f.fileno()),
1963 (0x2 if exclusive else 0x0) | (0x0 if block else 0x1),
1964 0, whole_low, whole_high, f._lock_file_overlapped_p):
2cb19820 1965 # NB: The no-argument form of "ctypes.FormatError" does not work on PyPy
1966 raise BlockingIOError(f'Locking file failed: {ctypes.FormatError(ctypes.GetLastError())!r}')
c1c9a79c
PH
1967
1968 def _unlock_file(f):
1969 assert f._lock_file_overlapped_p
1970 handle = msvcrt.get_osfhandle(f.fileno())
747c0bd1 1971 if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p):
c1c9a79c
PH
1972 raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
1973
1974else:
399a76e6
YCH
1975 try:
1976 import fcntl
c1c9a79c 1977
a3125791 1978 def _lock_file(f, exclusive, block):
b63837bc 1979 flags = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
1980 if not block:
1981 flags |= fcntl.LOCK_NB
acea8d7c 1982 try:
b63837bc 1983 fcntl.flock(f, flags)
acea8d7c
JK
1984 except BlockingIOError:
1985 raise
1986 except OSError: # AOSP does not have flock()
b63837bc 1987 fcntl.lockf(f, flags)
c1c9a79c 1988
399a76e6 1989 def _unlock_file(f):
45998b3e
E
1990 with contextlib.suppress(OSError):
1991 return fcntl.flock(f, fcntl.LOCK_UN)
1992 with contextlib.suppress(OSError):
1993 return fcntl.lockf(f, fcntl.LOCK_UN) # AOSP does not have flock()
1994 return fcntl.flock(f, fcntl.LOCK_UN | fcntl.LOCK_NB) # virtiofs needs LOCK_NB on unlocking
a3125791 1995
399a76e6 1996 except ImportError:
399a76e6 1997
a3125791 1998 def _lock_file(f, exclusive, block):
0edb3e33 1999 raise LockingUnsupportedError()
399a76e6
YCH
2000
2001 def _unlock_file(f):
0edb3e33 2002 raise LockingUnsupportedError()
c1c9a79c
PH
2003
2004
86e5f3ed 2005class locked_file:
0edb3e33 2006 locked = False
747c0bd1 2007
a3125791 2008 def __init__(self, filename, mode, block=True, encoding=None):
fcfa8853
JK
2009 if mode not in {'r', 'rb', 'a', 'ab', 'w', 'wb'}:
2010 raise NotImplementedError(mode)
2011 self.mode, self.block = mode, block
2012
2013 writable = any(f in mode for f in 'wax+')
2014 readable = any(f in mode for f in 'r+')
2015 flags = functools.reduce(operator.ior, (
2016 getattr(os, 'O_CLOEXEC', 0), # UNIX only
2017 getattr(os, 'O_BINARY', 0), # Windows only
2018 getattr(os, 'O_NOINHERIT', 0), # Windows only
2019 os.O_CREAT if writable else 0, # O_TRUNC only after locking
2020 os.O_APPEND if 'a' in mode else 0,
2021 os.O_EXCL if 'x' in mode else 0,
2022 os.O_RDONLY if not writable else os.O_RDWR if readable else os.O_WRONLY,
2023 ))
2024
98804d03 2025 self.f = os.fdopen(os.open(filename, flags, 0o666), mode, encoding=encoding)
c1c9a79c
PH
2026
2027 def __enter__(self):
a3125791 2028 exclusive = 'r' not in self.mode
c1c9a79c 2029 try:
a3125791 2030 _lock_file(self.f, exclusive, self.block)
0edb3e33 2031 self.locked = True
86e5f3ed 2032 except OSError:
c1c9a79c
PH
2033 self.f.close()
2034 raise
fcfa8853 2035 if 'w' in self.mode:
131e14dc
JK
2036 try:
2037 self.f.truncate()
2038 except OSError as e:
1890fc63 2039 if e.errno not in (
2040 errno.ESPIPE, # Illegal seek - expected for FIFO
2041 errno.EINVAL, # Invalid argument - expected for /dev/null
2042 ):
2043 raise
c1c9a79c
PH
2044 return self
2045
0edb3e33 2046 def unlock(self):
2047 if not self.locked:
2048 return
c1c9a79c 2049 try:
0edb3e33 2050 _unlock_file(self.f)
c1c9a79c 2051 finally:
0edb3e33 2052 self.locked = False
c1c9a79c 2053
0edb3e33 2054 def __exit__(self, *_):
2055 try:
2056 self.unlock()
2057 finally:
2058 self.f.close()
4eb7f1d1 2059
0edb3e33 2060 open = __enter__
2061 close = __exit__
a3125791 2062
0edb3e33 2063 def __getattr__(self, attr):
2064 return getattr(self.f, attr)
a3125791 2065
0edb3e33 2066 def __iter__(self):
2067 return iter(self.f)
a3125791 2068
4eb7f1d1 2069
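
# Usage sketch (hypothetical `_example_locked_file` helper, not part of the
# module API): writes take an exclusive lock, reads a shared one. The demo file
# name is made up and placed in the system temporary directory.
def _example_locked_file():
    path = os.path.join(tempfile.gettempdir(), 'yt_dlp_locked_file_demo.txt')
    with locked_file(path, 'w', block=True, encoding='utf-8') as f:  # exclusive lock
        f.write('hello\n')
    with locked_file(path, 'r', encoding='utf-8') as f:  # shared lock
        assert f.read() == 'hello\n'
    os.remove(path)
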
0b9c08b4 2070@functools.cache
4644ac55
S
2071def get_filesystem_encoding():
2072 encoding = sys.getfilesystemencoding()
2073 return encoding if encoding is not None else 'utf-8'
2074
2075
4eb7f1d1 2076def shell_quote(args):
a6a173c2 2077 quoted_args = []
4644ac55 2078 encoding = get_filesystem_encoding()
a6a173c2
JMF
2079 for a in args:
2080 if isinstance(a, bytes):
2081 # We may get a filename encoded with 'encodeFilename'
2082 a = a.decode(encoding)
aefce8e6 2083 quoted_args.append(compat_shlex_quote(a))
28e614de 2084 return ' '.join(quoted_args)
9d4660ca
PH
2085
2086
2087def smuggle_url(url, data):
2088 """ Pass additional data in a URL for internal use. """
2089
81953d1a
RA
2090 url, idata = unsmuggle_url(url, {})
2091 data.update(idata)
14f25df2 2092 sdata = urllib.parse.urlencode(
28e614de
PH
2093 {'__youtubedl_smuggle': json.dumps(data)})
2094 return url + '#' + sdata
9d4660ca
PH
2095
2096
79f82953 2097def unsmuggle_url(smug_url, default=None):
83e865a3 2098 if '#__youtubedl_smuggle' not in smug_url:
79f82953 2099 return smug_url, default
28e614de 2100 url, _, sdata = smug_url.rpartition('#')
14f25df2 2101 jsond = urllib.parse.parse_qs(sdata)['__youtubedl_smuggle'][0]
9d4660ca
PH
2102 data = json.loads(jsond)
2103 return url, data
02dbf93f
PH
2104
2105
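
# Usage sketch (hypothetical `_example_smuggle_url` helper, not part of the
# module API): extra data is carried in the URL fragment and recovered intact.
def _example_smuggle_url():
    url = smuggle_url('https://example.com/watch?v=1', {'referer': 'https://example.com/'})
    assert unsmuggle_url(url) == ('https://example.com/watch?v=1', {'referer': 'https://example.com/'})
    assert unsmuggle_url('https://example.com/plain') == ('https://example.com/plain', None)
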
e0fd9573 2106def format_decimal_suffix(num, fmt='%d%s', *, factor=1000):
2107 """ Formats numbers with decimal sufixes like K, M, etc """
2108 num, factor = float_or_none(num), float(factor)
4c3f8c3f 2109 if num is None or num < 0:
e0fd9573 2110 return None
eeb2a770 2111 POSSIBLE_SUFFIXES = 'kMGTPEZY'
2112 exponent = 0 if num == 0 else min(int(math.log(num, factor)), len(POSSIBLE_SUFFIXES))
2113 suffix = ['', *POSSIBLE_SUFFIXES][exponent]
abbeeebc 2114 if factor == 1024:
2115 suffix = {'k': 'Ki', '': ''}.get(suffix, f'{suffix}i')
e0fd9573 2116 converted = num / (factor ** exponent)
abbeeebc 2117 return fmt % (converted, suffix)
e0fd9573 2118
2119
02dbf93f 2120def format_bytes(bytes):
f02d24d8 2121 return format_decimal_suffix(bytes, '%.2f%sB', factor=1024) or 'N/A'
f53c966a 2122
1c088fa8 2123
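
# Usage sketch (hypothetical `_example_format_bytes` helper, not part of the
# module API): format_decimal_suffix() scales by 1000 (or 1024 with KiB-style
# suffixes); format_bytes() is the binary, two-decimal variant.
def _example_format_bytes():
    assert format_decimal_suffix(1500) == '1k'  # '%d%s' truncates to an integer
    assert format_decimal_suffix(1500, '%.1f%s') == '1.5k'
    assert format_bytes(1536) == '1.50KiB'
    assert format_bytes(None) == 'N/A'
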
64c464a1 2124def lookup_unit_table(unit_table, s, strict=False):
2125 num_re = NUMBER_RE if strict else NUMBER_RE.replace(R'\.', '[,.]')
fb47597b 2126 units_re = '|'.join(re.escape(u) for u in unit_table)
64c464a1 2127 m = (re.fullmatch if strict else re.match)(
2128 rf'(?P<num>{num_re})\s*(?P<unit>{units_re})\b', s)
fb47597b
S
2129 if not m:
2130 return None
64c464a1 2131
2132 num = float(m.group('num').replace(',', '.'))
fb47597b 2133 mult = unit_table[m.group('unit')]
64c464a1 2134 return round(num * mult)
2135
2136
2137def parse_bytes(s):
2138 """Parse a string indicating a byte quantity into an integer"""
2139 return lookup_unit_table(
2140 {u: 1024**i for i, u in enumerate(['', *'KMGTPEZY'])},
2141 s.upper(), strict=True)
fb47597b
S
2142
2143
be64b5b0
PH
2144def parse_filesize(s):
2145 if s is None:
2146 return None
2147
dfb1b146 2148 # The lower-case forms are of course incorrect and unofficial,
be64b5b0
PH
2149 # but we support those too
2150 _UNIT_TABLE = {
2151 'B': 1,
2152 'b': 1,
70852b47 2153 'bytes': 1,
be64b5b0
PH
2154 'KiB': 1024,
2155 'KB': 1000,
2156 'kB': 1024,
2157 'Kb': 1000,
13585d76 2158 'kb': 1000,
70852b47
YCH
2159 'kilobytes': 1000,
2160 'kibibytes': 1024,
be64b5b0
PH
2161 'MiB': 1024 ** 2,
2162 'MB': 1000 ** 2,
2163 'mB': 1024 ** 2,
2164 'Mb': 1000 ** 2,
13585d76 2165 'mb': 1000 ** 2,
70852b47
YCH
2166 'megabytes': 1000 ** 2,
2167 'mebibytes': 1024 ** 2,
be64b5b0
PH
2168 'GiB': 1024 ** 3,
2169 'GB': 1000 ** 3,
2170 'gB': 1024 ** 3,
2171 'Gb': 1000 ** 3,
13585d76 2172 'gb': 1000 ** 3,
70852b47
YCH
2173 'gigabytes': 1000 ** 3,
2174 'gibibytes': 1024 ** 3,
be64b5b0
PH
2175 'TiB': 1024 ** 4,
2176 'TB': 1000 ** 4,
2177 'tB': 1024 ** 4,
2178 'Tb': 1000 ** 4,
13585d76 2179 'tb': 1000 ** 4,
70852b47
YCH
2180 'terabytes': 1000 ** 4,
2181 'tebibytes': 1024 ** 4,
be64b5b0
PH
2182 'PiB': 1024 ** 5,
2183 'PB': 1000 ** 5,
2184 'pB': 1024 ** 5,
2185 'Pb': 1000 ** 5,
13585d76 2186 'pb': 1000 ** 5,
70852b47
YCH
2187 'petabytes': 1000 ** 5,
2188 'pebibytes': 1024 ** 5,
be64b5b0
PH
2189 'EiB': 1024 ** 6,
2190 'EB': 1000 ** 6,
2191 'eB': 1024 ** 6,
2192 'Eb': 1000 ** 6,
13585d76 2193 'eb': 1000 ** 6,
70852b47
YCH
2194 'exabytes': 1000 ** 6,
2195 'exbibytes': 1024 ** 6,
be64b5b0
PH
2196 'ZiB': 1024 ** 7,
2197 'ZB': 1000 ** 7,
2198 'zB': 1024 ** 7,
2199 'Zb': 1000 ** 7,
13585d76 2200 'zb': 1000 ** 7,
70852b47
YCH
2201 'zettabytes': 1000 ** 7,
2202 'zebibytes': 1024 ** 7,
be64b5b0
PH
2203 'YiB': 1024 ** 8,
2204 'YB': 1000 ** 8,
2205 'yB': 1024 ** 8,
2206 'Yb': 1000 ** 8,
13585d76 2207 'yb': 1000 ** 8,
70852b47
YCH
2208 'yottabytes': 1000 ** 8,
2209 'yobibytes': 1024 ** 8,
be64b5b0
PH
2210 }
2211
fb47597b
S
2212 return lookup_unit_table(_UNIT_TABLE, s)
2213
2214
2215def parse_count(s):
2216 if s is None:
be64b5b0
PH
2217 return None
2218
352d5da8 2219 s = re.sub(r'^[^\d]+\s', '', s).strip()
fb47597b
S
2220
2221 if re.match(r'^[\d,.]+$', s):
2222 return str_to_int(s)
2223
2224 _UNIT_TABLE = {
2225 'k': 1000,
2226 'K': 1000,
2227 'm': 1000 ** 2,
2228 'M': 1000 ** 2,
2229 'kk': 1000 ** 2,
2230 'KK': 1000 ** 2,
352d5da8 2231 'b': 1000 ** 3,
2232 'B': 1000 ** 3,
fb47597b 2233 }
be64b5b0 2234
352d5da8 2235 ret = lookup_unit_table(_UNIT_TABLE, s)
2236 if ret is not None:
2237 return ret
2238
2239 mobj = re.match(r'([\d,.]+)(?:$|\s)', s)
2240 if mobj:
2241 return str_to_int(mobj.group(1))
be64b5b0 2242
2f7ae819 2243
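
# Usage sketch (hypothetical `_example_parse_sizes_and_counts` helper, not part
# of the module API): the numeric part is matched by NUMBER_RE (defined elsewhere
# in this file), so these inputs assume its usual form.
def _example_parse_sizes_and_counts():
    assert parse_filesize('10 MB') == 10 * 1000 ** 2
    assert parse_filesize('10 MiB') == 10 * 1024 ** 2
    assert parse_count('1.2M') == 1200000
    assert parse_count('15,347 views') == 15347
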
5d45484c 2244def parse_resolution(s, *, lenient=False):
b871d7e9
S
2245 if s is None:
2246 return {}
2247
5d45484c
LNO
2248 if lenient:
2249 mobj = re.search(r'(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)', s)
2250 else:
2251 mobj = re.search(r'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s)
b871d7e9
S
2252 if mobj:
2253 return {
2254 'width': int(mobj.group('w')),
2255 'height': int(mobj.group('h')),
2256 }
2257
17ec8bcf 2258 mobj = re.search(r'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s)
b871d7e9
S
2259 if mobj:
2260 return {'height': int(mobj.group(1))}
2261
2262 mobj = re.search(r'\b([48])[kK]\b', s)
2263 if mobj:
2264 return {'height': int(mobj.group(1)) * 540}
2265
2266 return {}
2267
2268
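
# Usage sketch (hypothetical `_example_parse_resolution` helper, not part of the
# module API): width/height are extracted from common resolution spellings.
def _example_parse_resolution():
    assert parse_resolution('1920x1080') == {'width': 1920, 'height': 1080}
    assert parse_resolution('720p') == {'height': 720}
    assert parse_resolution('4K') == {'height': 2160}
    assert parse_resolution('no resolution here') == {}
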
0dc41787 2269def parse_bitrate(s):
14f25df2 2270 if not isinstance(s, str):
0dc41787
S
2271 return
2272 mobj = re.search(r'\b(\d+)\s*kbps', s)
2273 if mobj:
2274 return int(mobj.group(1))
2275
2276
a942d6cb 2277def month_by_name(name, lang='en'):
caefb1de
PH
2278 """ Return the number of a month by (locale-independently) English name """
2279
f6717dec 2280 month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
a942d6cb 2281
caefb1de 2282 try:
f6717dec 2283 return month_names.index(name) + 1
7105440c
YCH
2284 except ValueError:
2285 return None
2286
2287
2288def month_by_abbreviation(abbrev):
2289 """ Return the number of a month by (locale-independently) English
2290 abbreviations """
2291
2292 try:
2293 return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
caefb1de
PH
2294 except ValueError:
2295 return None
18258362
JMF
2296
2297
5aafe895 2298def fix_xml_ampersands(xml_str):
18258362 2299 """Replace all the '&' by '&amp;' in XML"""
5aafe895
PH
2300 return re.sub(
2301 r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
28e614de 2302 '&amp;',
5aafe895 2303 xml_str)
e3946f98
PH
2304
2305
2306def setproctitle(title):
14f25df2 2307 assert isinstance(title, str)
c1c05c67 2308
fe0918bb 2309 # Workaround for https://github.com/yt-dlp/yt-dlp/issues/4541
2310 try:
2311 import ctypes
2312 except ImportError:
c1c05c67
YCH
2313 return
2314
e3946f98 2315 try:
611c1dd9 2316 libc = ctypes.cdll.LoadLibrary('libc.so.6')
e3946f98
PH
2317 except OSError:
2318 return
2f49bcd6
RC
2319 except TypeError:
2320 # LoadLibrary in Windows Python 2.7.13 only expects
2321 # a bytestring, but since unicode_literals turns
2322 # every string into a unicode string, it fails.
2323 return
0f06bcd7 2324 title_bytes = title.encode()
6eefe533
PH
2325 buf = ctypes.create_string_buffer(len(title_bytes))
2326 buf.value = title_bytes
e3946f98 2327 try:
6eefe533 2328 libc.prctl(15, buf, 0, 0, 0)
e3946f98
PH
2329 except AttributeError:
2330 return # Strange libc, just skip this
d7dda168
PH
2331
2332
2333def remove_start(s, start):
46bc9b7d 2334 return s[len(start):] if s is not None and s.startswith(start) else s
29eb5174
PH
2335
2336
2b9faf55 2337def remove_end(s, end):
46bc9b7d 2338 return s[:-len(end)] if s is not None and s.endswith(end) else s
2b9faf55
PH
2339
2340
31b2051e
S
2341def remove_quotes(s):
2342 if s is None or len(s) < 2:
2343 return s
2344 for quote in ('"', "'", ):
2345 if s[0] == quote and s[-1] == quote:
2346 return s[1:-1]
2347 return s
2348
2349
b6e0c7d2 2350def get_domain(url):
ebf99aaf 2351 """
2352 This implementation is inconsistent, but is kept for compatibility.
2353 Use this only for "webpage_url_domain"
2354 """
2355 return remove_start(urllib.parse.urlparse(url).netloc, 'www.') or None
b6e0c7d2
U
2356
2357
29eb5174 2358def url_basename(url):
14f25df2 2359 path = urllib.parse.urlparse(url).path
28e614de 2360 return path.strip('/').split('/')[-1]
aa94a6d3
PH
2361
2362
02dc0a36 2363def base_url(url):
7657ec7e 2364 return re.match(r'https?://[^?#]+/', url).group()
02dc0a36
S
2365
2366
e34c3361 2367def urljoin(base, path):
4b5de77b 2368 if isinstance(path, bytes):
0f06bcd7 2369 path = path.decode()
14f25df2 2370 if not isinstance(path, str) or not path:
e34c3361 2371 return None
fad4ceb5 2372 if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
e34c3361 2373 return path
4b5de77b 2374 if isinstance(base, bytes):
0f06bcd7 2375 base = base.decode()
14f25df2 2376 if not isinstance(base, str) or not re.match(
4b5de77b 2377 r'^(?:https?:)?//', base):
e34c3361 2378 return None
14f25df2 2379 return urllib.parse.urljoin(base, path)
e34c3361
S
2380
2381
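
# Usage sketch (hypothetical `_example_url_helpers` helper, not part of the
# module API): url_basename() ignores the query string, base_url() keeps the
# directory part, and urljoin() only accepts http(s) bases.
def _example_url_helpers():
    assert url_basename('https://example.com/a/b/c.mp4?x=1') == 'c.mp4'
    assert base_url('https://example.com/a/b/c.mp4?x=1') == 'https://example.com/a/b/'
    assert urljoin('https://example.com/a/', '../b.mp4') == 'https://example.com/b.mp4'
    assert urljoin('ftp://example.com/', 'b.mp4') is None
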
ac668111 2382class HEADRequest(urllib.request.Request):
aa94a6d3 2383 def get_method(self):
611c1dd9 2384 return 'HEAD'
7217e148
PH
2385
2386
ac668111 2387class PUTRequest(urllib.request.Request):
95cf60e8
S
2388 def get_method(self):
2389 return 'PUT'
2390
2391
9732d77e 2392def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
9e907ebd 2393 if get_attr and v is not None:
2394 v = getattr(v, get_attr, None)
1812afb7
S
2395 try:
2396 return int(v) * invscale // scale
31c49255 2397 except (ValueError, TypeError, OverflowError):
af98f8ff 2398 return default
9732d77e 2399
9572013d 2400
40a90862 2401def str_or_none(v, default=None):
14f25df2 2402 return default if v is None else str(v)
40a90862 2403
9732d77e
PH
2404
2405def str_to_int(int_str):
48d4681e 2406 """ A more relaxed version of int_or_none """
f9934b96 2407 if isinstance(int_str, int):
348c6bf1 2408 return int_str
14f25df2 2409 elif isinstance(int_str, str):
42db58ec
S
2410 int_str = re.sub(r'[,\.\+]', '', int_str)
2411 return int_or_none(int_str)
608d11f5
PH
2412
2413
9732d77e 2414def float_or_none(v, scale=1, invscale=1, default=None):
caf80631
S
2415 if v is None:
2416 return default
2417 try:
2418 return float(v) * invscale / scale
5e1271c5 2419 except (ValueError, TypeError):
caf80631 2420 return default
43f775e4
PH
2421
2422
c7e327c4
S
2423def bool_or_none(v, default=None):
2424 return v if isinstance(v, bool) else default
2425
2426
53cd37ba 2427def strip_or_none(v, default=None):
14f25df2 2428 return v.strip() if isinstance(v, str) else default
b72b4431
S
2429
2430
af03000a 2431def url_or_none(url):
14f25df2 2432 if not url or not isinstance(url, str):
af03000a
S
2433 return None
2434 url = url.strip()
29f7c58a 2435 return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None
af03000a
S
2436
2437
3e9b66d7 2438def request_to_url(req):
ac668111 2439 if isinstance(req, urllib.request.Request):
3e9b66d7
LNO
2440 return req.get_full_url()
2441 else:
2442 return req
2443
2444
e29663c6 2445def strftime_or_none(timestamp, date_format, default=None):
2446 datetime_object = None
2447 try:
f9934b96 2448 if isinstance(timestamp, (int, float)): # unix timestamp
d509c1f5 2449 # Using naive datetime here can break timestamp() in Windows
2450 # Ref: https://github.com/yt-dlp/yt-dlp/issues/5185, https://github.com/python/cpython/issues/94414
2451 datetime_object = datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc)
14f25df2 2452 elif isinstance(timestamp, str): # assume YYYYMMDD
e29663c6 2453 datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
9665f15a 2454 date_format = re.sub( # Support %s on windows
2455 r'(?<!%)(%%)*%s', rf'\g<1>{int(datetime_object.timestamp())}', date_format)
e29663c6 2456 return datetime_object.strftime(date_format)
2457 except (ValueError, TypeError, AttributeError):
2458 return default
2459
2460
608d11f5 2461def parse_duration(s):
f9934b96 2462 if not isinstance(s, str):
608d11f5 2463 return None
ca7b3246 2464 s = s.strip()
38d79fd1 2465 if not s:
2466 return None
ca7b3246 2467
acaff495 2468 days, hours, mins, secs, ms = [None] * 5
8bd1c00b 2469 m = re.match(r'''(?x)
2470 (?P<before_secs>
2471 (?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?
2472 (?P<secs>(?(before_secs)[0-9]{1,2}|[0-9]+))
2473 (?P<ms>[.:][0-9]+)?Z?$
2474 ''', s)
acaff495 2475 if m:
8bd1c00b 2476 days, hours, mins, secs, ms = m.group('days', 'hours', 'mins', 'secs', 'ms')
acaff495 2477 else:
2478 m = re.match(
056653bb
S
2479 r'''(?ix)(?:P?
2480 (?:
1c1b2f96 2481 [0-9]+\s*y(?:ears?)?,?\s*
056653bb
S
2482 )?
2483 (?:
1c1b2f96 2484 [0-9]+\s*m(?:onths?)?,?\s*
056653bb
S
2485 )?
2486 (?:
1c1b2f96 2487 [0-9]+\s*w(?:eeks?)?,?\s*
056653bb 2488 )?
8f4b58d7 2489 (?:
1c1b2f96 2490 (?P<days>[0-9]+)\s*d(?:ays?)?,?\s*
8f4b58d7 2491 )?
056653bb 2492 T)?
acaff495 2493 (?:
1c1b2f96 2494 (?P<hours>[0-9]+)\s*h(?:ours?)?,?\s*
acaff495 2495 )?
2496 (?:
1c1b2f96 2497 (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?,?\s*
acaff495 2498 )?
2499 (?:
2500 (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
15846398 2501 )?Z?$''', s)
acaff495 2502 if m:
2503 days, hours, mins, secs, ms = m.groups()
2504 else:
15846398 2505 m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
acaff495 2506 if m:
2507 hours, mins = m.groups()
2508 else:
2509 return None
2510
acaff495 2511 if ms:
19a03940 2512 ms = ms.replace(':', '.')
2513 return sum(float(part or 0) * mult for part, mult in (
2514 (days, 86400), (hours, 3600), (mins, 60), (secs, 1), (ms, 1)))
91d7d0b3
JMF
2515
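
# Usage sketch (hypothetical `_example_parse_duration` helper, not part of the
# module API): clock-style, verbose and ISO 8601-like durations all yield seconds.
def _example_parse_duration():
    assert parse_duration('1:02:03') == 3723
    assert parse_duration('PT1H30M') == 5400
    assert parse_duration('5 min 30 s') == 330
    assert parse_duration('bogus') is None
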
2516
e65e4c88 2517def prepend_extension(filename, ext, expected_real_ext=None):
5f6a1245 2518 name, real_ext = os.path.splitext(filename)
e65e4c88 2519 return (
86e5f3ed 2520 f'{name}.{ext}{real_ext}'
e65e4c88 2521 if not expected_real_ext or real_ext[1:] == expected_real_ext
86e5f3ed 2522 else f'{filename}.{ext}')
d70ad093
PH
2523
2524
b3ed15b7
S
2525def replace_extension(filename, ext, expected_real_ext=None):
2526 name, real_ext = os.path.splitext(filename)
86e5f3ed 2527 return '{}.{}'.format(
b3ed15b7
S
2528 name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
2529 ext)
2530
2531
d70ad093
PH
2532def check_executable(exe, args=[]):
2533 """ Checks if the given binary is installed somewhere in PATH, and returns its name.
2534 args can be a list of arguments for a short output (like -version) """
2535 try:
f0c9fb96 2536 Popen.run([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
d70ad093
PH
2537 except OSError:
2538 return False
2539 return exe
b7ab0590
PH
2540
2541
7aaf4cd2 2542def _get_exe_version_output(exe, args):
95807118 2543 try:
b64d04c1 2544 # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
7a5c1cfe 2545 # SIGTTOU if yt-dlp is run in the background.
067aa17e 2546 # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
1cdda329 2547 stdout, _, ret = Popen.run([encodeArgument(exe)] + args, text=True,
2548 stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
2549 if ret:
2550 return None
95807118
PH
2551 except OSError:
2552 return False
f0c9fb96 2553 return stdout
cae97f65
PH
2554
2555
2556def detect_exe_version(output, version_re=None, unrecognized='present'):
14f25df2 2557 assert isinstance(output, str)
cae97f65
PH
2558 if version_re is None:
2559 version_re = r'version\s+([-0-9._a-zA-Z]+)'
2560 m = re.search(version_re, output)
95807118
PH
2561 if m:
2562 return m.group(1)
2563 else:
2564 return unrecognized
2565
2566
9af98e17 2567def get_exe_version(exe, args=['--version'],
1cdda329 2568 version_re=None, unrecognized=('present', 'broken')):
9af98e17 2569 """ Returns the version of the specified executable,
2570 or False if the executable is not present """
1cdda329 2571 unrecognized = variadic(unrecognized)
2572 assert len(unrecognized) in (1, 2)
9af98e17 2573 out = _get_exe_version_output(exe, args)
1cdda329 2574 if out is None:
2575 return unrecognized[-1]
2576 return out and detect_exe_version(out, version_re, unrecognized[0])
9af98e17 2577
2578
7e88d7d7 2579def frange(start=0, stop=None, step=1):
2580 """Float range"""
2581 if stop is None:
2582 start, stop = 0, start
2583 sign = [-1, 1][step > 0] if step else 0
2584 while sign * start < sign * stop:
2585 yield start
2586 start += step
2587
2588
cb89cfc1 2589class LazyList(collections.abc.Sequence):
0f06bcd7 2590 """Lazy immutable list from an iterable
2591 Note that slices of a LazyList are lists and not LazyList"""
483336e7 2592
8e5fecc8 2593 class IndexError(IndexError):
2594 pass
2595
282f5709 2596 def __init__(self, iterable, *, reverse=False, _cache=None):
0f06bcd7 2597 self._iterable = iter(iterable)
2598 self._cache = [] if _cache is None else _cache
2599 self._reversed = reverse
483336e7 2600
2601 def __iter__(self):
0f06bcd7 2602 if self._reversed:
28419ca2 2603 # We need to consume the entire iterable to iterate in reverse
981052c9 2604 yield from self.exhaust()
28419ca2 2605 return
0f06bcd7 2606 yield from self._cache
2607 for item in self._iterable:
2608 self._cache.append(item)
483336e7 2609 yield item
2610
0f06bcd7 2611 def _exhaust(self):
2612 self._cache.extend(self._iterable)
2613 self._iterable = [] # Discard the emptied iterable to make it pickle-able
2614 return self._cache
28419ca2 2615
981052c9 2616 def exhaust(self):
0f06bcd7 2617 """Evaluate the entire iterable"""
2618 return self._exhaust()[::-1 if self._reversed else 1]
981052c9 2619
28419ca2 2620 @staticmethod
0f06bcd7 2621 def _reverse_index(x):
f2df4071 2622 return None if x is None else ~x
483336e7 2623
2624 def __getitem__(self, idx):
2625 if isinstance(idx, slice):
0f06bcd7 2626 if self._reversed:
2627 idx = slice(self._reverse_index(idx.start), self._reverse_index(idx.stop), -(idx.step or 1))
e0f2b4b4 2628 start, stop, step = idx.start, idx.stop, idx.step or 1
483336e7 2629 elif isinstance(idx, int):
0f06bcd7 2630 if self._reversed:
2631 idx = self._reverse_index(idx)
e0f2b4b4 2632 start, stop, step = idx, idx, 0
483336e7 2633 else:
2634 raise TypeError('indices must be integers or slices')
e0f2b4b4 2635 if ((start or 0) < 0 or (stop or 0) < 0
2636 or (start is None and step < 0)
2637 or (stop is None and step > 0)):
483336e7 2638 # We need to consume the entire iterable to be able to slice from the end
2639 # Obviously, never use this with infinite iterables
0f06bcd7 2640 self._exhaust()
8e5fecc8 2641 try:
0f06bcd7 2642 return self._cache[idx]
8e5fecc8 2643 except IndexError as e:
2644 raise self.IndexError(e) from e
0f06bcd7 2645 n = max(start or 0, stop or 0) - len(self._cache) + 1
28419ca2 2646 if n > 0:
0f06bcd7 2647 self._cache.extend(itertools.islice(self._iterable, n))
8e5fecc8 2648 try:
0f06bcd7 2649 return self._cache[idx]
8e5fecc8 2650 except IndexError as e:
2651 raise self.IndexError(e) from e
483336e7 2652
2653 def __bool__(self):
2654 try:
0f06bcd7 2655 self[-1] if self._reversed else self[0]
8e5fecc8 2656 except self.IndexError:
483336e7 2657 return False
2658 return True
2659
2660 def __len__(self):
0f06bcd7 2661 self._exhaust()
2662 return len(self._cache)
483336e7 2663
282f5709 2664 def __reversed__(self):
0f06bcd7 2665 return type(self)(self._iterable, reverse=not self._reversed, _cache=self._cache)
282f5709 2666
2667 def __copy__(self):
0f06bcd7 2668 return type(self)(self._iterable, reverse=self._reversed, _cache=self._cache)
282f5709 2669
28419ca2 2670 def __repr__(self):
2671 # repr and str should mimic a list. So we exhaust the iterable
2672 return repr(self.exhaust())
2673
2674 def __str__(self):
2675 return repr(self.exhaust())
2676
483336e7 2677
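
# Usage sketch (hypothetical `_example_lazylist` helper, not part of the module
# API): items are cached as they are requested, so even infinite iterators can
# be indexed with non-negative indices.
def _example_lazylist():
    lazy = LazyList(itertools.count())
    assert lazy[3] == 3           # consumes and caches only the first four items
    assert lazy[:3] == [0, 1, 2]  # slices are plain lists, not LazyLists
    assert list(LazyList([1, 2, 3], reverse=True)) == [3, 2, 1]
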
7be9ccff 2678class PagedList:
c07a39ae 2679
2680 class IndexError(IndexError):
2681 pass
2682
dd26ced1
PH
2683 def __len__(self):
2684 # This is only useful for tests
2685 return len(self.getslice())
2686
7be9ccff 2687 def __init__(self, pagefunc, pagesize, use_cache=True):
2688 self._pagefunc = pagefunc
2689 self._pagesize = pagesize
f1d13090 2690 self._pagecount = float('inf')
7be9ccff 2691 self._use_cache = use_cache
2692 self._cache = {}
2693
2694 def getpage(self, pagenum):
d8cf8d97 2695 page_results = self._cache.get(pagenum)
2696 if page_results is None:
f1d13090 2697 page_results = [] if pagenum > self._pagecount else list(self._pagefunc(pagenum))
7be9ccff 2698 if self._use_cache:
2699 self._cache[pagenum] = page_results
2700 return page_results
2701
2702 def getslice(self, start=0, end=None):
2703 return list(self._getslice(start, end))
2704
2705 def _getslice(self, start, end):
55575225 2706 raise NotImplementedError('This method must be implemented by subclasses')
2707
2708 def __getitem__(self, idx):
f1d13090 2709 assert self._use_cache, 'Indexing PagedList requires cache'
55575225 2710 if not isinstance(idx, int) or idx < 0:
2711 raise TypeError('indices must be non-negative integers')
2712 entries = self.getslice(idx, idx + 1)
d8cf8d97 2713 if not entries:
c07a39ae 2714 raise self.IndexError()
d8cf8d97 2715 return entries[0]
55575225 2716
9c44d242
PH
2717
2718class OnDemandPagedList(PagedList):
a44ca5a4 2719 """Download pages until a page with fewer than the maximum number of results"""
86e5f3ed 2720
7be9ccff 2721 def _getslice(self, start, end):
b7ab0590
PH
2722 for pagenum in itertools.count(start // self._pagesize):
2723 firstid = pagenum * self._pagesize
2724 nextfirstid = pagenum * self._pagesize + self._pagesize
2725 if start >= nextfirstid:
2726 continue
2727
b7ab0590
PH
2728 startv = (
2729 start % self._pagesize
2730 if firstid <= start < nextfirstid
2731 else 0)
b7ab0590
PH
2732 endv = (
2733 ((end - 1) % self._pagesize) + 1
2734 if (end is not None and firstid <= end <= nextfirstid)
2735 else None)
2736
f1d13090 2737 try:
2738 page_results = self.getpage(pagenum)
2739 except Exception:
2740 self._pagecount = pagenum - 1
2741 raise
b7ab0590
PH
2742 if startv != 0 or endv is not None:
2743 page_results = page_results[startv:endv]
7be9ccff 2744 yield from page_results
b7ab0590
PH
2745
2746 # A little optimization - if current page is not "full", ie. does
2747 # not contain page_size videos then we can assume that this page
2748 # is the last one - there are no more ids on further pages -
2749 # i.e. no need to query again.
2750 if len(page_results) + startv < self._pagesize:
2751 break
2752
2753 # If we got the whole page, but the next page is not interesting,
2754 # break out early as well
2755 if end == nextfirstid:
2756 break
81c2f20b
PH
2757
2758
9c44d242 2759class InAdvancePagedList(PagedList):
a44ca5a4 2760 """PagedList with total number of pages known in advance"""
86e5f3ed 2761
9c44d242 2762 def __init__(self, pagefunc, pagecount, pagesize):
7be9ccff 2763 PagedList.__init__(self, pagefunc, pagesize, True)
f1d13090 2764 self._pagecount = pagecount
9c44d242 2765
7be9ccff 2766 def _getslice(self, start, end):
9c44d242 2767 start_page = start // self._pagesize
d37707bd 2768 end_page = self._pagecount if end is None else min(self._pagecount, end // self._pagesize + 1)
9c44d242
PH
2769 skip_elems = start - start_page * self._pagesize
2770 only_more = None if end is None else end - start
2771 for pagenum in range(start_page, end_page):
7be9ccff 2772 page_results = self.getpage(pagenum)
9c44d242 2773 if skip_elems:
7be9ccff 2774 page_results = page_results[skip_elems:]
9c44d242
PH
2775 skip_elems = None
2776 if only_more is not None:
7be9ccff 2777 if len(page_results) < only_more:
2778 only_more -= len(page_results)
9c44d242 2779 else:
7be9ccff 2780 yield from page_results[:only_more]
9c44d242 2781 break
7be9ccff 2782 yield from page_results
9c44d242
PH
2783
2784
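
# Usage sketch (hypothetical `_example_paged_list` helper, not part of the
# module API): each made-up "page" holds three consecutive integers; pages are
# fetched lazily and cached.
def _example_paged_list():
    pages = InAdvancePagedList(lambda n: range(n * 3, (n + 1) * 3), 4, 3)
    assert pages.getslice(2, 7) == [2, 3, 4, 5, 6]
    assert pages[10] == 10
    assert len(pages) == 12
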
7e88d7d7 2785class PlaylistEntries:
2786 MissingEntry = object()
2787 is_exhausted = False
2788
2789 def __init__(self, ydl, info_dict):
7e9a6125 2790 self.ydl = ydl
2791
2792 # _entries must be assigned now since infodict can change during iteration
2793 entries = info_dict.get('entries')
2794 if entries is None:
2795 raise EntryNotInPlaylist('There are no entries')
2796 elif isinstance(entries, list):
2797 self.is_exhausted = True
2798
2799 requested_entries = info_dict.get('requested_entries')
bc5c2f8a 2800 self.is_incomplete = requested_entries is not None
7e9a6125 2801 if self.is_incomplete:
2802 assert self.is_exhausted
bc5c2f8a 2803 self._entries = [self.MissingEntry] * max(requested_entries or [0])
7e9a6125 2804 for i, entry in zip(requested_entries, entries):
2805 self._entries[i - 1] = entry
2806 elif isinstance(entries, (list, PagedList, LazyList)):
2807 self._entries = entries
2808 else:
2809 self._entries = LazyList(entries)
7e88d7d7 2810
2811 PLAYLIST_ITEMS_RE = re.compile(r'''(?x)
2812 (?P<start>[+-]?\d+)?
2813 (?P<range>[:-]
2814 (?P<end>[+-]?\d+|inf(?:inite)?)?
2815 (?::(?P<step>[+-]?\d+))?
2816 )?''')
2817
2818 @classmethod
2819 def parse_playlist_items(cls, string):
2820 for segment in string.split(','):
2821 if not segment:
2822 raise ValueError('There are two or more consecutive commas')
2823 mobj = cls.PLAYLIST_ITEMS_RE.fullmatch(segment)
2824 if not mobj:
2825 raise ValueError(f'{segment!r} is not a valid specification')
2826 start, end, step, has_range = mobj.group('start', 'end', 'step', 'range')
2827 if int_or_none(step) == 0:
2828 raise ValueError(f'Step in {segment!r} cannot be zero')
2829 yield slice(int_or_none(start), float_or_none(end), int_or_none(step)) if has_range else int(start)
2830
2831 def get_requested_items(self):
2832 playlist_items = self.ydl.params.get('playlist_items')
2833 playlist_start = self.ydl.params.get('playliststart', 1)
2834 playlist_end = self.ydl.params.get('playlistend')
2835 # For backwards compatibility, interpret -1 as whole list
2836 if playlist_end in (-1, None):
2837 playlist_end = ''
2838 if not playlist_items:
2839 playlist_items = f'{playlist_start}:{playlist_end}'
2840 elif playlist_start != 1 or playlist_end:
2841 self.ydl.report_warning('Ignoring playliststart and playlistend because playlistitems was given', only_once=True)
2842
2843 for index in self.parse_playlist_items(playlist_items):
2844 for i, entry in self[index]:
2845 yield i, entry
1ac4fd80 2846 if not entry:
2847 continue
7e88d7d7 2848 try:
d21056f4 2849 # The item may have just been added to archive. Don't break due to it
2850 if not self.ydl.params.get('lazy_playlist'):
2851 # TODO: Add auto-generated fields
2852 self.ydl._match_entry(entry, incomplete=True, silent=True)
7e88d7d7 2853 except (ExistingVideoReached, RejectedVideoReached):
2854 return
2855
7e9a6125 2856 def get_full_count(self):
2857 if self.is_exhausted and not self.is_incomplete:
7e88d7d7 2858 return len(self)
2859 elif isinstance(self._entries, InAdvancePagedList):
2860 if self._entries._pagesize == 1:
2861 return self._entries._pagecount
2862
7e88d7d7 2863 @functools.cached_property
2864 def _getter(self):
2865 if isinstance(self._entries, list):
2866 def get_entry(i):
2867 try:
2868 entry = self._entries[i]
2869 except IndexError:
2870 entry = self.MissingEntry
2871 if not self.is_incomplete:
2872 raise self.IndexError()
2873 if entry is self.MissingEntry:
bc5c2f8a 2874 raise EntryNotInPlaylist(f'Entry {i + 1} cannot be found')
7e88d7d7 2875 return entry
2876 else:
2877 def get_entry(i):
2878 try:
2879 return type(self.ydl)._handle_extraction_exceptions(lambda _, i: self._entries[i])(self.ydl, i)
2880 except (LazyList.IndexError, PagedList.IndexError):
2881 raise self.IndexError()
2882 return get_entry
2883
2884 def __getitem__(self, idx):
2885 if isinstance(idx, int):
2886 idx = slice(idx, idx)
2887
2888 # NB: PlaylistEntries[1:10] => (0, 1, ... 9)
2889 step = 1 if idx.step is None else idx.step
2890 if idx.start is None:
2891 start = 0 if step > 0 else len(self) - 1
2892 else:
2893 start = idx.start - 1 if idx.start >= 0 else len(self) + idx.start
2894
2895 # NB: Do not call len(self) when idx == [:]
2896 if idx.stop is None:
2897 stop = 0 if step < 0 else float('inf')
2898 else:
2899 stop = idx.stop - 1 if idx.stop >= 0 else len(self) + idx.stop
2900 stop += [-1, 1][step > 0]
2901
2902 for i in frange(start, stop, step):
2903 if i < 0:
2904 continue
2905 try:
7e9a6125 2906 entry = self._getter(i)
2907 except self.IndexError:
2908 self.is_exhausted = True
2909 if step > 0:
7e88d7d7 2910 break
7e9a6125 2911 continue
7e88d7d7 2912 yield i + 1, entry
2913
2914 def __len__(self):
2915 return len(tuple(self[:]))
2916
2917 class IndexError(IndexError):
2918 pass
2919
2920
81c2f20b 2921def uppercase_escape(s):
676eb3f2 2922 unicode_escape = codecs.getdecoder('unicode_escape')
81c2f20b 2923 return re.sub(
a612753d 2924 r'\\U[0-9a-fA-F]{8}',
676eb3f2
PH
2925 lambda m: unicode_escape(m.group(0))[0],
2926 s)
0fe2ff78
YCH
2927
2928
2929def lowercase_escape(s):
2930 unicode_escape = codecs.getdecoder('unicode_escape')
2931 return re.sub(
2932 r'\\u[0-9a-fA-F]{4}',
2933 lambda m: unicode_escape(m.group(0))[0],
2934 s)
b53466e1 2935
d05cfe06
S
2936
2937def escape_rfc3986(s):
2938 """Escape non-ASCII characters as suggested by RFC 3986"""
f9934b96 2939 return urllib.parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
d05cfe06
S
2940
2941
2942def escape_url(url):
2943 """Escape URL as suggested by RFC 3986"""
14f25df2 2944 url_parsed = urllib.parse.urlparse(url)
d05cfe06 2945 return url_parsed._replace(
efbed08d 2946 netloc=url_parsed.netloc.encode('idna').decode('ascii'),
d05cfe06
S
2947 path=escape_rfc3986(url_parsed.path),
2948 params=escape_rfc3986(url_parsed.params),
2949 query=escape_rfc3986(url_parsed.query),
2950 fragment=escape_rfc3986(url_parsed.fragment)
2951 ).geturl()
2952
62e609ab 2953
96b9e9cf 2954def parse_qs(url, **kwargs):
2955 return urllib.parse.parse_qs(urllib.parse.urlparse(url).query, **kwargs)
4dfbf869 2956
2957
62e609ab
PH
2958def read_batch_urls(batch_fd):
2959 def fixup(url):
14f25df2 2960 if not isinstance(url, str):
62e609ab 2961 url = url.decode('utf-8', 'replace')
8c04f0be 2962 BOM_UTF8 = ('\xef\xbb\xbf', '\ufeff')
2963 for bom in BOM_UTF8:
2964 if url.startswith(bom):
2965 url = url[len(bom):]
2966 url = url.lstrip()
2967 if not url or url.startswith(('#', ';', ']')):
62e609ab 2968 return False
8c04f0be 2969 # "#" cannot be stripped out since it is part of the URI
962ffcf8 2970 # However, it can be safely stripped out if following a whitespace
8c04f0be 2971 return re.split(r'\s#', url, 1)[0].rstrip()
62e609ab
PH
2972
2973 with contextlib.closing(batch_fd) as fd:
2974 return [url for url in map(fixup, fd) if url]
b74fa8cd
JMF
2975
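
# Usage sketch (hypothetical `_example_read_batch_urls` helper, not part of the
# module API): comment lines, blank lines and trailing " #..." annotations are
# stripped from a batch-file-like object.
def _example_read_batch_urls():
    batch = io.StringIO('# a comment\nhttps://example.com/a\n\nhttps://example.com/b #note\n')
    assert read_batch_urls(batch) == ['https://example.com/a', 'https://example.com/b']
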
2976
2977def urlencode_postdata(*args, **kargs):
14f25df2 2978 return urllib.parse.urlencode(*args, **kargs).encode('ascii')
bcf89ce6
PH
2979
2980
45b2ee6f 2981def update_url(url, *, query_update=None, **kwargs):
2982 """Replace URL components specified by kwargs
2983 @param url str or parse url tuple
2984 @param query_update update query
2985 @returns str
2986 """
2987 if isinstance(url, str):
2988 if not kwargs and not query_update:
2989 return url
2990 else:
2991 url = urllib.parse.urlparse(url)
2992 if query_update:
2993 assert 'query' not in kwargs, 'query_update and query cannot be specified at the same time'
2994 kwargs['query'] = urllib.parse.urlencode({
2995 **urllib.parse.parse_qs(url.query),
2996 **query_update
2997 }, True)
2998 return urllib.parse.urlunparse(url._replace(**kwargs))
2999
3000
38f9ef31 3001def update_url_query(url, query):
45b2ee6f 3002 return update_url(url, query_update=query)
16392824 3003
8e60dc75 3004
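
# Usage sketch (hypothetical `_example_update_url` helper, not part of the
# module API): update_url() rewrites individual components, update_url_query()
# merges new parameters into the existing query string.
def _example_update_url():
    assert update_url_query('https://example.com/api?a=1', {'b': 2}) == 'https://example.com/api?a=1&b=2'
    assert update_url('https://example.com/page#frag', fragment='') == 'https://example.com/page'
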
c043c246 3005def update_Request(req, url=None, data=None, headers=None, query=None):
ed0291d1 3006 req_headers = req.headers.copy()
c043c246 3007 req_headers.update(headers or {})
ed0291d1
S
3008 req_data = data or req.data
3009 req_url = update_url_query(url or req.get_full_url(), query)
95cf60e8
S
3010 req_get_method = req.get_method()
3011 if req_get_method == 'HEAD':
3012 req_type = HEADRequest
3013 elif req_get_method == 'PUT':
3014 req_type = PUTRequest
3015 else:
ac668111 3016 req_type = urllib.request.Request
ed0291d1
S
3017 new_req = req_type(
3018 req_url, data=req_data, headers=req_headers,
3019 origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
3020 if hasattr(req, 'timeout'):
3021 new_req.timeout = req.timeout
3022 return new_req
3023
3024
10c87c15 3025def _multipart_encode_impl(data, boundary):
0c265486
YCH
3026 content_type = 'multipart/form-data; boundary=%s' % boundary
3027
3028 out = b''
3029 for k, v in data.items():
3030 out += b'--' + boundary.encode('ascii') + b'\r\n'
14f25df2 3031 if isinstance(k, str):
0f06bcd7 3032 k = k.encode()
14f25df2 3033 if isinstance(v, str):
0f06bcd7 3034 v = v.encode()
0c265486
YCH
3035 # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
3036 # suggests sending UTF-8 directly. Firefox sends UTF-8, too
b2ad479d 3037 content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
0c265486
YCH
3038 if boundary.encode('ascii') in content:
3039 raise ValueError('Boundary overlaps with data')
3040 out += content
3041
3042 out += b'--' + boundary.encode('ascii') + b'--\r\n'
3043
3044 return out, content_type
3045
3046
3047def multipart_encode(data, boundary=None):
3048 '''
3049 Encode a dict to RFC 7578-compliant form-data
3050
3051 data:
3052 A dict where keys and values can be either Unicode or bytes-like
3053 objects.
3054 boundary:
3055 If specified, it must be a Unicode object and is used as the boundary. Otherwise
3056 a random boundary is generated.
3057
3058 Reference: https://tools.ietf.org/html/rfc7578
3059 '''
3060 has_specified_boundary = boundary is not None
3061
3062 while True:
3063 if boundary is None:
3064 boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
3065
3066 try:
10c87c15 3067 out, content_type = _multipart_encode_impl(data, boundary)
0c265486
YCH
3068 break
3069 except ValueError:
3070 if has_specified_boundary:
3071 raise
3072 boundary = None
3073
3074 return out, content_type
3075
3076
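
# Usage sketch (hypothetical `_example_multipart_encode` helper, not part of the
# module API): passing an explicit boundary makes the output deterministic.
def _example_multipart_encode():
    body, content_type = multipart_encode({'field': 'value'}, boundary='BOUNDARY')
    assert content_type == 'multipart/form-data; boundary=BOUNDARY'
    assert body == (b'--BOUNDARY\r\n'
                    b'Content-Disposition: form-data; name="field"\r\n\r\n'
                    b'value\r\n'
                    b'--BOUNDARY--\r\n')
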
b079c26f
SS
3077def is_iterable_like(x, allowed_types=collections.abc.Iterable, blocked_types=NO_DEFAULT):
3078 if blocked_types is NO_DEFAULT:
3079 blocked_types = (str, bytes, collections.abc.Mapping)
3080 return isinstance(x, allowed_types) and not isinstance(x, blocked_types)
3081
3082
3083def variadic(x, allowed_types=NO_DEFAULT):
4823ec9f 3084 if not isinstance(allowed_types, (tuple, type)):
3085 deprecation_warning('allowed_types should be a tuple or a type')
3086 allowed_types = tuple(allowed_types)
6f2287cb 3087 return x if is_iterable_like(x, blocked_types=allowed_types) else (x, )
304ad45a 3088
3089
c4f60dd7 3090def try_call(*funcs, expected_type=None, args=[], kwargs={}):
3091 for f in funcs:
a32a9a7e 3092 try:
c4f60dd7 3093 val = f(*args, **kwargs)
ab029d7e 3094 except (AttributeError, KeyError, TypeError, IndexError, ValueError, ZeroDivisionError):
a32a9a7e
S
3095 pass
3096 else:
c4f60dd7 3097 if expected_type is None or isinstance(val, expected_type):
3098 return val
3099
3100
3101def try_get(src, getter, expected_type=None):
3102 return try_call(*variadic(getter), args=(src,), expected_type=expected_type)
329ca3be
S
3103
3104
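
# Usage sketch (hypothetical `_example_try_helpers` helper, not part of the
# module API): variadic() wraps scalars (and strings) into a tuple, try_call()
# returns the first callable that does not raise, and try_get() digs safely
# into nested structures.
def _example_try_helpers():
    assert variadic('spam') == ('spam',)
    assert variadic(['spam', 'eggs']) == ['spam', 'eggs']
    assert try_call(lambda: 1 // 0, lambda: 42) == 42
    data = {'a': {'b': [1, 2, 3]}}
    assert try_get(data, lambda x: x['a']['b'][2], int) == 3
    assert try_get(data, lambda x: x['missing'], int) is None
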
90137ca4 3105def filter_dict(dct, cndn=lambda _, v: v is not None):
3106 return {k: v for k, v in dct.items() if cndn(k, v)}
3107
3108
6cc62232
S
3109def merge_dicts(*dicts):
3110 merged = {}
3111 for a_dict in dicts:
3112 for k, v in a_dict.items():
90137ca4 3113 if (v is not None and k not in merged
3114 or isinstance(v, str) and merged[k] == ''):
6cc62232
S
3115 merged[k] = v
3116 return merged
3117
3118
8e60dc75 3119def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
14f25df2 3120 return string if isinstance(string, str) else str(string, encoding, errors)
8e60dc75 3121
16392824 3122
a1a530b0
PH
3123US_RATINGS = {
3124 'G': 0,
3125 'PG': 10,
3126 'PG-13': 13,
3127 'R': 16,
3128 'NC': 18,
3129}
fac55558
PH
3130
3131
a8795327 3132TV_PARENTAL_GUIDELINES = {
5a16c9d9
RA
3133 'TV-Y': 0,
3134 'TV-Y7': 7,
3135 'TV-G': 0,
3136 'TV-PG': 0,
3137 'TV-14': 14,
3138 'TV-MA': 17,
a8795327
S
3139}
3140
3141
146c80e2 3142def parse_age_limit(s):
19a03940 3143 # isinstance(False, int) is True. So type() must be used instead
c487cf00 3144 if type(s) is int: # noqa: E721
a8795327 3145 return s if 0 <= s <= 21 else None
19a03940 3146 elif not isinstance(s, str):
d838b1bd 3147 return None
146c80e2 3148 m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
a8795327
S
3149 if m:
3150 return int(m.group('age'))
5c5fae6d 3151 s = s.upper()
a8795327
S
3152 if s in US_RATINGS:
3153 return US_RATINGS[s]
5a16c9d9 3154 m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
b8361187 3155 if m:
5a16c9d9 3156 return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
b8361187 3157 return None
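# Examples for parse_age_limit(), using values from the rating tables above:
#
#   >>> parse_age_limit('PG-13')
#   13
#   >>> parse_age_limit('TV-MA')
#   17
#   >>> parse_age_limit('18+')
#   18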
146c80e2
S
3158
3159
fac55558 3160def strip_jsonp(code):
609a61e3 3161 return re.sub(
5552c9eb 3162 r'''(?sx)^
e9c671d5 3163 (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
5552c9eb
YCH
3164 (?:\s*&&\s*(?P=func_name))?
3165 \s*\(\s*(?P<callback_data>.*)\);?
3166 \s*?(?://[^\n]*)*$''',
3167 r'\g<callback_data>', code)
478c2c61
PH
3168
3169
8f53dc44 3170def js_to_json(code, vars={}, *, strict=False):
5c610515 3171 # vars is a dict of var, val pairs to substitute
0898c5c8 3172 STRING_QUOTES = '\'"`'
a71b812f 3173 STRING_RE = '|'.join(rf'{q}(?:\\.|[^\\{q}])*{q}' for q in STRING_QUOTES)
c843e685 3174 COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
86e5f3ed 3175 SKIP_RE = fr'\s*(?:{COMMENT_RE})?\s*'
4195096e 3176 INTEGER_TABLE = (
86e5f3ed 3177 (fr'(?s)^(0[xX][0-9a-fA-F]+){SKIP_RE}:?$', 16),
3178 (fr'(?s)^(0+[0-7]+){SKIP_RE}:?$', 8),
4195096e
S
3179 )
3180
a71b812f
SS
3181 def process_escape(match):
3182 JSON_PASSTHROUGH_ESCAPES = R'"\bfnrtu'
3183 escape = match.group(1) or match.group(2)
3184
3185 return (Rf'\{escape}' if escape in JSON_PASSTHROUGH_ESCAPES
3186 else R'\u00' if escape == 'x'
3187 else '' if escape == '\n'
3188 else escape)
3189
0898c5c8
SS
3190 def template_substitute(match):
3191 evaluated = js_to_json(match.group(1), vars, strict=strict)
3192 if evaluated[0] == '"':
3193 return json.loads(evaluated)
3194 return evaluated
3195
e05f6939 3196 def fix_kv(m):
e7b6d122
PH
3197 v = m.group(0)
3198 if v in ('true', 'false', 'null'):
3199 return v
421ddcb8
C
3200 elif v in ('undefined', 'void 0'):
3201 return 'null'
8bdd16b4 3202 elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
a71b812f
SS
3203 return ''
3204
3205 if v[0] in STRING_QUOTES:
0898c5c8
SS
3206 v = re.sub(r'(?s)\${([^}]+)}', template_substitute, v[1:-1]) if v[0] == '`' else v[1:-1]
3207 escaped = re.sub(r'(?s)(")|\\(.)', process_escape, v)
a71b812f
SS
3208 return f'"{escaped}"'
3209
3210 for regex, base in INTEGER_TABLE:
3211 im = re.match(regex, v)
3212 if im:
3213 i = int(im.group(1), base)
3214 return f'"{i}":' if v.endswith(':') else str(i)
3215
3216 if v in vars:
d5f043d1
C
3217 try:
3218 if not strict:
3219 json.loads(vars[v])
08e29b9f 3220 except json.JSONDecodeError:
d5f043d1
C
3221 return json.dumps(vars[v])
3222 else:
3223 return vars[v]
89ac4a19 3224
a71b812f
SS
3225 if not strict:
3226 return f'"{v}"'
5c610515 3227
a71b812f 3228 raise ValueError(f'Unknown value: {v}')
e05f6939 3229
8072ef2b 3230 def create_map(mobj):
3231 return json.dumps(dict(json.loads(js_to_json(mobj.group(1) or '[]', vars=vars))))
3232
8072ef2b 3233 code = re.sub(r'new Map\((\[.*?\])?\)', create_map, code)
8f53dc44 3234 if not strict:
3235 code = re.sub(r'new Date\((".+")\)', r'\g<1>', code)
f55523cf 3236 code = re.sub(r'new \w+\((.*?)\)', lambda m: json.dumps(m.group(0)), code)
389896df 3237 code = re.sub(r'parseInt\([^\d]+(\d+)[^\d]+\)', r'\1', code)
3238 code = re.sub(r'\(function\([^)]*\)\s*\{[^}]*\}\s*\)\s*\(\s*(["\'][^)]*["\'])\s*\)', r'\1', code)
febff4c1 3239
a71b812f
SS
3240 return re.sub(rf'''(?sx)
3241 {STRING_RE}|
3242 {COMMENT_RE}|,(?={SKIP_RE}[\]}}])|
421ddcb8 3243 void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
a71b812f
SS
3244 \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{SKIP_RE}:)?|
3245 [0-9]+(?={SKIP_RE}:)|
8bdd16b4 3246 !+
a71b812f 3247 ''', fix_kv, code)
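# Usage sketch: js_to_json() turns a loose JS object literal into strict JSON
# that json.loads() accepts (the input below is illustrative):
#
#   >>> import json
#   >>> json.loads(js_to_json("{a: 1, 'b': `text`, c: undefined, d: 0x10,}"))
#   {'a': 1, 'b': 'text', 'c': None, 'd': 16}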
e05f6939
PH
3248
3249
478c2c61
PH
3250def qualities(quality_ids):
3251 """ Get a numeric quality value out of a list of possible values """
3252 def q(qid):
3253 try:
3254 return quality_ids.index(qid)
3255 except ValueError:
3256 return -1
3257 return q
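# Example: the returned callable ranks a quality by its position in the list.
#
#   >>> q = qualities(['240p', '360p', '720p'])
#   >>> q('720p'), q('240p'), q('4320p')
#   (2, 0, -1)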
3258
acd69589 3259
119e40ef 3260POSTPROCESS_WHEN = ('pre_process', 'after_filter', 'video', 'before_dl', 'post_process', 'after_move', 'after_video', 'playlist')
1e43a6f7 3261
3262
de6000d9 3263DEFAULT_OUTTMPL = {
3264 'default': '%(title)s [%(id)s].%(ext)s',
72755351 3265 'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
de6000d9 3266}
3267OUTTMPL_TYPES = {
72755351 3268 'chapter': None,
de6000d9 3269 'subtitle': None,
3270 'thumbnail': None,
3271 'description': 'description',
3272 'annotation': 'annotations.xml',
3273 'infojson': 'info.json',
08438d2c 3274 'link': None,
3b603dbd 3275 'pl_video': None,
5112f26a 3276 'pl_thumbnail': None,
de6000d9 3277 'pl_description': 'description',
3278 'pl_infojson': 'info.json',
3279}
0a871f68 3280
143db31d 3281# As of [1] format syntax is:
3282# %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
3283# 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
901130bb 3284STR_FORMAT_RE_TMPL = r'''(?x)
3285 (?<!%)(?P<prefix>(?:%%)*)
143db31d 3286 %
524e2e4f 3287 (?P<has_key>\((?P<key>{0})\))?
752cda38 3288 (?P<format>
524e2e4f 3289 (?P<conversion>[#0\-+ ]+)?
3290 (?P<min_width>\d+)?
3291 (?P<precision>\.\d+)?
3292 (?P<len_mod>[hlL])? # unused in python
901130bb 3293 {1} # conversion type
752cda38 3294 )
143db31d 3295'''
3296
7d1eb38a 3297
901130bb 3298STR_FORMAT_TYPES = 'diouxXeEfFgGcrs'
a020a0dc 3299
7d1eb38a 3300
a020a0dc
PH
3301def limit_length(s, length):
3302 """ Add ellipses to overly long strings """
3303 if s is None:
3304 return None
3305 ELLIPSES = '...'
3306 if len(s) > length:
3307 return s[:length - len(ELLIPSES)] + ELLIPSES
3308 return s
48844745
PH
3309
3310
3311def version_tuple(v):
5f9b8394 3312 return tuple(int(e) for e in re.split(r'[-.]', v))
48844745
PH
3313
3314
3315def is_outdated_version(version, limit, assume_new=True):
3316 if not version:
3317 return not assume_new
3318 try:
3319 return version_tuple(version) < version_tuple(limit)
3320 except ValueError:
3321 return not assume_new
732ea2f0
PH
3322
3323
3324def ytdl_is_updateable():
7a5c1cfe 3325 """ Returns whether yt-dlp can be updated with -U """
735d865e 3326
69bec673 3327 from ..update import is_non_updateable
732ea2f0 3328
5d535b4a 3329 return not is_non_updateable()
7d4111ed
PH
3330
3331
3332def args_to_str(args):
3333 # Get a short string representation for a subprocess command
702ccf2d 3334 return ' '.join(compat_shlex_quote(a) for a in args)
2ccd1b10
PH
3335
3336
a44ca5a4 3337def error_to_str(err):
3338 return f'{type(err).__name__}: {err}'
3339
3340
2647c933 3341def mimetype2ext(mt, default=NO_DEFAULT):
3342 if not isinstance(mt, str):
3343 if default is not NO_DEFAULT:
3344 return default
eb9ee194
S
3345 return None
3346
2647c933 3347 MAP = {
3348 # video
f6861ec9 3349 '3gpp': '3gp',
2647c933 3350 'mp2t': 'ts',
3351 'mp4': 'mp4',
3352 'mpeg': 'mpeg',
3353 'mpegurl': 'm3u8',
3354 'quicktime': 'mov',
3355 'webm': 'webm',
3356 'vp9': 'vp9',
f6861ec9 3357 'x-flv': 'flv',
2647c933 3358 'x-m4v': 'm4v',
3359 'x-matroska': 'mkv',
3360 'x-mng': 'mng',
a0d8d704 3361 'x-mp4-fragmented': 'mp4',
2647c933 3362 'x-ms-asf': 'asf',
a0d8d704 3363 'x-ms-wmv': 'wmv',
2647c933 3364 'x-msvideo': 'avi',
3365
3366 # application (streaming playlists)
b4173f15 3367 'dash+xml': 'mpd',
b4173f15 3368 'f4m+xml': 'f4m',
f164b971 3369 'hds+xml': 'f4m',
2647c933 3370 'vnd.apple.mpegurl': 'm3u8',
e910fe2f 3371 'vnd.ms-sstr+xml': 'ism',
2647c933 3372 'x-mpegurl': 'm3u8',
3373
3374 # audio
3375 'audio/mp4': 'm4a',
3376 # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3.
3377 # Using .mp3 as it's the most popular one
3378 'audio/mpeg': 'mp3',
d80ca5de 3379 'audio/webm': 'webm',
2647c933 3380 'audio/x-matroska': 'mka',
3381 'audio/x-mpegurl': 'm3u',
3382 'midi': 'mid',
3383 'ogg': 'ogg',
3384 'wav': 'wav',
3385 'wave': 'wav',
3386 'x-aac': 'aac',
3387 'x-flac': 'flac',
3388 'x-m4a': 'm4a',
3389 'x-realaudio': 'ra',
39e7107d 3390 'x-wav': 'wav',
9359f3d4 3391
2647c933 3392 # image
3393 'avif': 'avif',
3394 'bmp': 'bmp',
3395 'gif': 'gif',
3396 'jpeg': 'jpg',
3397 'png': 'png',
3398 'svg+xml': 'svg',
3399 'tiff': 'tif',
3400 'vnd.wap.wbmp': 'wbmp',
3401 'webp': 'webp',
3402 'x-icon': 'ico',
3403 'x-jng': 'jng',
3404 'x-ms-bmp': 'bmp',
3405
3406 # caption
3407 'filmstrip+json': 'fs',
3408 'smptett+xml': 'tt',
3409 'ttaf+xml': 'dfxp',
3410 'ttml+xml': 'ttml',
3411 'x-ms-sami': 'sami',
9359f3d4 3412
2647c933 3413 # misc
3414 'gzip': 'gz',
9359f3d4
F
3415 'json': 'json',
3416 'xml': 'xml',
3417 'zip': 'zip',
9359f3d4
F
3418 }
3419
2647c933 3420 mimetype = mt.partition(';')[0].strip().lower()
3421 _, _, subtype = mimetype.rpartition('/')
9359f3d4 3422
69bec673 3423 ext = traversal.traverse_obj(MAP, mimetype, subtype, subtype.rsplit('+')[-1])
2647c933 3424 if ext:
3425 return ext
3426 elif default is not NO_DEFAULT:
3427 return default
9359f3d4 3428 return subtype.replace('+', '.')
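# Examples for mimetype2ext(), resolved through the MAP above:
#
#   >>> mimetype2ext('video/x-matroska')
#   'mkv'
#   >>> mimetype2ext('application/vnd.apple.mpegurl; charset=UTF-8')
#   'm3u8'
#   >>> mimetype2ext('application/dash+xml')
#   'mpd'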
c460bdd5
PH
3429
3430
2814f12b
THD
3431def ext2mimetype(ext_or_url):
3432 if not ext_or_url:
3433 return None
3434 if '.' not in ext_or_url:
3435 ext_or_url = f'file.{ext_or_url}'
3436 return mimetypes.guess_type(ext_or_url)[0]
3437
3438
4f3c5e06 3439def parse_codecs(codecs_str):
3440 # http://tools.ietf.org/html/rfc6381
3441 if not codecs_str:
3442 return {}
a0566bbf 3443 split_codecs = list(filter(None, map(
dbf5416a 3444 str.strip, codecs_str.strip().strip(',').split(','))))
3fe75fdc 3445 vcodec, acodec, scodec, hdr = None, None, None, None
a0566bbf 3446 for full_codec in split_codecs:
d816f61f 3447 parts = re.sub(r'0+(?=\d)', '', full_codec).split('.')
3448 if parts[0] in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
3449 'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
3450 if vcodec:
3451 continue
3452 vcodec = full_codec
3453 if parts[0] in ('dvh1', 'dvhe'):
3454 hdr = 'DV'
69bec673 3455 elif parts[0] == 'av1' and traversal.traverse_obj(parts, 3) == '10':
d816f61f 3456 hdr = 'HDR10'
3457 elif parts[:2] == ['vp9', '2']:
3458 hdr = 'HDR10'
71082216 3459 elif parts[0] in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-4',
d816f61f 3460 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
3461 acodec = acodec or full_codec
3462 elif parts[0] in ('stpp', 'wvtt'):
3463 scodec = scodec or full_codec
4f3c5e06 3464 else:
19a03940 3465 write_string(f'WARNING: Unknown codec {full_codec}\n')
3fe75fdc 3466 if vcodec or acodec or scodec:
4f3c5e06 3467 return {
3468 'vcodec': vcodec or 'none',
3469 'acodec': acodec or 'none',
176f1866 3470 'dynamic_range': hdr,
3fe75fdc 3471 **({'scodec': scodec} if scodec is not None else {}),
4f3c5e06 3472 }
b69fd25c 3473 elif len(split_codecs) == 2:
3474 return {
3475 'vcodec': split_codecs[0],
3476 'acodec': split_codecs[1],
3477 }
4f3c5e06 3478 return {}
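# Example for parse_codecs() (a codecs string as typically found in HLS/DASH manifests):
#
#   >>> parse_codecs('avc1.64001f, mp4a.40.2')
#   {'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2', 'dynamic_range': None}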
3479
3480
fc61aff4
LL
3481def get_compatible_ext(*, vcodecs, acodecs, vexts, aexts, preferences=None):
3482 assert len(vcodecs) == len(vexts) and len(acodecs) == len(aexts)
3483
3484 allow_mkv = not preferences or 'mkv' in preferences
3485
3486 if allow_mkv and max(len(acodecs), len(vcodecs)) > 1:
3487 return 'mkv' # TODO: any other format allows this?
3488
3489 # TODO: Not all codecs supported by parse_codecs are handled here
3490 COMPATIBLE_CODECS = {
3491 'mp4': {
71082216 3492 'av1', 'hevc', 'avc1', 'mp4a', 'ac-4', # fourcc (m3u8, mpd)
81b6102d 3493 'h264', 'aacl', 'ec-3', # Set in ISM
fc61aff4
LL
3494 },
3495 'webm': {
3496 'av1', 'vp9', 'vp8', 'opus', 'vrbs',
3497 'vp9x', 'vp8x', # in the webm spec
3498 },
3499 }
3500
69bec673 3501 sanitize_codec = functools.partial(try_get, getter=lambda x: x[0].split('.')[0].replace('0', ''))
8f84770a 3502 vcodec, acodec = sanitize_codec(vcodecs), sanitize_codec(acodecs)
fc61aff4
LL
3503
3504 for ext in preferences or COMPATIBLE_CODECS.keys():
3505 codec_set = COMPATIBLE_CODECS.get(ext, set())
3506 if ext == 'mkv' or codec_set.issuperset((vcodec, acodec)):
3507 return ext
3508
3509 COMPATIBLE_EXTS = (
3510 {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma', 'mov'},
fbb73833 3511 {'webm', 'weba'},
fc61aff4
LL
3512 )
3513 for ext in preferences or vexts:
3514 current_exts = {ext, *vexts, *aexts}
3515 if ext == 'mkv' or current_exts == {ext} or any(
3516 ext_sets.issuperset(current_exts) for ext_sets in COMPATIBLE_EXTS):
3517 return ext
3518 return 'mkv' if allow_mkv else preferences[-1]
3519
3520
2647c933 3521def urlhandle_detect_ext(url_handle, default=NO_DEFAULT):
79298173 3522 getheader = url_handle.headers.get
2ccd1b10 3523
b55ee18f
PH
3524 cd = getheader('Content-Disposition')
3525 if cd:
3526 m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
3527 if m:
3528 e = determine_ext(m.group('filename'), default_ext=None)
3529 if e:
3530 return e
3531
2647c933 3532 meta_ext = getheader('x-amz-meta-name')
3533 if meta_ext:
3534 e = meta_ext.rpartition('.')[2]
3535 if e:
3536 return e
3537
3538 return mimetype2ext(getheader('Content-Type'), default=default)
05900629
PH
3539
3540
1e399778
YCH
3541def encode_data_uri(data, mime_type):
3542 return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
3543
3544
05900629 3545def age_restricted(content_limit, age_limit):
6ec6cb4e 3546 """ Returns True iff the content should be blocked """
05900629
PH
3547
3548 if age_limit is None: # No limit set
3549 return False
3550 if content_limit is None:
3551 return False # Content available for everyone
3552 return age_limit < content_limit
61ca9a80
PH
3553
3554
88f60feb 3555# List of known byte-order-marks (BOM)
a904a7f8
L
3556BOMS = [
3557 (b'\xef\xbb\xbf', 'utf-8'),
3558 (b'\x00\x00\xfe\xff', 'utf-32-be'),
3559 (b'\xff\xfe\x00\x00', 'utf-32-le'),
3560 (b'\xff\xfe', 'utf-16-le'),
3561 (b'\xfe\xff', 'utf-16-be'),
3562]
a904a7f8
L
3563
3564
61ca9a80
PH
3565def is_html(first_bytes):
3566 """ Detect whether a file contains HTML by examining its first bytes. """
3567
80e8493e 3568 encoding = 'utf-8'
61ca9a80 3569 for bom, enc in BOMS:
80e8493e 3570 while first_bytes.startswith(bom):
3571 encoding, first_bytes = enc, first_bytes[len(bom):]
61ca9a80 3572
80e8493e 3573 return re.match(r'^\s*<', first_bytes.decode(encoding, 'replace'))
a055469f
PH
3574
3575
3576def determine_protocol(info_dict):
3577 protocol = info_dict.get('protocol')
3578 if protocol is not None:
3579 return protocol
3580
7de837a5 3581 url = sanitize_url(info_dict['url'])
a055469f
PH
3582 if url.startswith('rtmp'):
3583 return 'rtmp'
3584 elif url.startswith('mms'):
3585 return 'mms'
3586 elif url.startswith('rtsp'):
3587 return 'rtsp'
3588
3589 ext = determine_ext(url)
3590 if ext == 'm3u8':
deae7c17 3591 return 'm3u8' if info_dict.get('is_live') else 'm3u8_native'
a055469f
PH
3592 elif ext == 'f4m':
3593 return 'f4m'
3594
14f25df2 3595 return urllib.parse.urlparse(url).scheme
cfb56d1a
PH
3596
3597
c5e3f849 3598def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False):
3599 """ Render a list of rows, each as a list of values.
3600 Text after a \t will be right aligned """
ec11a9f4 3601 def width(string):
c5e3f849 3602 return len(remove_terminal_sequences(string).replace('\t', ''))
76d321f6 3603
3604 def get_max_lens(table):
ec11a9f4 3605 return [max(width(str(v)) for v in col) for col in zip(*table)]
76d321f6 3606
3607 def filter_using_list(row, filterArray):
d16df59d 3608 return [col for take, col in itertools.zip_longest(filterArray, row, fillvalue=True) if take]
76d321f6 3609
d16df59d 3610 max_lens = get_max_lens(data) if hide_empty else []
3611 header_row = filter_using_list(header_row, max_lens)
3612 data = [filter_using_list(row, max_lens) for row in data]
76d321f6 3613
cfb56d1a 3614 table = [header_row] + data
76d321f6 3615 max_lens = get_max_lens(table)
c5e3f849 3616 extra_gap += 1
76d321f6 3617 if delim:
c5e3f849 3618 table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data
1ed7953a 3619 table[1][-1] = table[1][-1][:-extra_gap * len(delim)] # Remove extra_gap from end of delimiter
ec11a9f4 3620 for row in table:
3621 for pos, text in enumerate(map(str, row)):
c5e3f849 3622 if '\t' in text:
3623 row[pos] = text.replace('\t', ' ' * (max_lens[pos] - width(text))) + ' ' * extra_gap
3624 else:
3625 row[pos] = text + ' ' * (max_lens[pos] - width(text) + extra_gap)
3626 ret = '\n'.join(''.join(row).rstrip() for row in table)
ec11a9f4 3627 return ret
347de493
PH
3628
3629
8f18aca8 3630def _match_one(filter_part, dct, incomplete):
77b87f05 3631 # TODO: Generalize code with YoutubeDL._build_format_filter
a047eeb6 3632 STRING_OPERATORS = {
3633 '*=': operator.contains,
3634 '^=': lambda attr, value: attr.startswith(value),
3635 '$=': lambda attr, value: attr.endswith(value),
3636 '~=': lambda attr, value: re.search(value, attr),
3637 }
347de493 3638 COMPARISON_OPERATORS = {
a047eeb6 3639 **STRING_OPERATORS,
3640 '<=': operator.le, # "<=" must be defined above "<"
347de493 3641 '<': operator.lt,
347de493 3642 '>=': operator.ge,
a047eeb6 3643 '>': operator.gt,
347de493 3644 '=': operator.eq,
347de493 3645 }
a047eeb6 3646
6db9c4d5 3647 if isinstance(incomplete, bool):
3648 is_incomplete = lambda _: incomplete
3649 else:
3650 is_incomplete = lambda k: k in incomplete
3651
64fa820c 3652 operator_rex = re.compile(r'''(?x)
347de493 3653 (?P<key>[a-z_]+)
77b87f05 3654 \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
347de493 3655 (?:
a047eeb6 3656 (?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
3657 (?P<strval>.+?)
347de493 3658 )
347de493 3659 ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
64fa820c 3660 m = operator_rex.fullmatch(filter_part.strip())
347de493 3661 if m:
18f96d12 3662 m = m.groupdict()
3663 unnegated_op = COMPARISON_OPERATORS[m['op']]
3664 if m['negation']:
77b87f05
MT
3665 op = lambda attr, value: not unnegated_op(attr, value)
3666 else:
3667 op = unnegated_op
18f96d12 3668 comparison_value = m['quotedstrval'] or m['strval'] or m['intval']
3669 if m['quote']:
3670 comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote'])
3671 actual_value = dct.get(m['key'])
3672 numeric_comparison = None
f9934b96 3673 if isinstance(actual_value, (int, float)):
e5a088dc
S
3674 # If the original field is a string and the matching comparison value is
3675 # a number, we should respect the origin of the original field
3676 # and process the comparison value as a string (see
18f96d12 3677 # https://github.com/ytdl-org/youtube-dl/issues/11082)
347de493 3678 try:
18f96d12 3679 numeric_comparison = int(comparison_value)
347de493 3680 except ValueError:
18f96d12 3681 numeric_comparison = parse_filesize(comparison_value)
3682 if numeric_comparison is None:
3683 numeric_comparison = parse_filesize(f'{comparison_value}B')
3684 if numeric_comparison is None:
3685 numeric_comparison = parse_duration(comparison_value)
3686 if numeric_comparison is not None and m['op'] in STRING_OPERATORS:
3687 raise ValueError('Operator %s only supports string values!' % m['op'])
347de493 3688 if actual_value is None:
6db9c4d5 3689 return is_incomplete(m['key']) or m['none_inclusive']
18f96d12 3690 return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)
347de493
PH
3691
3692 UNARY_OPERATORS = {
1cc47c66
S
3693 '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
3694 '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
347de493 3695 }
64fa820c 3696 operator_rex = re.compile(r'''(?x)
347de493 3697 (?P<op>%s)\s*(?P<key>[a-z_]+)
347de493 3698 ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
64fa820c 3699 m = operator_rex.fullmatch(filter_part.strip())
347de493
PH
3700 if m:
3701 op = UNARY_OPERATORS[m.group('op')]
3702 actual_value = dct.get(m.group('key'))
6db9c4d5 3703 if is_incomplete(m.group('key')) and actual_value is None:
8f18aca8 3704 return True
347de493
PH
3705 return op(actual_value)
3706
3707 raise ValueError('Invalid filter part %r' % filter_part)
3708
3709
8f18aca8 3710def match_str(filter_str, dct, incomplete=False):
6db9c4d5 3711 """ Filter a dictionary with a simple string syntax.
3712 @returns Whether the filter passes
3713 @param incomplete Set of keys that are expected to be missing from dct.
3714 Can be True/False to indicate all/none of the keys may be missing.
3715 All conditions on incomplete keys pass if the key is missing
8f18aca8 3716 """
347de493 3717 return all(
8f18aca8 3718 _match_one(filter_part.replace(r'\&', '&'), dct, incomplete)
a047eeb6 3719 for filter_part in re.split(r'(?<!\\)&', filter_str))
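# Usage sketch for the match_str() filter mini-language (illustrative metadata):
#
#   >>> info = {'like_count': 190, 'title': 'Test video', 'is_live': False}
#   >>> match_str('like_count > 100 & title ~= (?i)test', info)
#   True
#   >>> match_str('!is_live & like_count <= 100', info)
#   False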
347de493
PH
3720
3721
fe2ce85a 3722def match_filter_func(filters, breaking_filters=None):
3723 if not filters and not breaking_filters:
d1b5f70b 3724 return None
fe2ce85a 3725 breaking_filters = match_filter_func(breaking_filters) or (lambda _, __: None)
3726 filters = set(variadic(filters or []))
d1b5f70b 3727
492272fe 3728 interactive = '-' in filters
3729 if interactive:
3730 filters.remove('-')
3731
3732 def _match_func(info_dict, incomplete=False):
fe2ce85a 3733 ret = breaking_filters(info_dict, incomplete)
3734 if ret is not None:
3735 raise RejectedVideoReached(ret)
3736
492272fe 3737 if not filters or any(match_str(f, info_dict, incomplete) for f in filters):
3738 return NO_DEFAULT if interactive and not incomplete else None
347de493 3739 else:
3bec830a 3740 video_title = info_dict.get('title') or info_dict.get('id') or 'entry'
b1a7cd05 3741 filter_str = ') | ('.join(map(str.strip, filters))
3742 return f'{video_title} does not pass filter ({filter_str}), skipping ..'
347de493 3743 return _match_func
91410c9b
PH
3744
3745
f2df4071 3746class download_range_func:
3747 def __init__(self, chapters, ranges):
3748 self.chapters, self.ranges = chapters, ranges
3749
3750 def __call__(self, info_dict, ydl):
0500ee3d 3751 if not self.ranges and not self.chapters:
3752 yield {}
3753
5ec1b6b7 3754 warning = ('There are no chapters matching the regex' if info_dict.get('chapters')
56ba69e4 3755 else 'Cannot match chapters since chapter information is unavailable')
f2df4071 3756 for regex in self.chapters or []:
5ec1b6b7 3757 for i, chapter in enumerate(info_dict.get('chapters') or []):
3758 if re.search(regex, chapter['title']):
3759 warning = None
3760 yield {**chapter, 'index': i}
f2df4071 3761 if self.chapters and warning:
5ec1b6b7 3762 ydl.to_screen(f'[info] {info_dict["id"]}: {warning}')
3763
f2df4071 3764 yield from ({'start_time': start, 'end_time': end} for start, end in self.ranges or [])
5ec1b6b7 3765
f2df4071 3766 def __eq__(self, other):
3767 return (isinstance(other, download_range_func)
3768 and self.chapters == other.chapters and self.ranges == other.ranges)
5ec1b6b7 3769
71df9b7f 3770 def __repr__(self):
a5387729 3771 return f'{__name__}.{type(self).__name__}({self.chapters}, {self.ranges})'
71df9b7f 3772
5ec1b6b7 3773
bf6427d2
YCH
3774def parse_dfxp_time_expr(time_expr):
3775 if not time_expr:
d631d5f9 3776 return
bf6427d2 3777
1d485a1a 3778 mobj = re.match(rf'^(?P<time_offset>{NUMBER_RE})s?$', time_expr)
bf6427d2
YCH
3779 if mobj:
3780 return float(mobj.group('time_offset'))
3781
db2fe38b 3782 mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
bf6427d2 3783 if mobj:
db2fe38b 3784 return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
bf6427d2
YCH
3785
3786
c1c924ab 3787def srt_subtitles_timecode(seconds):
aa7785f8 3788 return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)
3789
3790
3791def ass_subtitles_timecode(seconds):
3792 time = timetuple_from_msec(seconds * 1000)
3793 return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)
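# Examples (assuming the timetuple_from_msec() helper defined earlier in this module):
#
#   >>> srt_subtitles_timecode(61.25)
#   '00:01:01,250'
#   >>> ass_subtitles_timecode(61.25)
#   '0:01:01.25'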
bf6427d2
YCH
3794
3795
3796def dfxp2srt(dfxp_data):
3869028f
YCH
3797 '''
3798 @param dfxp_data A bytes-like object containing DFXP data
3799 @returns A unicode object containing converted SRT data
3800 '''
5b995f71 3801 LEGACY_NAMESPACES = (
3869028f
YCH
3802 (b'http://www.w3.org/ns/ttml', [
3803 b'http://www.w3.org/2004/11/ttaf1',
3804 b'http://www.w3.org/2006/04/ttaf1',
3805 b'http://www.w3.org/2006/10/ttaf1',
5b995f71 3806 ]),
3869028f
YCH
3807 (b'http://www.w3.org/ns/ttml#styling', [
3808 b'http://www.w3.org/ns/ttml#style',
5b995f71
RA
3809 ]),
3810 )
3811
3812 SUPPORTED_STYLING = [
3813 'color',
3814 'fontFamily',
3815 'fontSize',
3816 'fontStyle',
3817 'fontWeight',
3818 'textDecoration'
3819 ]
3820
4e335771 3821 _x = functools.partial(xpath_with_ns, ns_map={
261f4730 3822 'xml': 'http://www.w3.org/XML/1998/namespace',
4e335771 3823 'ttml': 'http://www.w3.org/ns/ttml',
5b995f71 3824 'tts': 'http://www.w3.org/ns/ttml#styling',
4e335771 3825 })
bf6427d2 3826
5b995f71
RA
3827 styles = {}
3828 default_style = {}
3829
86e5f3ed 3830 class TTMLPElementParser:
5b995f71
RA
3831 _out = ''
3832 _unclosed_elements = []
3833 _applied_styles = []
bf6427d2 3834
2b14cb56 3835 def start(self, tag, attrib):
5b995f71
RA
3836 if tag in (_x('ttml:br'), 'br'):
3837 self._out += '\n'
3838 else:
3839 unclosed_elements = []
3840 style = {}
3841 element_style_id = attrib.get('style')
3842 if default_style:
3843 style.update(default_style)
3844 if element_style_id:
3845 style.update(styles.get(element_style_id, {}))
3846 for prop in SUPPORTED_STYLING:
3847 prop_val = attrib.get(_x('tts:' + prop))
3848 if prop_val:
3849 style[prop] = prop_val
3850 if style:
3851 font = ''
3852 for k, v in sorted(style.items()):
3853 if self._applied_styles and self._applied_styles[-1].get(k) == v:
3854 continue
3855 if k == 'color':
3856 font += ' color="%s"' % v
3857 elif k == 'fontSize':
3858 font += ' size="%s"' % v
3859 elif k == 'fontFamily':
3860 font += ' face="%s"' % v
3861 elif k == 'fontWeight' and v == 'bold':
3862 self._out += '<b>'
3863 unclosed_elements.append('b')
3864 elif k == 'fontStyle' and v == 'italic':
3865 self._out += '<i>'
3866 unclosed_elements.append('i')
3867 elif k == 'textDecoration' and v == 'underline':
3868 self._out += '<u>'
3869 unclosed_elements.append('u')
3870 if font:
3871 self._out += '<font' + font + '>'
3872 unclosed_elements.append('font')
3873 applied_style = {}
3874 if self._applied_styles:
3875 applied_style.update(self._applied_styles[-1])
3876 applied_style.update(style)
3877 self._applied_styles.append(applied_style)
3878 self._unclosed_elements.append(unclosed_elements)
bf6427d2 3879
2b14cb56 3880 def end(self, tag):
5b995f71
RA
3881 if tag not in (_x('ttml:br'), 'br'):
3882 unclosed_elements = self._unclosed_elements.pop()
3883 for element in reversed(unclosed_elements):
3884 self._out += '</%s>' % element
3885 if unclosed_elements and self._applied_styles:
3886 self._applied_styles.pop()
bf6427d2 3887
2b14cb56 3888 def data(self, data):
5b995f71 3889 self._out += data
2b14cb56 3890
3891 def close(self):
5b995f71 3892 return self._out.strip()
2b14cb56 3893
6a765f13 3894 # Fix UTF-8 encoded file wrongly marked as UTF-16. See https://github.com/yt-dlp/yt-dlp/issues/6543#issuecomment-1477169870
3895 # This will not trigger false positives since only UTF-8 text is being replaced
3896 dfxp_data = dfxp_data.replace(b'encoding=\'UTF-16\'', b'encoding=\'UTF-8\'')
3897
2b14cb56 3898 def parse_node(node):
3899 target = TTMLPElementParser()
3900 parser = xml.etree.ElementTree.XMLParser(target=target)
3901 parser.feed(xml.etree.ElementTree.tostring(node))
3902 return parser.close()
bf6427d2 3903
5b995f71
RA
3904 for k, v in LEGACY_NAMESPACES:
3905 for ns in v:
3906 dfxp_data = dfxp_data.replace(ns, k)
3907
3869028f 3908 dfxp = compat_etree_fromstring(dfxp_data)
bf6427d2 3909 out = []
5b995f71 3910 paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
1b0427e6
YCH
3911
3912 if not paras:
3913 raise ValueError('Invalid dfxp/TTML subtitle')
bf6427d2 3914
5b995f71
RA
3915 repeat = False
3916 while True:
3917 for style in dfxp.findall(_x('.//ttml:style')):
261f4730
RA
3918 style_id = style.get('id') or style.get(_x('xml:id'))
3919 if not style_id:
3920 continue
5b995f71
RA
3921 parent_style_id = style.get('style')
3922 if parent_style_id:
3923 if parent_style_id not in styles:
3924 repeat = True
3925 continue
3926 styles[style_id] = styles[parent_style_id].copy()
3927 for prop in SUPPORTED_STYLING:
3928 prop_val = style.get(_x('tts:' + prop))
3929 if prop_val:
3930 styles.setdefault(style_id, {})[prop] = prop_val
3931 if repeat:
3932 repeat = False
3933 else:
3934 break
3935
3936 for p in ('body', 'div'):
3937 ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
3938 if ele is None:
3939 continue
3940 style = styles.get(ele.get('style'))
3941 if not style:
3942 continue
3943 default_style.update(style)
3944
bf6427d2 3945 for para, index in zip(paras, itertools.count(1)):
d631d5f9 3946 begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
7dff0363 3947 end_time = parse_dfxp_time_expr(para.attrib.get('end'))
d631d5f9
YCH
3948 dur = parse_dfxp_time_expr(para.attrib.get('dur'))
3949 if begin_time is None:
3950 continue
7dff0363 3951 if not end_time:
d631d5f9
YCH
3952 if not dur:
3953 continue
3954 end_time = begin_time + dur
bf6427d2
YCH
3955 out.append('%d\n%s --> %s\n%s\n\n' % (
3956 index,
c1c924ab
YCH
3957 srt_subtitles_timecode(begin_time),
3958 srt_subtitles_timecode(end_time),
bf6427d2
YCH
3959 parse_node(para)))
3960
3961 return ''.join(out)
3962
3963
c487cf00 3964def cli_option(params, command_option, param, separator=None):
66e289ba 3965 param = params.get(param)
c487cf00 3966 return ([] if param is None
3967 else [command_option, str(param)] if separator is None
3968 else [f'{command_option}{separator}{param}'])
66e289ba
S
3969
3970
3971def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
3972 param = params.get(param)
c487cf00 3973 assert param in (True, False, None)
3974 return cli_option({True: true_value, False: false_value}, command_option, param, separator)
66e289ba
S
3975
3976
3977def cli_valueless_option(params, command_option, param, expected_value=True):
c487cf00 3978 return [command_option] if params.get(param) == expected_value else []
66e289ba
S
3979
3980
e92caff5 3981def cli_configuration_args(argdict, keys, default=[], use_compat=True):
eab9b2bc 3982 if isinstance(argdict, (list, tuple)): # for backward compatibility
e92caff5 3983 if use_compat:
5b1ecbb3 3984 return argdict
3985 else:
3986 argdict = None
eab9b2bc 3987 if argdict is None:
5b1ecbb3 3988 return default
eab9b2bc 3989 assert isinstance(argdict, dict)
3990
e92caff5 3991 assert isinstance(keys, (list, tuple))
3992 for key_list in keys:
e92caff5 3993 arg_list = list(filter(
3994 lambda x: x is not None,
6606817a 3995 [argdict.get(key.lower()) for key in variadic(key_list)]))
e92caff5 3996 if arg_list:
3997 return [arg for args in arg_list for arg in args]
3998 return default
66e289ba 3999
6251555f 4000
330690a2 4001def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compat=True):
4002 main_key, exe = main_key.lower(), exe.lower()
4003 root_key = exe if main_key == exe else f'{main_key}+{exe}'
4004 keys = [f'{root_key}{k}' for k in (keys or [''])]
4005 if root_key in keys:
4006 if main_key != exe:
4007 keys.append((main_key, exe))
4008 keys.append('default')
4009 else:
4010 use_compat = False
4011 return cli_configuration_args(argdict, keys, default, use_compat)
4012
66e289ba 4013
86e5f3ed 4014class ISO639Utils:
39672624
YCH
4015 # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
4016 _lang_map = {
4017 'aa': 'aar',
4018 'ab': 'abk',
4019 'ae': 'ave',
4020 'af': 'afr',
4021 'ak': 'aka',
4022 'am': 'amh',
4023 'an': 'arg',
4024 'ar': 'ara',
4025 'as': 'asm',
4026 'av': 'ava',
4027 'ay': 'aym',
4028 'az': 'aze',
4029 'ba': 'bak',
4030 'be': 'bel',
4031 'bg': 'bul',
4032 'bh': 'bih',
4033 'bi': 'bis',
4034 'bm': 'bam',
4035 'bn': 'ben',
4036 'bo': 'bod',
4037 'br': 'bre',
4038 'bs': 'bos',
4039 'ca': 'cat',
4040 'ce': 'che',
4041 'ch': 'cha',
4042 'co': 'cos',
4043 'cr': 'cre',
4044 'cs': 'ces',
4045 'cu': 'chu',
4046 'cv': 'chv',
4047 'cy': 'cym',
4048 'da': 'dan',
4049 'de': 'deu',
4050 'dv': 'div',
4051 'dz': 'dzo',
4052 'ee': 'ewe',
4053 'el': 'ell',
4054 'en': 'eng',
4055 'eo': 'epo',
4056 'es': 'spa',
4057 'et': 'est',
4058 'eu': 'eus',
4059 'fa': 'fas',
4060 'ff': 'ful',
4061 'fi': 'fin',
4062 'fj': 'fij',
4063 'fo': 'fao',
4064 'fr': 'fra',
4065 'fy': 'fry',
4066 'ga': 'gle',
4067 'gd': 'gla',
4068 'gl': 'glg',
4069 'gn': 'grn',
4070 'gu': 'guj',
4071 'gv': 'glv',
4072 'ha': 'hau',
4073 'he': 'heb',
b7acc835 4074 'iw': 'heb', # Replaced by he in 1989 revision
39672624
YCH
4075 'hi': 'hin',
4076 'ho': 'hmo',
4077 'hr': 'hrv',
4078 'ht': 'hat',
4079 'hu': 'hun',
4080 'hy': 'hye',
4081 'hz': 'her',
4082 'ia': 'ina',
4083 'id': 'ind',
b7acc835 4084 'in': 'ind', # Replaced by id in 1989 revision
39672624
YCH
4085 'ie': 'ile',
4086 'ig': 'ibo',
4087 'ii': 'iii',
4088 'ik': 'ipk',
4089 'io': 'ido',
4090 'is': 'isl',
4091 'it': 'ita',
4092 'iu': 'iku',
4093 'ja': 'jpn',
4094 'jv': 'jav',
4095 'ka': 'kat',
4096 'kg': 'kon',
4097 'ki': 'kik',
4098 'kj': 'kua',
4099 'kk': 'kaz',
4100 'kl': 'kal',
4101 'km': 'khm',
4102 'kn': 'kan',
4103 'ko': 'kor',
4104 'kr': 'kau',
4105 'ks': 'kas',
4106 'ku': 'kur',
4107 'kv': 'kom',
4108 'kw': 'cor',
4109 'ky': 'kir',
4110 'la': 'lat',
4111 'lb': 'ltz',
4112 'lg': 'lug',
4113 'li': 'lim',
4114 'ln': 'lin',
4115 'lo': 'lao',
4116 'lt': 'lit',
4117 'lu': 'lub',
4118 'lv': 'lav',
4119 'mg': 'mlg',
4120 'mh': 'mah',
4121 'mi': 'mri',
4122 'mk': 'mkd',
4123 'ml': 'mal',
4124 'mn': 'mon',
4125 'mr': 'mar',
4126 'ms': 'msa',
4127 'mt': 'mlt',
4128 'my': 'mya',
4129 'na': 'nau',
4130 'nb': 'nob',
4131 'nd': 'nde',
4132 'ne': 'nep',
4133 'ng': 'ndo',
4134 'nl': 'nld',
4135 'nn': 'nno',
4136 'no': 'nor',
4137 'nr': 'nbl',
4138 'nv': 'nav',
4139 'ny': 'nya',
4140 'oc': 'oci',
4141 'oj': 'oji',
4142 'om': 'orm',
4143 'or': 'ori',
4144 'os': 'oss',
4145 'pa': 'pan',
4146 'pi': 'pli',
4147 'pl': 'pol',
4148 'ps': 'pus',
4149 'pt': 'por',
4150 'qu': 'que',
4151 'rm': 'roh',
4152 'rn': 'run',
4153 'ro': 'ron',
4154 'ru': 'rus',
4155 'rw': 'kin',
4156 'sa': 'san',
4157 'sc': 'srd',
4158 'sd': 'snd',
4159 'se': 'sme',
4160 'sg': 'sag',
4161 'si': 'sin',
4162 'sk': 'slk',
4163 'sl': 'slv',
4164 'sm': 'smo',
4165 'sn': 'sna',
4166 'so': 'som',
4167 'sq': 'sqi',
4168 'sr': 'srp',
4169 'ss': 'ssw',
4170 'st': 'sot',
4171 'su': 'sun',
4172 'sv': 'swe',
4173 'sw': 'swa',
4174 'ta': 'tam',
4175 'te': 'tel',
4176 'tg': 'tgk',
4177 'th': 'tha',
4178 'ti': 'tir',
4179 'tk': 'tuk',
4180 'tl': 'tgl',
4181 'tn': 'tsn',
4182 'to': 'ton',
4183 'tr': 'tur',
4184 'ts': 'tso',
4185 'tt': 'tat',
4186 'tw': 'twi',
4187 'ty': 'tah',
4188 'ug': 'uig',
4189 'uk': 'ukr',
4190 'ur': 'urd',
4191 'uz': 'uzb',
4192 've': 'ven',
4193 'vi': 'vie',
4194 'vo': 'vol',
4195 'wa': 'wln',
4196 'wo': 'wol',
4197 'xh': 'xho',
4198 'yi': 'yid',
e9a50fba 4199 'ji': 'yid', # Replaced by yi in 1989 revision
39672624
YCH
4200 'yo': 'yor',
4201 'za': 'zha',
4202 'zh': 'zho',
4203 'zu': 'zul',
4204 }
4205
4206 @classmethod
4207 def short2long(cls, code):
4208 """Convert language code from ISO 639-1 to ISO 639-2/T"""
4209 return cls._lang_map.get(code[:2])
4210
4211 @classmethod
4212 def long2short(cls, code):
4213 """Convert language code from ISO 639-2/T to ISO 639-1"""
4214 for short_name, long_name in cls._lang_map.items():
4215 if long_name == code:
4216 return short_name
4217
4218
86e5f3ed 4219class ISO3166Utils:
4eb10f66
YCH
4220 # From http://data.okfn.org/data/core/country-list
4221 _country_map = {
4222 'AF': 'Afghanistan',
4223 'AX': 'Åland Islands',
4224 'AL': 'Albania',
4225 'DZ': 'Algeria',
4226 'AS': 'American Samoa',
4227 'AD': 'Andorra',
4228 'AO': 'Angola',
4229 'AI': 'Anguilla',
4230 'AQ': 'Antarctica',
4231 'AG': 'Antigua and Barbuda',
4232 'AR': 'Argentina',
4233 'AM': 'Armenia',
4234 'AW': 'Aruba',
4235 'AU': 'Australia',
4236 'AT': 'Austria',
4237 'AZ': 'Azerbaijan',
4238 'BS': 'Bahamas',
4239 'BH': 'Bahrain',
4240 'BD': 'Bangladesh',
4241 'BB': 'Barbados',
4242 'BY': 'Belarus',
4243 'BE': 'Belgium',
4244 'BZ': 'Belize',
4245 'BJ': 'Benin',
4246 'BM': 'Bermuda',
4247 'BT': 'Bhutan',
4248 'BO': 'Bolivia, Plurinational State of',
4249 'BQ': 'Bonaire, Sint Eustatius and Saba',
4250 'BA': 'Bosnia and Herzegovina',
4251 'BW': 'Botswana',
4252 'BV': 'Bouvet Island',
4253 'BR': 'Brazil',
4254 'IO': 'British Indian Ocean Territory',
4255 'BN': 'Brunei Darussalam',
4256 'BG': 'Bulgaria',
4257 'BF': 'Burkina Faso',
4258 'BI': 'Burundi',
4259 'KH': 'Cambodia',
4260 'CM': 'Cameroon',
4261 'CA': 'Canada',
4262 'CV': 'Cape Verde',
4263 'KY': 'Cayman Islands',
4264 'CF': 'Central African Republic',
4265 'TD': 'Chad',
4266 'CL': 'Chile',
4267 'CN': 'China',
4268 'CX': 'Christmas Island',
4269 'CC': 'Cocos (Keeling) Islands',
4270 'CO': 'Colombia',
4271 'KM': 'Comoros',
4272 'CG': 'Congo',
4273 'CD': 'Congo, the Democratic Republic of the',
4274 'CK': 'Cook Islands',
4275 'CR': 'Costa Rica',
4276 'CI': 'Côte d\'Ivoire',
4277 'HR': 'Croatia',
4278 'CU': 'Cuba',
4279 'CW': 'Curaçao',
4280 'CY': 'Cyprus',
4281 'CZ': 'Czech Republic',
4282 'DK': 'Denmark',
4283 'DJ': 'Djibouti',
4284 'DM': 'Dominica',
4285 'DO': 'Dominican Republic',
4286 'EC': 'Ecuador',
4287 'EG': 'Egypt',
4288 'SV': 'El Salvador',
4289 'GQ': 'Equatorial Guinea',
4290 'ER': 'Eritrea',
4291 'EE': 'Estonia',
4292 'ET': 'Ethiopia',
4293 'FK': 'Falkland Islands (Malvinas)',
4294 'FO': 'Faroe Islands',
4295 'FJ': 'Fiji',
4296 'FI': 'Finland',
4297 'FR': 'France',
4298 'GF': 'French Guiana',
4299 'PF': 'French Polynesia',
4300 'TF': 'French Southern Territories',
4301 'GA': 'Gabon',
4302 'GM': 'Gambia',
4303 'GE': 'Georgia',
4304 'DE': 'Germany',
4305 'GH': 'Ghana',
4306 'GI': 'Gibraltar',
4307 'GR': 'Greece',
4308 'GL': 'Greenland',
4309 'GD': 'Grenada',
4310 'GP': 'Guadeloupe',
4311 'GU': 'Guam',
4312 'GT': 'Guatemala',
4313 'GG': 'Guernsey',
4314 'GN': 'Guinea',
4315 'GW': 'Guinea-Bissau',
4316 'GY': 'Guyana',
4317 'HT': 'Haiti',
4318 'HM': 'Heard Island and McDonald Islands',
4319 'VA': 'Holy See (Vatican City State)',
4320 'HN': 'Honduras',
4321 'HK': 'Hong Kong',
4322 'HU': 'Hungary',
4323 'IS': 'Iceland',
4324 'IN': 'India',
4325 'ID': 'Indonesia',
4326 'IR': 'Iran, Islamic Republic of',
4327 'IQ': 'Iraq',
4328 'IE': 'Ireland',
4329 'IM': 'Isle of Man',
4330 'IL': 'Israel',
4331 'IT': 'Italy',
4332 'JM': 'Jamaica',
4333 'JP': 'Japan',
4334 'JE': 'Jersey',
4335 'JO': 'Jordan',
4336 'KZ': 'Kazakhstan',
4337 'KE': 'Kenya',
4338 'KI': 'Kiribati',
4339 'KP': 'Korea, Democratic People\'s Republic of',
4340 'KR': 'Korea, Republic of',
4341 'KW': 'Kuwait',
4342 'KG': 'Kyrgyzstan',
4343 'LA': 'Lao People\'s Democratic Republic',
4344 'LV': 'Latvia',
4345 'LB': 'Lebanon',
4346 'LS': 'Lesotho',
4347 'LR': 'Liberia',
4348 'LY': 'Libya',
4349 'LI': 'Liechtenstein',
4350 'LT': 'Lithuania',
4351 'LU': 'Luxembourg',
4352 'MO': 'Macao',
4353 'MK': 'Macedonia, the Former Yugoslav Republic of',
4354 'MG': 'Madagascar',
4355 'MW': 'Malawi',
4356 'MY': 'Malaysia',
4357 'MV': 'Maldives',
4358 'ML': 'Mali',
4359 'MT': 'Malta',
4360 'MH': 'Marshall Islands',
4361 'MQ': 'Martinique',
4362 'MR': 'Mauritania',
4363 'MU': 'Mauritius',
4364 'YT': 'Mayotte',
4365 'MX': 'Mexico',
4366 'FM': 'Micronesia, Federated States of',
4367 'MD': 'Moldova, Republic of',
4368 'MC': 'Monaco',
4369 'MN': 'Mongolia',
4370 'ME': 'Montenegro',
4371 'MS': 'Montserrat',
4372 'MA': 'Morocco',
4373 'MZ': 'Mozambique',
4374 'MM': 'Myanmar',
4375 'NA': 'Namibia',
4376 'NR': 'Nauru',
4377 'NP': 'Nepal',
4378 'NL': 'Netherlands',
4379 'NC': 'New Caledonia',
4380 'NZ': 'New Zealand',
4381 'NI': 'Nicaragua',
4382 'NE': 'Niger',
4383 'NG': 'Nigeria',
4384 'NU': 'Niue',
4385 'NF': 'Norfolk Island',
4386 'MP': 'Northern Mariana Islands',
4387 'NO': 'Norway',
4388 'OM': 'Oman',
4389 'PK': 'Pakistan',
4390 'PW': 'Palau',
4391 'PS': 'Palestine, State of',
4392 'PA': 'Panama',
4393 'PG': 'Papua New Guinea',
4394 'PY': 'Paraguay',
4395 'PE': 'Peru',
4396 'PH': 'Philippines',
4397 'PN': 'Pitcairn',
4398 'PL': 'Poland',
4399 'PT': 'Portugal',
4400 'PR': 'Puerto Rico',
4401 'QA': 'Qatar',
4402 'RE': 'Réunion',
4403 'RO': 'Romania',
4404 'RU': 'Russian Federation',
4405 'RW': 'Rwanda',
4406 'BL': 'Saint Barthélemy',
4407 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
4408 'KN': 'Saint Kitts and Nevis',
4409 'LC': 'Saint Lucia',
4410 'MF': 'Saint Martin (French part)',
4411 'PM': 'Saint Pierre and Miquelon',
4412 'VC': 'Saint Vincent and the Grenadines',
4413 'WS': 'Samoa',
4414 'SM': 'San Marino',
4415 'ST': 'Sao Tome and Principe',
4416 'SA': 'Saudi Arabia',
4417 'SN': 'Senegal',
4418 'RS': 'Serbia',
4419 'SC': 'Seychelles',
4420 'SL': 'Sierra Leone',
4421 'SG': 'Singapore',
4422 'SX': 'Sint Maarten (Dutch part)',
4423 'SK': 'Slovakia',
4424 'SI': 'Slovenia',
4425 'SB': 'Solomon Islands',
4426 'SO': 'Somalia',
4427 'ZA': 'South Africa',
4428 'GS': 'South Georgia and the South Sandwich Islands',
4429 'SS': 'South Sudan',
4430 'ES': 'Spain',
4431 'LK': 'Sri Lanka',
4432 'SD': 'Sudan',
4433 'SR': 'Suriname',
4434 'SJ': 'Svalbard and Jan Mayen',
4435 'SZ': 'Swaziland',
4436 'SE': 'Sweden',
4437 'CH': 'Switzerland',
4438 'SY': 'Syrian Arab Republic',
4439 'TW': 'Taiwan, Province of China',
4440 'TJ': 'Tajikistan',
4441 'TZ': 'Tanzania, United Republic of',
4442 'TH': 'Thailand',
4443 'TL': 'Timor-Leste',
4444 'TG': 'Togo',
4445 'TK': 'Tokelau',
4446 'TO': 'Tonga',
4447 'TT': 'Trinidad and Tobago',
4448 'TN': 'Tunisia',
4449 'TR': 'Turkey',
4450 'TM': 'Turkmenistan',
4451 'TC': 'Turks and Caicos Islands',
4452 'TV': 'Tuvalu',
4453 'UG': 'Uganda',
4454 'UA': 'Ukraine',
4455 'AE': 'United Arab Emirates',
4456 'GB': 'United Kingdom',
4457 'US': 'United States',
4458 'UM': 'United States Minor Outlying Islands',
4459 'UY': 'Uruguay',
4460 'UZ': 'Uzbekistan',
4461 'VU': 'Vanuatu',
4462 'VE': 'Venezuela, Bolivarian Republic of',
4463 'VN': 'Viet Nam',
4464 'VG': 'Virgin Islands, British',
4465 'VI': 'Virgin Islands, U.S.',
4466 'WF': 'Wallis and Futuna',
4467 'EH': 'Western Sahara',
4468 'YE': 'Yemen',
4469 'ZM': 'Zambia',
4470 'ZW': 'Zimbabwe',
2f97cc61 4471 # Not ISO 3166 codes, but used for IP blocks
4472 'AP': 'Asia/Pacific Region',
4473 'EU': 'Europe',
4eb10f66
YCH
4474 }
4475
4476 @classmethod
4477 def short2full(cls, code):
4478 """Convert an ISO 3166-2 country code to the corresponding full name"""
4479 return cls._country_map.get(code.upper())
4480
4481
86e5f3ed 4482class GeoUtils:
773f291d
S
4483 # Major IPv4 address blocks per country
4484 _country_ip_map = {
53896ca5 4485 'AD': '46.172.224.0/19',
773f291d
S
4486 'AE': '94.200.0.0/13',
4487 'AF': '149.54.0.0/17',
4488 'AG': '209.59.64.0/18',
4489 'AI': '204.14.248.0/21',
4490 'AL': '46.99.0.0/16',
4491 'AM': '46.70.0.0/15',
4492 'AO': '105.168.0.0/13',
53896ca5
S
4493 'AP': '182.50.184.0/21',
4494 'AQ': '23.154.160.0/24',
773f291d
S
4495 'AR': '181.0.0.0/12',
4496 'AS': '202.70.112.0/20',
53896ca5 4497 'AT': '77.116.0.0/14',
773f291d
S
4498 'AU': '1.128.0.0/11',
4499 'AW': '181.41.0.0/18',
53896ca5
S
4500 'AX': '185.217.4.0/22',
4501 'AZ': '5.197.0.0/16',
773f291d
S
4502 'BA': '31.176.128.0/17',
4503 'BB': '65.48.128.0/17',
4504 'BD': '114.130.0.0/16',
4505 'BE': '57.0.0.0/8',
53896ca5 4506 'BF': '102.178.0.0/15',
773f291d
S
4507 'BG': '95.42.0.0/15',
4508 'BH': '37.131.0.0/17',
4509 'BI': '154.117.192.0/18',
4510 'BJ': '137.255.0.0/16',
53896ca5 4511 'BL': '185.212.72.0/23',
773f291d
S
4512 'BM': '196.12.64.0/18',
4513 'BN': '156.31.0.0/16',
4514 'BO': '161.56.0.0/16',
4515 'BQ': '161.0.80.0/20',
53896ca5 4516 'BR': '191.128.0.0/12',
773f291d
S
4517 'BS': '24.51.64.0/18',
4518 'BT': '119.2.96.0/19',
4519 'BW': '168.167.0.0/16',
4520 'BY': '178.120.0.0/13',
4521 'BZ': '179.42.192.0/18',
4522 'CA': '99.224.0.0/11',
4523 'CD': '41.243.0.0/16',
53896ca5
S
4524 'CF': '197.242.176.0/21',
4525 'CG': '160.113.0.0/16',
773f291d 4526 'CH': '85.0.0.0/13',
53896ca5 4527 'CI': '102.136.0.0/14',
773f291d
S
4528 'CK': '202.65.32.0/19',
4529 'CL': '152.172.0.0/14',
53896ca5 4530 'CM': '102.244.0.0/14',
773f291d
S
4531 'CN': '36.128.0.0/10',
4532 'CO': '181.240.0.0/12',
4533 'CR': '201.192.0.0/12',
4534 'CU': '152.206.0.0/15',
4535 'CV': '165.90.96.0/19',
4536 'CW': '190.88.128.0/17',
53896ca5 4537 'CY': '31.153.0.0/16',
773f291d
S
4538 'CZ': '88.100.0.0/14',
4539 'DE': '53.0.0.0/8',
4540 'DJ': '197.241.0.0/17',
4541 'DK': '87.48.0.0/12',
4542 'DM': '192.243.48.0/20',
4543 'DO': '152.166.0.0/15',
4544 'DZ': '41.96.0.0/12',
4545 'EC': '186.68.0.0/15',
4546 'EE': '90.190.0.0/15',
4547 'EG': '156.160.0.0/11',
4548 'ER': '196.200.96.0/20',
4549 'ES': '88.0.0.0/11',
4550 'ET': '196.188.0.0/14',
4551 'EU': '2.16.0.0/13',
4552 'FI': '91.152.0.0/13',
4553 'FJ': '144.120.0.0/16',
53896ca5 4554 'FK': '80.73.208.0/21',
773f291d
S
4555 'FM': '119.252.112.0/20',
4556 'FO': '88.85.32.0/19',
4557 'FR': '90.0.0.0/9',
4558 'GA': '41.158.0.0/15',
4559 'GB': '25.0.0.0/8',
4560 'GD': '74.122.88.0/21',
4561 'GE': '31.146.0.0/16',
4562 'GF': '161.22.64.0/18',
4563 'GG': '62.68.160.0/19',
53896ca5
S
4564 'GH': '154.160.0.0/12',
4565 'GI': '95.164.0.0/16',
773f291d
S
4566 'GL': '88.83.0.0/19',
4567 'GM': '160.182.0.0/15',
4568 'GN': '197.149.192.0/18',
4569 'GP': '104.250.0.0/19',
4570 'GQ': '105.235.224.0/20',
4571 'GR': '94.64.0.0/13',
4572 'GT': '168.234.0.0/16',
4573 'GU': '168.123.0.0/16',
4574 'GW': '197.214.80.0/20',
4575 'GY': '181.41.64.0/18',
4576 'HK': '113.252.0.0/14',
4577 'HN': '181.210.0.0/16',
4578 'HR': '93.136.0.0/13',
4579 'HT': '148.102.128.0/17',
4580 'HU': '84.0.0.0/14',
4581 'ID': '39.192.0.0/10',
4582 'IE': '87.32.0.0/12',
4583 'IL': '79.176.0.0/13',
4584 'IM': '5.62.80.0/20',
4585 'IN': '117.192.0.0/10',
4586 'IO': '203.83.48.0/21',
4587 'IQ': '37.236.0.0/14',
4588 'IR': '2.176.0.0/12',
4589 'IS': '82.221.0.0/16',
4590 'IT': '79.0.0.0/10',
4591 'JE': '87.244.64.0/18',
4592 'JM': '72.27.0.0/17',
4593 'JO': '176.29.0.0/16',
53896ca5 4594 'JP': '133.0.0.0/8',
773f291d
S
4595 'KE': '105.48.0.0/12',
4596 'KG': '158.181.128.0/17',
4597 'KH': '36.37.128.0/17',
4598 'KI': '103.25.140.0/22',
4599 'KM': '197.255.224.0/20',
53896ca5 4600 'KN': '198.167.192.0/19',
773f291d
S
4601 'KP': '175.45.176.0/22',
4602 'KR': '175.192.0.0/10',
4603 'KW': '37.36.0.0/14',
4604 'KY': '64.96.0.0/15',
4605 'KZ': '2.72.0.0/13',
4606 'LA': '115.84.64.0/18',
4607 'LB': '178.135.0.0/16',
53896ca5 4608 'LC': '24.92.144.0/20',
773f291d
S
4609 'LI': '82.117.0.0/19',
4610 'LK': '112.134.0.0/15',
53896ca5 4611 'LR': '102.183.0.0/16',
773f291d
S
4612 'LS': '129.232.0.0/17',
4613 'LT': '78.56.0.0/13',
4614 'LU': '188.42.0.0/16',
4615 'LV': '46.109.0.0/16',
4616 'LY': '41.252.0.0/14',
4617 'MA': '105.128.0.0/11',
4618 'MC': '88.209.64.0/18',
4619 'MD': '37.246.0.0/16',
4620 'ME': '178.175.0.0/17',
4621 'MF': '74.112.232.0/21',
4622 'MG': '154.126.0.0/17',
4623 'MH': '117.103.88.0/21',
4624 'MK': '77.28.0.0/15',
4625 'ML': '154.118.128.0/18',
4626 'MM': '37.111.0.0/17',
4627 'MN': '49.0.128.0/17',
4628 'MO': '60.246.0.0/16',
4629 'MP': '202.88.64.0/20',
4630 'MQ': '109.203.224.0/19',
4631 'MR': '41.188.64.0/18',
4632 'MS': '208.90.112.0/22',
4633 'MT': '46.11.0.0/16',
4634 'MU': '105.16.0.0/12',
4635 'MV': '27.114.128.0/18',
53896ca5 4636 'MW': '102.70.0.0/15',
773f291d
S
4637 'MX': '187.192.0.0/11',
4638 'MY': '175.136.0.0/13',
4639 'MZ': '197.218.0.0/15',
4640 'NA': '41.182.0.0/16',
4641 'NC': '101.101.0.0/18',
4642 'NE': '197.214.0.0/18',
4643 'NF': '203.17.240.0/22',
4644 'NG': '105.112.0.0/12',
4645 'NI': '186.76.0.0/15',
4646 'NL': '145.96.0.0/11',
4647 'NO': '84.208.0.0/13',
4648 'NP': '36.252.0.0/15',
4649 'NR': '203.98.224.0/19',
4650 'NU': '49.156.48.0/22',
4651 'NZ': '49.224.0.0/14',
4652 'OM': '5.36.0.0/15',
4653 'PA': '186.72.0.0/15',
4654 'PE': '186.160.0.0/14',
4655 'PF': '123.50.64.0/18',
4656 'PG': '124.240.192.0/19',
4657 'PH': '49.144.0.0/13',
4658 'PK': '39.32.0.0/11',
4659 'PL': '83.0.0.0/11',
4660 'PM': '70.36.0.0/20',
4661 'PR': '66.50.0.0/16',
4662 'PS': '188.161.0.0/16',
4663 'PT': '85.240.0.0/13',
4664 'PW': '202.124.224.0/20',
4665 'PY': '181.120.0.0/14',
4666 'QA': '37.210.0.0/15',
53896ca5 4667 'RE': '102.35.0.0/16',
773f291d 4668 'RO': '79.112.0.0/13',
53896ca5 4669 'RS': '93.86.0.0/15',
773f291d 4670 'RU': '5.136.0.0/13',
53896ca5 4671 'RW': '41.186.0.0/16',
773f291d
S
4672 'SA': '188.48.0.0/13',
4673 'SB': '202.1.160.0/19',
4674 'SC': '154.192.0.0/11',
53896ca5 4675 'SD': '102.120.0.0/13',
773f291d 4676 'SE': '78.64.0.0/12',
53896ca5 4677 'SG': '8.128.0.0/10',
773f291d
S
4678 'SI': '188.196.0.0/14',
4679 'SK': '78.98.0.0/15',
53896ca5 4680 'SL': '102.143.0.0/17',
773f291d
S
4681 'SM': '89.186.32.0/19',
4682 'SN': '41.82.0.0/15',
53896ca5 4683 'SO': '154.115.192.0/18',
773f291d
S
4684 'SR': '186.179.128.0/17',
4685 'SS': '105.235.208.0/21',
4686 'ST': '197.159.160.0/19',
4687 'SV': '168.243.0.0/16',
4688 'SX': '190.102.0.0/20',
4689 'SY': '5.0.0.0/16',
4690 'SZ': '41.84.224.0/19',
4691 'TC': '65.255.48.0/20',
4692 'TD': '154.68.128.0/19',
4693 'TG': '196.168.0.0/14',
4694 'TH': '171.96.0.0/13',
4695 'TJ': '85.9.128.0/18',
4696 'TK': '27.96.24.0/21',
4697 'TL': '180.189.160.0/20',
4698 'TM': '95.85.96.0/19',
4699 'TN': '197.0.0.0/11',
4700 'TO': '175.176.144.0/21',
4701 'TR': '78.160.0.0/11',
4702 'TT': '186.44.0.0/15',
4703 'TV': '202.2.96.0/19',
4704 'TW': '120.96.0.0/11',
4705 'TZ': '156.156.0.0/14',
53896ca5
S
4706 'UA': '37.52.0.0/14',
4707 'UG': '102.80.0.0/13',
4708 'US': '6.0.0.0/8',
773f291d 4709 'UY': '167.56.0.0/13',
53896ca5 4710 'UZ': '84.54.64.0/18',
773f291d 4711 'VA': '212.77.0.0/19',
53896ca5 4712 'VC': '207.191.240.0/21',
773f291d 4713 'VE': '186.88.0.0/13',
53896ca5 4714 'VG': '66.81.192.0/20',
773f291d
S
4715 'VI': '146.226.0.0/16',
4716 'VN': '14.160.0.0/11',
4717 'VU': '202.80.32.0/20',
4718 'WF': '117.20.32.0/21',
4719 'WS': '202.4.32.0/19',
4720 'YE': '134.35.0.0/16',
4721 'YT': '41.242.116.0/22',
4722 'ZA': '41.0.0.0/11',
53896ca5
S
4723 'ZM': '102.144.0.0/13',
4724 'ZW': '102.177.192.0/18',
773f291d
S
4725 }
4726
4727 @classmethod
5f95927a
S
4728 def random_ipv4(cls, code_or_block):
4729 if len(code_or_block) == 2:
4730 block = cls._country_ip_map.get(code_or_block.upper())
4731 if not block:
4732 return None
4733 else:
4734 block = code_or_block
773f291d 4735 addr, preflen = block.split('/')
ac668111 4736 addr_min = struct.unpack('!L', socket.inet_aton(addr))[0]
773f291d 4737 addr_max = addr_min | (0xffffffff >> int(preflen))
14f25df2 4738 return str(socket.inet_ntoa(
ac668111 4739 struct.pack('!L', random.randint(addr_min, addr_max))))
773f291d
S
4740
4741
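# Usage sketch for GeoUtils.random_ipv4() (the explicit CIDR below is illustrative):
#
#   >>> GeoUtils.random_ipv4('DE')          # random address within 53.0.0.0/8
#   >>> GeoUtils.random_ipv4('10.0.0.0/8')  # an explicit block also works
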
ac668111 4742class PerRequestProxyHandler(urllib.request.ProxyHandler):
2461f79d
PH
4743 def __init__(self, proxies=None):
4744 # Set default handlers
4745 for type in ('http', 'https'):
4746 setattr(self, '%s_open' % type,
4747 lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
4748 meth(r, proxy, type))
ac668111 4749 urllib.request.ProxyHandler.__init__(self, proxies)
2461f79d 4750
91410c9b 4751 def proxy_open(self, req, proxy, type):
2461f79d 4752 req_proxy = req.headers.get('Ytdl-request-proxy')
91410c9b
PH
4753 if req_proxy is not None:
4754 proxy = req_proxy
2461f79d
PH
4755 del req.headers['Ytdl-request-proxy']
4756
4757 if proxy == '__noproxy__':
4758 return None # No Proxy
14f25df2 4759 if urllib.parse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
71aff188 4760 req.add_header('Ytdl-socks-proxy', proxy)
7a5c1cfe 4761 # yt-dlp's http/https handlers handle wrapping the socket with SOCKS
71aff188 4762 return None
ac668111 4763 return urllib.request.ProxyHandler.proxy_open(
91410c9b 4764 self, req, proxy, type)
5bc880b9
YCH
4765
4766
0a5445dd
YCH
4767# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
4768# released into Public Domain
4769# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
4770
4771def long_to_bytes(n, blocksize=0):
4772 """long_to_bytes(n:long, blocksize:int) : string
4773 Convert a long integer to a byte string.
4774
4775 If optional blocksize is given and greater than zero, pad the front of the
4776 byte string with binary zeros so that the length is a multiple of
4777 blocksize.
4778 """
4779 # after much testing, this algorithm was deemed to be the fastest
4780 s = b''
4781 n = int(n)
4782 while n > 0:
ac668111 4783 s = struct.pack('>I', n & 0xffffffff) + s
0a5445dd
YCH
4784 n = n >> 32
4785 # strip off leading zeros
4786 for i in range(len(s)):
4787 if s[i] != b'\000'[0]:
4788 break
4789 else:
4790 # only happens when n == 0
4791 s = b'\000'
4792 i = 0
4793 s = s[i:]
4794 # add back some pad bytes. this could be done more efficiently w.r.t. the
4795 # de-padding being done above, but sigh...
4796 if blocksize > 0 and len(s) % blocksize:
4797 s = (blocksize - len(s) % blocksize) * b'\000' + s
4798 return s
4799
4800
4801def bytes_to_long(s):
4802 """bytes_to_long(string) : long
4803 Convert a byte string to a long integer.
4804
4805 This is (essentially) the inverse of long_to_bytes().
4806 """
4807 acc = 0
4808 length = len(s)
4809 if length % 4:
4810 extra = (4 - length % 4)
4811 s = b'\000' * extra + s
4812 length = length + extra
4813 for i in range(0, length, 4):
ac668111 4814 acc = (acc << 32) + struct.unpack('>I', s[i:i + 4])[0]
0a5445dd
YCH
4815 return acc
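# Example round-trip:
#
#   >>> long_to_bytes(65537, blocksize=4)
#   b'\x00\x01\x00\x01'
#   >>> bytes_to_long(b'\x00\x01\x00\x01')
#   65537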
4816
4817
5bc880b9
YCH
4818def ohdave_rsa_encrypt(data, exponent, modulus):
4819 '''
4820 Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
4821
4822 Input:
4823 data: data to encrypt, bytes-like object
4824 exponent, modulus: parameter e and N of RSA algorithm, both integer
4825 Output: hex string of encrypted data
4826
4827 Limitation: supports one block encryption only
4828 '''
4829
4830 payload = int(binascii.hexlify(data[::-1]), 16)
4831 encrypted = pow(payload, exponent, modulus)
4832 return '%x' % encrypted
81bdc8fd
YCH
4833
4834
f48409c7
YCH
4835def pkcs1pad(data, length):
4836 """
4837 Pad input data with the PKCS#1 scheme
4838
4839 @param {int[]} data input data
4840 @param {int} length target length
4841 @returns {int[]} padded data
4842 """
4843 if len(data) > length - 11:
4844 raise ValueError('Input data too long for PKCS#1 padding')
4845
4846 pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
4847 return [0, 2] + pseudo_random + [0] + data
4848
4849
7b2c3f47 4850def _base_n_table(n, table):
4851 if not table and not n:
4852 raise ValueError('Either table or n must be specified')
612f2be5 4853 table = (table or '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')[:n]
4854
44f14eb4 4855 if n and n != len(table):
612f2be5 4856 raise ValueError(f'base {n} exceeds table length {len(table)}')
4857 return table
59f898b7 4858
5eb6bdce 4859
7b2c3f47 4860def encode_base_n(num, n=None, table=None):
4861 """Convert given int to a base-n string"""
612f2be5 4862 table = _base_n_table(n, table)
7b2c3f47 4863 if not num:
5eb6bdce
YCH
4864 return table[0]
4865
7b2c3f47 4866 result, base = '', len(table)
81bdc8fd 4867 while num:
7b2c3f47 4868 result = table[num % base] + result
612f2be5 4869 num = num // base
7b2c3f47 4870 return result
4871
4872
4873def decode_base_n(string, n=None, table=None):
4874 """Convert given base-n string to int"""
4875 table = {char: index for index, char in enumerate(_base_n_table(n, table))}
4876 result, base = 0, len(table)
4877 for char in string:
4878 result = result * base + table[char]
4879 return result
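# Example round-trip using the default table truncated to base 16:
#
#   >>> encode_base_n(255, 16)
#   'ff'
#   >>> decode_base_n('ff', 16)
#   255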
4880
4881
f52354a8 4882def decode_packed_codes(code):
06b3fe29 4883 mobj = re.search(PACKED_CODES_RE, code)
a0566bbf 4884 obfuscated_code, base, count, symbols = mobj.groups()
f52354a8
YCH
4885 base = int(base)
4886 count = int(count)
4887 symbols = symbols.split('|')
4888 symbol_table = {}
4889
4890 while count:
4891 count -= 1
5eb6bdce 4892 base_n_count = encode_base_n(count, base)
f52354a8
YCH
4893 symbol_table[base_n_count] = symbols[count] or base_n_count
4894
4895 return re.sub(
4896 r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
a0566bbf 4897 obfuscated_code)
e154c651 4898
4899
4900def caesar(s, alphabet, shift):
4901 if shift == 0:
4902 return s
4903 l = len(alphabet)
4904 return ''.join(
4905 alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
4906 for c in s)
4907
4908
4909def rot47(s):
4910 return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)
4911
4912
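# Usage sketch (editor's illustration; the helper below is hypothetical and never called
# by yt-dlp): caesar() shifts only characters found in the given alphabet, and rot47()
# is its own inverse over the 94 printable ASCII characters.
def _example_rot47():
    assert caesar('abc', 'abcdefghijklmnopqrstuvwxyz', 1) == 'bcd'
    assert rot47('Hello') == 'w6==@'
    assert rot47(rot47('Some obfuscated text')) == 'Some obfuscated text'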
e154c651 4913def parse_m3u8_attributes(attrib):
4914 info = {}
4915 for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
4916 if val.startswith('"'):
4917 val = val[1:-1]
4918 info[key] = val
4919 return info
4920
4921
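# Usage sketch (editor's illustration; the helper below is hypothetical and never called
# by yt-dlp): parse an M3U8 attribute list into a dict, stripping quotes from quoted values.
def _example_parse_m3u8_attributes():
    assert parse_m3u8_attributes('BANDWIDTH=1280000,CODECS="mp4a.40.2,avc1.64001f"') == {
        'BANDWIDTH': '1280000',
        'CODECS': 'mp4a.40.2,avc1.64001f',
    }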
4922def urshift(val, n):
4923 return val >> n if val >= 0 else (val + 0x100000000) >> n
4924
4925
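# Usage sketch (editor's illustration; the helper below is hypothetical and never called
# by yt-dlp): urshift() emulates JavaScript's unsigned right shift (>>>) on 32-bit values.
def _example_urshift():
    assert urshift(16, 2) == 4
    assert urshift(-1, 1) == 0x7FFFFFFF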
efa97bdc 4926def write_xattr(path, key, value):
6f7563be 4927 # Windows: Write xattrs to NTFS Alternate Data Streams:
4928 # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
4929 if compat_os_name == 'nt':
4930 assert ':' not in key
4931 assert os.path.exists(path)
4932
4933 try:
6f7563be 4934 with open(f'{path}:{key}', 'wb') as f:
4935 f.write(value)
86e5f3ed 4936 except OSError as e:
efa97bdc 4937 raise XAttrMetadataError(e.errno, e.strerror)
6f7563be 4938 return
efa97bdc 4939
6f7563be 4940 # UNIX Method 1. Use xattrs/pyxattrs modules
efa97bdc 4941
6f7563be 4942 setxattr = None
4943 if getattr(xattr, '_yt_dlp__identifier', None) == 'pyxattr':
4944 # Unicode arguments are not supported in pyxattr until version 0.5.0
4945 # See https://github.com/ytdl-org/youtube-dl/issues/5498
4946 if version_tuple(xattr.__version__) >= (0, 5, 0):
4947 setxattr = xattr.set
4948 elif xattr:
4949 setxattr = xattr.setxattr
efa97bdc 4950
6f7563be 4951 if setxattr:
4952 try:
4953 setxattr(path, key, value)
4954 except OSError as e:
4955 raise XAttrMetadataError(e.errno, e.strerror)
4956 return
efa97bdc 4957
6f7563be 4958 # UNIX Method 2. Use setfattr/xattr executables
4959 exe = ('setfattr' if check_executable('setfattr', ['--version'])
4960 else 'xattr' if check_executable('xattr', ['-h']) else None)
4961 if not exe:
4962 raise XAttrUnavailableError(
4963 'Couldn\'t find a tool to set the xattrs. Install either the python "xattr" or "pyxattr" modules or the '
4964 + ('"xattr" binary' if sys.platform != 'linux' else 'GNU "attr" package (which contains the "setfattr" tool)'))
efa97bdc 4965
0f06bcd7 4966 value = value.decode()
6f7563be 4967 try:
f0c9fb96 4968 _, stderr, returncode = Popen.run(
6f7563be 4969 [exe, '-w', key, value, path] if exe == 'xattr' else [exe, '-n', key, '-v', value, path],
e121e3ce 4970 text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
6f7563be 4971 except OSError as e:
4972 raise XAttrMetadataError(e.errno, e.strerror)
f0c9fb96 4973 if returncode:
4974 raise XAttrMetadataError(returncode, stderr)
4975
4976
4977def random_birthday(year_field, month_field, day_field):
4978 start_date = datetime.date(1950, 1, 1)
4979 end_date = datetime.date(1995, 12, 31)
4980 offset = random.randint(0, (end_date - start_date).days)
4981 random_date = start_date + datetime.timedelta(offset)
0c265486 4982 return {
4983 year_field: str(random_date.year),
4984 month_field: str(random_date.month),
4985 day_field: str(random_date.day),
0c265486 4986 }
732044af 4987
c76eb41b 4988
4989def find_available_port(interface=''):
4990 try:
4991 with socket.socket() as sock:
4992 sock.bind((interface, 0))
4993 return sock.getsockname()[1]
4994 except OSError:
4995 return None
4996
4997
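# Usage sketch (editor's illustration; the helper below is hypothetical and never called
# by yt-dlp): ask the OS for a free ephemeral port, then bind to it; note there is an
# inherent race between probing the port and actually binding it.
def _example_find_available_port():
    port = find_available_port('127.0.0.1')
    if port is not None:
        server = socket.create_server(('127.0.0.1', port))
        server.close()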
732044af 4998# Templates for internet shortcut files, which are plain text files.
e5a998f3 4999DOT_URL_LINK_TEMPLATE = '''\
732044af 5000[InternetShortcut]
5001URL=%(url)s
e5a998f3 5002'''
732044af 5003
e5a998f3 5004DOT_WEBLOC_LINK_TEMPLATE = '''\
732044af 5005<?xml version="1.0" encoding="UTF-8"?>
5006<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
5007<plist version="1.0">
5008<dict>
5009\t<key>URL</key>
5010\t<string>%(url)s</string>
5011</dict>
5012</plist>
e5a998f3 5013'''
732044af 5014
e5a998f3 5015DOT_DESKTOP_LINK_TEMPLATE = '''\
732044af 5016[Desktop Entry]
5017Encoding=UTF-8
5018Name=%(filename)s
5019Type=Link
5020URL=%(url)s
5021Icon=text-html
e5a998f3 5022'''
732044af 5023
08438d2c 5024LINK_TEMPLATES = {
5025 'url': DOT_URL_LINK_TEMPLATE,
5026 'desktop': DOT_DESKTOP_LINK_TEMPLATE,
5027 'webloc': DOT_WEBLOC_LINK_TEMPLATE,
5028}
5029
732044af 5030
5031def iri_to_uri(iri):
5032 """
5033 Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).
5034
5035 The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact.
5036 """
5037
14f25df2 5038 iri_parts = urllib.parse.urlparse(iri)
732044af 5039
5040 if '[' in iri_parts.netloc:
5041 raise ValueError('IPv6 URIs are not yet supported.')
5042 # Querying `.netloc`, when there's only one bracket, also raises a ValueError.
5043
5044 # The `safe` argument values that the following code uses contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.
5045
5046 net_location = ''
5047 if iri_parts.username:
f9934b96 5048 net_location += urllib.parse.quote(iri_parts.username, safe=r"!$%&'()*+,~")
732044af 5049 if iri_parts.password is not None:
f9934b96 5050 net_location += ':' + urllib.parse.quote(iri_parts.password, safe=r"!$%&'()*+,~")
732044af 5051 net_location += '@'
5052
0f06bcd7 5053 net_location += iri_parts.hostname.encode('idna').decode() # Punycode for Unicode hostnames.
732044af 5054 # The 'idna' encoding produces ASCII text.
5055 if iri_parts.port is not None and iri_parts.port != 80:
5056 net_location += ':' + str(iri_parts.port)
5057
f9934b96 5058 return urllib.parse.urlunparse(
732044af 5059 (iri_parts.scheme,
5060 net_location,
5061
f9934b96 5062 urllib.parse.quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),
732044af 5063
5064 # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
f9934b96 5065 urllib.parse.quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),
732044af 5066
5067 # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
f9934b96 5068 urllib.parse.quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),
732044af 5069
f9934b96 5070 urllib.parse.quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))
732044af 5071
5072 # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
5073
5074
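# Usage sketch (editor's illustration; the helper below is hypothetical and never called
# by yt-dlp): non-ASCII path characters are percent-encoded as UTF-8, while already-ASCII
# parts of the IRI are left untouched.
def _example_iri_to_uri():
    assert iri_to_uri('https://example.com/dé') == 'https://example.com/d%C3%A9'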
5075def to_high_limit_path(path):
5076 if sys.platform in ['win32', 'cygwin']:
5077 # Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited.
e5a998f3 5078 return '\\\\?\\' + os.path.abspath(path)
732044af 5079
5080 return path
76d321f6 5081
c76eb41b 5082
7b2c3f47 5083def format_field(obj, field=None, template='%s', ignore=NO_DEFAULT, default='', func=IDENTITY):
69bec673 5084 val = traversal.traverse_obj(obj, *variadic(field))
6f2287cb 5085 if not val if ignore is NO_DEFAULT else val in variadic(ignore):
e0ddbd02 5086 return default
7b2c3f47 5087 return template % func(val)
00dd0cd5 5088
5089
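# Usage sketch (editor's illustration; the helper below is hypothetical and never called
# by yt-dlp): format a field from an info dict, falling back to a default when the value
# is missing or empty.
def _example_format_field():
    assert format_field({'height': 1080}, 'height', '%sp') == '1080p'
    assert format_field({}, 'height', '%sp', default='unknown') == 'unknown'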
5090def clean_podcast_url(url):
5091 return re.sub(r'''(?x)
5092 (?:
5093 (?:
5094 chtbl\.com/track|
5095 media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
5096 play\.podtrac\.com
5097 )/[^/]+|
5098 (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
5099 flex\.acast\.com|
5100 pd(?:
5101 cn\.co| # https://podcorn.com/analytics-prefix/
5102 st\.fm # https://podsights.com/docs/
5103 )/e
5104 )/''', '', url)
5105
5106
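# Usage sketch (editor's illustration; the URL is made up and the helper below is never
# called by yt-dlp): strip a known tracking/analytics prefix, leaving the direct media URL.
def _example_clean_podcast_url():
    assert clean_podcast_url(
        'https://www.podtrac.com/pts/redirect.mp3/traffic.example.com/ep1.mp3'
    ) == 'https://traffic.example.com/ep1.mp3'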
5107_HEX_TABLE = '0123456789abcdef'
5108
5109
5110def random_uuidv4():
5111 return re.sub(r'[xy]', lambda x: _HEX_TABLE[random.randint(0, 15)], 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')
0202b52a 5112
5113
5114def make_dir(path, to_screen=None):
5115 try:
5116 dn = os.path.dirname(path)
5117 if dn:
5118 os.makedirs(dn, exist_ok=True)
0202b52a 5119 return True
86e5f3ed 5120 except OSError as err:
0202b52a 5121 if callable(to_screen):
69bec673 5122 to_screen(f'unable to create directory: {err}')
0202b52a 5123 return False
f74980cb 5124
5125
5126def get_executable_path():
69bec673 5127 from ..update import _get_variant_and_executable_path
c487cf00 5128
b5899f4f 5129 return os.path.dirname(os.path.abspath(_get_variant_and_executable_path()[1]))
f74980cb 5130
5131
8e40b9d1 5132def get_user_config_dirs(package_name):
5133 # .config (e.g. ~/.config/package_name)
5134 xdg_config_home = os.getenv('XDG_CONFIG_HOME') or compat_expanduser('~/.config')
773c272d 5135 yield os.path.join(xdg_config_home, package_name)
5136
5137 # appdata (%APPDATA%/package_name)
5138 appdata_dir = os.getenv('appdata')
5139 if appdata_dir:
773c272d 5140 yield os.path.join(appdata_dir, package_name)
5141
5142 # home (~/.package_name)
773c272d 5143 yield os.path.join(compat_expanduser('~'), f'.{package_name}')
5144
5145
5146def get_system_config_dirs(package_name):
8e40b9d1 5147 # /etc/package_name
773c272d 5148 yield os.path.join('/etc', package_name)
06167fbb 5149
5150
3e9b66d7 5151def time_seconds(**kwargs):
5152 """
5153 Returns the current time in seconds since the epoch (1970-01-01T00:00:00Z), shifted by the given timedelta kwargs (a UTC offset, e.g. hours=9)
5154 """
5155 return time.time() + datetime.timedelta(**kwargs).total_seconds()
5156
5157
5158# Create a JSON Web Signature (JWS) using the HS256 algorithm;
5159# the result is in JWS Compact Serialization format.
5160# Implemented following JWT: https://www.rfc-editor.org/rfc/rfc7519.html
5161# and JWS: https://www.rfc-editor.org/rfc/rfc7515.html
5162def jwt_encode_hs256(payload_data, key, headers={}):
5163 header_data = {
5164 'alg': 'HS256',
5165 'typ': 'JWT',
5166 }
5167 if headers:
5168 header_data.update(headers)
0f06bcd7 5169 header_b64 = base64.b64encode(json.dumps(header_data).encode())
5170 payload_b64 = base64.b64encode(json.dumps(payload_data).encode())
5171 h = hmac.new(key.encode(), header_b64 + b'.' + payload_b64, hashlib.sha256)
5172 signature_b64 = base64.b64encode(h.digest())
5173 token = header_b64 + b'.' + payload_b64 + b'.' + signature_b64
5174 return token
819e0531 5175
5176
16b0d7e6 5177# can be extended in future to verify the signature and parse header and return the algorithm used if it's not HS256
5178def jwt_decode_hs256(jwt):
5179 header_b64, payload_b64, signature_b64 = jwt.split('.')
2c98d998 5180 # add trailing ='s that may have been stripped, superfluous ='s are ignored
5181 payload_data = json.loads(base64.urlsafe_b64decode(f'{payload_b64}==='))
16b0d7e6 5182 return payload_data
5183
5184
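# Usage sketch (editor's illustration; the key is made up and the helper below is never
# called by yt-dlp): jwt_encode_hs256() returns the token as bytes, while
# jwt_decode_hs256() takes a str token and returns the payload dict.
def _example_jwt_roundtrip():
    token = jwt_encode_hs256({'sub': 'user'}, 'not-a-real-secret')
    assert token.count(b'.') == 2
    assert jwt_decode_hs256(token.decode()) == {'sub': 'user'}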
53973b4d 5185WINDOWS_VT_MODE = False if compat_os_name == 'nt' else None
5186
5187
7a32c70d 5188@functools.cache
819e0531 5189def supports_terminal_sequences(stream):
5190 if compat_os_name == 'nt':
8a82af35 5191 if not WINDOWS_VT_MODE:
819e0531 5192 return False
5193 elif not os.getenv('TERM'):
5194 return False
5195 try:
5196 return stream.isatty()
5197 except BaseException:
5198 return False
5199
5200
c53a18f0 5201def windows_enable_vt_mode():
5202 """Ref: https://bugs.python.org/issue30075 """
8a82af35 5203 if get_windows_version() < (10, 0, 10586):
53973b4d 5204 return
53973b4d 5205
c53a18f0 5206 import ctypes
5207 import ctypes.wintypes
5208 import msvcrt
5209
5210 ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
5211
5212 dll = ctypes.WinDLL('kernel32', use_last_error=False)
5213 handle = os.open('CONOUT$', os.O_RDWR)
c53a18f0 5214 try:
5215 h_out = ctypes.wintypes.HANDLE(msvcrt.get_osfhandle(handle))
5216 dw_original_mode = ctypes.wintypes.DWORD()
5217 success = dll.GetConsoleMode(h_out, ctypes.byref(dw_original_mode))
5218 if not success:
5219 raise Exception('GetConsoleMode failed')
5220
5221 success = dll.SetConsoleMode(h_out, ctypes.wintypes.DWORD(
5222 dw_original_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING))
5223 if not success:
5224 raise Exception('SetConsoleMode failed')
c53a18f0 5225 finally:
5226 os.close(handle)
53973b4d 5227
f0795149 5228 global WINDOWS_VT_MODE
5229 WINDOWS_VT_MODE = True
5230 supports_terminal_sequences.cache_clear()
5231
53973b4d 5232
ec11a9f4 5233_terminal_sequences_re = re.compile('\033\\[[^m]+m')
5234
5235
5236def remove_terminal_sequences(string):
5237 return _terminal_sequences_re.sub('', string)
5238
5239
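# Usage sketch (editor's illustration; the helper below is hypothetical and never called
# by yt-dlp): strip ANSI SGR color sequences from terminal-styled output.
def _example_remove_terminal_sequences():
    assert remove_terminal_sequences('\033[0;31mERROR:\033[0m oops') == 'ERROR: oops'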
5240def number_of_digits(number):
5241 return len('%d' % number)
34921b43 5242
5243
5244def join_nonempty(*values, delim='-', from_dict=None):
5245 if from_dict is not None:
69bec673 5246 values = (traversal.traverse_obj(from_dict, variadic(v)) for v in values)
34921b43 5247 return delim.join(map(str, filter(None, values)))
06e57990 5248
5249
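# Usage sketch (editor's illustration; the helper below is hypothetical and never called
# by yt-dlp): falsy values are dropped and the rest joined with the delimiter; from_dict
# first pulls the values out of the given mapping.
def _example_join_nonempty():
    assert join_nonempty('mp4', None, '', 1080, delim='-') == 'mp4-1080'
    assert join_nonempty('title', 'season', from_dict={'title': 'Show', 'season': 2}) == 'Show-2'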
5250def scale_thumbnails_to_max_format_width(formats, thumbnails, url_width_re):
5251 """
5252 Find the largest format dimensions in terms of video width and, for each thumbnail:
5253 * Modify the URL: Match the width with the provided regex and replace it with the largest format width
5254 * Update the thumbnail dimensions to match
5255
5256 This function is useful with video services that scale the provided thumbnails on demand
5257 """
5258 _keys = ('width', 'height')
5259 max_dimensions = max(
86e5f3ed 5260 (tuple(format.get(k) or 0 for k in _keys) for format in formats),
5261 default=(0, 0))
5262 if not max_dimensions[0]:
5263 return thumbnails
5264 return [
5265 merge_dicts(
5266 {'url': re.sub(url_width_re, str(max_dimensions[0]), thumbnail['url'])},
5267 dict(zip(_keys, max_dimensions)), thumbnail)
5268 for thumbnail in thumbnails
5269 ]
5270
5271
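# Usage sketch (editor's illustration; names and URLs are made up, the helper below is
# never called by yt-dlp): the width token in the thumbnail URL is bumped to the widest
# format and the recorded dimensions are updated to match.
def _example_scale_thumbnails():
    formats = [{'width': 1920, 'height': 1080}, {'width': 1280, 'height': 720}]
    thumbnails = [{'id': '0', 'url': 'https://example.com/thumb-640.jpg'}]
    assert scale_thumbnails_to_max_format_width(formats, thumbnails, r'\d+(?=\.jpg)') == [
        {'id': '0', 'url': 'https://example.com/thumb-1920.jpg', 'width': 1920, 'height': 1080}]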
5272def parse_http_range(range):
5273 """ Parse value of "Range" or "Content-Range" HTTP header into tuple. """
5274 if not range:
5275 return None, None, None
5276 crg = re.search(r'bytes[ =](\d+)-(\d+)?(?:/(\d+))?', range)
5277 if not crg:
5278 return None, None, None
5279 return int(crg.group(1)), int_or_none(crg.group(2)), int_or_none(crg.group(3))
5280
5281
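# Usage sketch (editor's illustration; the helper below is hypothetical and never called
# by yt-dlp): extract start, end and total size from "Range"/"Content-Range" style values.
def _example_parse_http_range():
    assert parse_http_range('bytes 0-499/1234') == (0, 499, 1234)
    assert parse_http_range('bytes=500-') == (500, None, None)
    assert parse_http_range(None) == (None, None, None)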
6b9e832d 5282def read_stdin(what):
5283 eof = 'Ctrl+Z' if compat_os_name == 'nt' else 'Ctrl+D'
5284 write_string(f'Reading {what} from STDIN - EOF ({eof}) to end:\n')
5285 return sys.stdin
5286
5287
5288def determine_file_encoding(data):
5289 """
88f60feb 5290 Detect the text encoding used
5291 @returns (encoding, bytes to skip)
5292 """
5293
88f60feb 5294 # BOM marks are given priority over declarations
a904a7f8 5295 for bom, enc in BOMS:
5296 if data.startswith(bom):
5297 return enc, len(bom)
5298
88f60feb 5299 # Strip off all null bytes to match even when UTF-16 or UTF-32 is used.
5300 # We ignore the endianness to get a good enough match
a904a7f8 5301 data = data.replace(b'\0', b'')
88f60feb 5302 mobj = re.match(rb'(?m)^#\s*coding\s*:\s*(\S+)\s*$', data)
5303 return mobj.group(1).decode() if mobj else None, 0
5304
5305
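# Usage sketch (editor's illustration; the helper below is hypothetical and never called
# by yt-dlp): a PEP 263 style coding declaration is honoured and no bytes are skipped;
# without a BOM or declaration the encoding is reported as unknown.
def _example_determine_file_encoding():
    assert determine_file_encoding(b'# coding: utf-8\n-f best\n') == ('utf-8', 0)
    assert determine_file_encoding(b'-f best\n') == (None, 0)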
06e57990 5306class Config:
5307 own_args = None
9e491463 5308 parsed_args = None
06e57990 5309 filename = None
5310 __initialized = False
5311
5312 def __init__(self, parser, label=None):
9e491463 5313 self.parser, self.label = parser, label
06e57990 5314 self._loaded_paths, self.configs = set(), []
5315
5316 def init(self, args=None, filename=None):
5317 assert not self.__initialized
284a60c5 5318 self.own_args, self.filename = args, filename
5319 return self.load_configs()
5320
5321 def load_configs(self):
65662dff 5322 directory = ''
284a60c5 5323 if self.filename:
5324 location = os.path.realpath(self.filename)
65662dff 5325 directory = os.path.dirname(location)
06e57990 5326 if location in self._loaded_paths:
5327 return False
5328 self._loaded_paths.add(location)
5329
284a60c5 5330 self.__initialized = True
5331 opts, _ = self.parser.parse_known_args(self.own_args)
5332 self.parsed_args = self.own_args
9e491463 5333 for location in opts.config_locations or []:
6b9e832d 5334 if location == '-':
1060f82f 5335 if location in self._loaded_paths:
5336 continue
5337 self._loaded_paths.add(location)
6b9e832d 5338 self.append_config(shlex.split(read_stdin('options'), comments=True), label='stdin')
5339 continue
65662dff 5340 location = os.path.join(directory, expand_path(location))
06e57990 5341 if os.path.isdir(location):
5342 location = os.path.join(location, 'yt-dlp.conf')
5343 if not os.path.exists(location):
9e491463 5344 self.parser.error(f'config location {location} does not exist')
06e57990 5345 self.append_config(self.read_file(location), location)
5346 return True
5347
5348 def __str__(self):
5349 label = join_nonempty(
5350 self.label, 'config', f'"{self.filename}"' if self.filename else '',
5351 delim=' ')
5352 return join_nonempty(
5353 self.own_args is not None and f'{label[0].upper()}{label[1:]}: {self.hide_login_info(self.own_args)}',
5354 *(f'\n{c}'.replace('\n', '\n| ')[1:] for c in self.configs),
5355 delim='\n')
5356
7a32c70d 5357 @staticmethod
06e57990 5358 def read_file(filename, default=[]):
5359 try:
a904a7f8 5360 optionf = open(filename, 'rb')
86e5f3ed 5361 except OSError:
06e57990 5362 return default # silently skip if file is not present
5363 try:
5364 enc, skip = determine_file_encoding(optionf.read(512))
5365 optionf.seek(skip, io.SEEK_SET)
5366 except OSError:
5367 enc = None # silently skip read errors
06e57990 5368 try:
5369 # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
a904a7f8 5370 contents = optionf.read().decode(enc or preferredencoding())
f9934b96 5371 res = shlex.split(contents, comments=True)
44a6fcff 5372 except Exception as err:
5373 raise ValueError(f'Unable to parse "{filename}": {err}')
06e57990 5374 finally:
5375 optionf.close()
5376 return res
5377
7a32c70d 5378 @staticmethod
06e57990 5379 def hide_login_info(opts):
86e5f3ed 5380 PRIVATE_OPTS = {'-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'}
06e57990 5381 eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')
5382
5383 def _scrub_eq(o):
5384 m = eqre.match(o)
5385 if m:
5386 return m.group('key') + '=PRIVATE'
5387 else:
5388 return o
5389
5390 opts = list(map(_scrub_eq, opts))
5391 for idx, opt in enumerate(opts):
5392 if opt in PRIVATE_OPTS and idx + 1 < len(opts):
5393 opts[idx + 1] = 'PRIVATE'
5394 return opts
5395
5396 def append_config(self, *args, label=None):
9e491463 5397 config = type(self)(self.parser, label)
06e57990 5398 config._loaded_paths = self._loaded_paths
5399 if config.init(*args):
5400 self.configs.append(config)
5401
7a32c70d 5402 @property
06e57990 5403 def all_args(self):
5404 for config in reversed(self.configs):
5405 yield from config.all_args
9e491463 5406 yield from self.parsed_args or []
5407
5408 def parse_known_args(self, **kwargs):
5409 return self.parser.parse_known_args(self.all_args, **kwargs)
06e57990 5410
5411 def parse_args(self):
9e491463 5412 return self.parser.parse_args(self.all_args)
5413
5414
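# Usage sketch (editor's illustration; yt-dlp passes its own option parser, but any parser
# exposing parse_known_args()/parse_args()/error() and a --config-locations option works;
# the helper below is hypothetical and never called by yt-dlp).
def _example_config():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--config-locations', action='append')
    parser.add_argument('--username')
    config = Config(parser, label='example')
    config.init(['--username', 'someone'])  # would also pull in any referenced config files
    assert config.parse_args().username == 'someone'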
d5d1df8a 5415class WebSocketsWrapper:
da42679b 5416 """Wraps websockets module to use in non-async scopes"""
abfecb7b 5417 pool = None
da42679b 5418
3cea3edd 5419 def __init__(self, url, headers=None, connect=True):
059bc4db 5420 self.loop = asyncio.new_event_loop()
9cd08050 5421 # XXX: "loop" is deprecated
5422 self.conn = websockets.connect(
5423 url, extra_headers=headers, ping_interval=None,
5424 close_timeout=float('inf'), loop=self.loop, ping_timeout=float('inf'))
5425 if connect:
5426 self.__enter__()
15dfb392 5427 atexit.register(self.__exit__, None, None, None)
5428
5429 def __enter__(self):
3cea3edd 5430 if not self.pool:
9cd08050 5431 self.pool = self.run_with_loop(self.conn.__aenter__(), self.loop)
5432 return self
5433
5434 def send(self, *args):
5435 self.run_with_loop(self.pool.send(*args), self.loop)
5436
5437 def recv(self, *args):
5438 return self.run_with_loop(self.pool.recv(*args), self.loop)
5439
5440 def __exit__(self, type, value, traceback):
5441 try:
5442 return self.run_with_loop(self.conn.__aexit__(type, value, traceback), self.loop)
5443 finally:
5444 self._cancel_all_tasks(self.loop)  # must run before the loop is closed
15dfb392 5445 self.loop.close()
5446
5447 # taken from https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py with modifications
5448 # For contributors: if any new library that uses asyncio needs to be run in non-async code, move these functions out of this class
7a32c70d 5449 @staticmethod
da42679b 5450 def run_with_loop(main, loop):
059bc4db 5451 if not asyncio.iscoroutine(main):
5452 raise ValueError(f'a coroutine was expected, got {main!r}')
5453
5454 try:
5455 return loop.run_until_complete(main)
5456 finally:
5457 loop.run_until_complete(loop.shutdown_asyncgens())
5458 if hasattr(loop, 'shutdown_default_executor'):
5459 loop.run_until_complete(loop.shutdown_default_executor())
5460
7a32c70d 5461 @staticmethod
da42679b 5462 def _cancel_all_tasks(loop):
059bc4db 5463 to_cancel = asyncio.all_tasks(loop)
5464
5465 if not to_cancel:
5466 return
5467
5468 for task in to_cancel:
5469 task.cancel()
5470
9cd08050 5471 # XXX: "loop" is removed in python 3.10+
da42679b 5472 loop.run_until_complete(
059bc4db 5473 asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))
5474
5475 for task in to_cancel:
5476 if task.cancelled():
5477 continue
5478 if task.exception() is not None:
5479 loop.call_exception_handler({
5480 'message': 'unhandled exception during asyncio.run() shutdown',
5481 'exception': task.exception(),
5482 'task': task,
5483 })
5484
5485
8b7539d2 5486def merge_headers(*dicts):
08d30158 5487 """Merge dicts of http headers case insensitively, prioritizing the latter ones"""
76aa9913 5488 return {k.title(): v for k, v in itertools.chain.from_iterable(map(dict.items, dicts))}
28787f16 5489
5490
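# Usage sketch (editor's illustration; the helper below is hypothetical and never called
# by yt-dlp): keys are normalised to Title-Case, so differently-cased duplicates collapse
# and later dicts win.
def _example_merge_headers():
    assert merge_headers({'user-agent': 'a', 'Accept': '*/*'}, {'User-Agent': 'b'}) == {
        'User-Agent': 'b', 'Accept': '*/*'}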
b1f94422 5491def cached_method(f):
5492 """Cache a method"""
5493 signature = inspect.signature(f)
5494
7a32c70d 5495 @functools.wraps(f)
b1f94422 5496 def wrapper(self, *args, **kwargs):
5497 bound_args = signature.bind(self, *args, **kwargs)
5498 bound_args.apply_defaults()
d5d1df8a 5499 key = tuple(bound_args.arguments.values())[1:]
b1f94422 5500
6368e2e6 5501 cache = vars(self).setdefault('_cached_method__cache', {}).setdefault(f.__name__, {})
b1f94422 5502 if key not in cache:
5503 cache[key] = f(self, *args, **kwargs)
5504 return cache[key]
5505 return wrapper
5506
5507
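# Usage sketch (editor's illustration; the helper and class below are hypothetical and
# never used by yt-dlp): the decorated method runs once per distinct argument tuple per
# instance; repeated calls are served from the per-object cache.
def _example_cached_method():
    class Squarer:
        calls = 0

        @cached_method
        def square(self, x):
            self.calls += 1
            return x * x

    s = Squarer()
    assert (s.square(3), s.square(3)) == (9, 9)
    assert s.calls == 1  # the second call was served from the cache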
28787f16 5508class classproperty:
83cc7b8a 5509 """property access for class methods with optional caching"""
5510 def __new__(cls, func=None, *args, **kwargs):
5511 if not func:
5512 return functools.partial(cls, *args, **kwargs)
5513 return super().__new__(cls)
c487cf00 5514
83cc7b8a 5515 def __init__(self, func, *, cache=False):
c487cf00 5516 functools.update_wrapper(self, func)
5517 self.func = func
83cc7b8a 5518 self._cache = {} if cache else None
28787f16 5519
5520 def __get__(self, _, cls):
83cc7b8a 5521 if self._cache is None:
5522 return self.func(cls)
5523 elif cls not in self._cache:
5524 self._cache[cls] = self.func(cls)
5525 return self._cache[cls]
19a03940 5526
5527
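# Usage sketch (editor's illustration; the helper and class below are hypothetical and
# never used by yt-dlp): expose a value computed from the class itself as a plain
# attribute, optionally cached per class.
def _example_classproperty():
    class FooIE:
        @classproperty(cache=True)
        def ie_key(cls):
            return cls.__name__[:-2]

    assert FooIE.ie_key == 'Foo'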
a5387729 5528class function_with_repr:
b2e0343b 5529 def __init__(self, func, repr_=None):
a5387729 5530 functools.update_wrapper(self, func)
b2e0343b 5531 self.func, self.__repr = func, repr_
a5387729 5532
5533 def __call__(self, *args, **kwargs):
5534 return self.func(*args, **kwargs)
5535
5536 def __repr__(self):
b2e0343b 5537 if self.__repr:
5538 return self.__repr
a5387729 5539 return f'{self.func.__module__}.{self.func.__qualname__}'
5540
5541
64fa820c 5542class Namespace(types.SimpleNamespace):
591bb9d3 5543 """Immutable namespace"""
591bb9d3 5544
7896214c 5545 def __iter__(self):
64fa820c 5546 return iter(self.__dict__.values())
7896214c 5547
7a32c70d 5548 @property
64fa820c 5549 def items_(self):
5550 return self.__dict__.items()
9b8ee23b 5551
5552
8dc59305 5553MEDIA_EXTENSIONS = Namespace(
5554 common_video=('avi', 'flv', 'mkv', 'mov', 'mp4', 'webm'),
5555 video=('3g2', '3gp', 'f4v', 'mk3d', 'divx', 'mpg', 'ogv', 'm4v', 'wmv'),
5556 common_audio=('aiff', 'alac', 'flac', 'm4a', 'mka', 'mp3', 'ogg', 'opus', 'wav'),
fbb73833 5557 audio=('aac', 'ape', 'asf', 'f4a', 'f4b', 'm4b', 'm4p', 'm4r', 'oga', 'ogx', 'spx', 'vorbis', 'wma', 'weba'),
8dc59305 5558 thumbnails=('jpg', 'png', 'webp'),
5559 storyboards=('mhtml', ),
5560 subtitles=('srt', 'vtt', 'ass', 'lrc'),
5561 manifests=('f4f', 'f4m', 'm3u8', 'smil', 'mpd'),
5562)
5563MEDIA_EXTENSIONS.video += MEDIA_EXTENSIONS.common_video
5564MEDIA_EXTENSIONS.audio += MEDIA_EXTENSIONS.common_audio
5565
5566KNOWN_EXTENSIONS = (*MEDIA_EXTENSIONS.video, *MEDIA_EXTENSIONS.audio, *MEDIA_EXTENSIONS.manifests)
5567
5568
be5c1ae8 5569class RetryManager:
5570 """Usage:
5571 for retry in RetryManager(...):
5572 try:
5573 ...
5574 except SomeException as err:
5575 retry.error = err
5576 continue
5577 """
5578 attempt, _error = 0, None
5579
5580 def __init__(self, _retries, _error_callback, **kwargs):
5581 self.retries = _retries or 0
5582 self.error_callback = functools.partial(_error_callback, **kwargs)
5583
5584 def _should_retry(self):
5585 return self._error is not NO_DEFAULT and self.attempt <= self.retries
5586
7a32c70d 5587 @property
be5c1ae8 5588 def error(self):
5589 if self._error is NO_DEFAULT:
5590 return None
5591 return self._error
5592
7a32c70d 5593 @error.setter
be5c1ae8 5594 def error(self, value):
5595 self._error = value
5596
5597 def __iter__(self):
5598 while self._should_retry():
5599 self.error = NO_DEFAULT
5600 self.attempt += 1
5601 yield self
5602 if self.error:
5603 self.error_callback(self.error, self.attempt, self.retries)
5604
7a32c70d 5605 @staticmethod
be5c1ae8 5606 def report_retry(e, count, retries, *, sleep_func, info, warn, error=None, suffix=None):
5607 """Utility function for reporting retries"""
5608 if count > retries:
5609 if error:
5610 return error(f'{e}. Giving up after {count - 1} retries') if count > 1 else error(str(e))
5611 raise e
5612
5613 if not count:
5614 return warn(e)
5615 elif isinstance(e, ExtractorError):
3ce29336 5616 e = remove_end(str_or_none(e.cause) or e.orig_msg, '.')
be5c1ae8 5617 warn(f'{e}. Retrying{format_field(suffix, None, " %s")} ({count}/{retries})...')
5618
5619 delay = float_or_none(sleep_func(n=count - 1)) if callable(sleep_func) else sleep_func
5620 if delay:
5621 info(f'Sleeping {delay:.2f} seconds ...')
5622 time.sleep(delay)
5623
5624
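# Usage sketch (editor's illustration; the helper below is hypothetical and never called
# by yt-dlp): after the initial attempt plus the allowed retries, the final error is
# handed to the error callback together with the attempt counts.
def _example_retry_manager():
    reports = []
    for retry in RetryManager(3, lambda err, count, retries: reports.append((count, retries))):
        try:
            raise OSError('temporary failure')
        except OSError as err:
            retry.error = err
    assert reports == [(4, 3)]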
0647d925 5625def make_archive_id(ie, video_id):
5626 ie_key = ie if isinstance(ie, str) else ie.ie_key()
5627 return f'{ie_key.lower()} {video_id}'
5628
5629
a1c5bd82 5630def truncate_string(s, left, right=0):
5631 assert left > 3 and right >= 0
5632 if s is None or len(s) <= left + right:
5633 return s
71df9b7f 5634 return f'{s[:left-3]}...{s[-right:] if right else ""}'
a1c5bd82 5635
5636
5314b521 5637def orderedSet_from_options(options, alias_dict, *, use_regex=False, start=None):
5638 assert 'all' in alias_dict, '"all" alias is required'
5639 requested = list(start or [])
5640 for val in options:
5641 discard = val.startswith('-')
5642 if discard:
5643 val = val[1:]
5644
5645 if val in alias_dict:
5646 val = alias_dict[val] if not discard else [
5647 i[1:] if i.startswith('-') else f'-{i}' for i in alias_dict[val]]
5648 # NB: Do not allow regex in aliases for performance
5649 requested = orderedSet_from_options(val, alias_dict, start=requested)
5650 continue
5651
5652 current = (filter(re.compile(val, re.I).fullmatch, alias_dict['all']) if use_regex
5653 else [val] if val in alias_dict['all'] else None)
5654 if current is None:
5655 raise ValueError(val)
5656
5657 if discard:
5658 for item in current:
5659 while item in requested:
5660 requested.remove(item)
5661 else:
5662 requested.extend(current)
5663
5664 return orderedSet(requested)
5665
5666
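# Usage sketch (editor's illustration; the helper below is hypothetical and never called
# by yt-dlp): aliases are expanded via alias_dict, and a leading '-' removes previously
# requested items.
def _example_orderedSet_from_options():
    aliases = {'all': ['formats', 'thumbnails', 'subtitles']}
    assert orderedSet_from_options(['all', '-thumbnails'], aliases) == ['formats', 'subtitles']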
d0d74b71 5667class FormatSorter:
5668 regex = r' *((?P<reverse>\+)?(?P<field>[a-zA-Z0-9_]+)((?P<separator>[~:])(?P<limit>.*?))?)? *$'
5669
5670 default = ('hidden', 'aud_or_vid', 'hasvid', 'ie_pref', 'lang', 'quality',
5671 'res', 'fps', 'hdr:12', 'vcodec:vp9.2', 'channels', 'acodec',
5672 'size', 'br', 'asr', 'proto', 'ext', 'hasaud', 'source', 'id') # These must not be aliases
5673 ytdl_default = ('hasaud', 'lang', 'quality', 'tbr', 'filesize', 'vbr',
5674 'height', 'width', 'proto', 'vext', 'abr', 'aext',
5675 'fps', 'fs_approx', 'source', 'id')
5676
5677 settings = {
5678 'vcodec': {'type': 'ordered', 'regex': True,
5679 'order': ['av0?1', 'vp0?9.2', 'vp0?9', '[hx]265|he?vc?', '[hx]264|avc', 'vp0?8', 'mp4v|h263', 'theora', '', None, 'none']},
5680 'acodec': {'type': 'ordered', 'regex': True,
71082216 5681 'order': ['[af]lac', 'wav|aiff', 'opus', 'vorbis|ogg', 'aac', 'mp?4a?', 'mp3', 'ac-?4', 'e-?a?c-?3', 'ac-?3', 'dts', '', None, 'none']},
d0d74b71 5682 'hdr': {'type': 'ordered', 'regex': True, 'field': 'dynamic_range',
5683 'order': ['dv', '(hdr)?12', r'(hdr)?10\+', '(hdr)?10', 'hlg', '', 'sdr', None]},
5684 'proto': {'type': 'ordered', 'regex': True, 'field': 'protocol',
5685 'order': ['(ht|f)tps', '(ht|f)tp$', 'm3u8.*', '.*dash', 'websocket_frag', 'rtmpe?', '', 'mms|rtsp', 'ws|websocket', 'f4']},
5686 'vext': {'type': 'ordered', 'field': 'video_ext',
29ca4082 5687 'order': ('mp4', 'mov', 'webm', 'flv', '', 'none'),
5688 'order_free': ('webm', 'mp4', 'mov', 'flv', '', 'none')},
fbb73833 5689 'aext': {'type': 'ordered', 'regex': True, 'field': 'audio_ext',
5690 'order': ('m4a', 'aac', 'mp3', 'ogg', 'opus', 'web[am]', '', 'none'),
5691 'order_free': ('ogg', 'opus', 'web[am]', 'mp3', 'm4a', 'aac', '', 'none')},
d0d74b71 5692 'hidden': {'visible': False, 'forced': True, 'type': 'extractor', 'max': -1000},
5693 'aud_or_vid': {'visible': False, 'forced': True, 'type': 'multiple',
5694 'field': ('vcodec', 'acodec'),
5695 'function': lambda it: int(any(v != 'none' for v in it))},
5696 'ie_pref': {'priority': True, 'type': 'extractor'},
5697 'hasvid': {'priority': True, 'field': 'vcodec', 'type': 'boolean', 'not_in_list': ('none',)},
5698 'hasaud': {'field': 'acodec', 'type': 'boolean', 'not_in_list': ('none',)},
5699 'lang': {'convert': 'float', 'field': 'language_preference', 'default': -1},
5700 'quality': {'convert': 'float', 'default': -1},
5701 'filesize': {'convert': 'bytes'},
5702 'fs_approx': {'convert': 'bytes', 'field': 'filesize_approx'},
5703 'id': {'convert': 'string', 'field': 'format_id'},
5704 'height': {'convert': 'float_none'},
5705 'width': {'convert': 'float_none'},
5706 'fps': {'convert': 'float_none'},
5707 'channels': {'convert': 'float_none', 'field': 'audio_channels'},
5708 'tbr': {'convert': 'float_none'},
5709 'vbr': {'convert': 'float_none'},
5710 'abr': {'convert': 'float_none'},
5711 'asr': {'convert': 'float_none'},
5712 'source': {'convert': 'float', 'field': 'source_preference', 'default': -1},
5713
5714 'codec': {'type': 'combined', 'field': ('vcodec', 'acodec')},
5715 'br': {'type': 'combined', 'field': ('tbr', 'vbr', 'abr'), 'same_limit': True},
5716 'size': {'type': 'combined', 'same_limit': True, 'field': ('filesize', 'fs_approx')},
5717 'ext': {'type': 'combined', 'field': ('vext', 'aext')},
5718 'res': {'type': 'multiple', 'field': ('height', 'width'),
5719 'function': lambda it: (lambda l: min(l) if l else 0)(tuple(filter(None, it)))},
5720
5721 # Actual field names
5722 'format_id': {'type': 'alias', 'field': 'id'},
5723 'preference': {'type': 'alias', 'field': 'ie_pref'},
5724 'language_preference': {'type': 'alias', 'field': 'lang'},
5725 'source_preference': {'type': 'alias', 'field': 'source'},
5726 'protocol': {'type': 'alias', 'field': 'proto'},
5727 'filesize_approx': {'type': 'alias', 'field': 'fs_approx'},
5728 'audio_channels': {'type': 'alias', 'field': 'channels'},
5729
5730 # Deprecated
5731 'dimension': {'type': 'alias', 'field': 'res', 'deprecated': True},
5732 'resolution': {'type': 'alias', 'field': 'res', 'deprecated': True},
5733 'extension': {'type': 'alias', 'field': 'ext', 'deprecated': True},
5734 'bitrate': {'type': 'alias', 'field': 'br', 'deprecated': True},
5735 'total_bitrate': {'type': 'alias', 'field': 'tbr', 'deprecated': True},
5736 'video_bitrate': {'type': 'alias', 'field': 'vbr', 'deprecated': True},
5737 'audio_bitrate': {'type': 'alias', 'field': 'abr', 'deprecated': True},
5738 'framerate': {'type': 'alias', 'field': 'fps', 'deprecated': True},
5739 'filesize_estimate': {'type': 'alias', 'field': 'size', 'deprecated': True},
5740 'samplerate': {'type': 'alias', 'field': 'asr', 'deprecated': True},
5741 'video_ext': {'type': 'alias', 'field': 'vext', 'deprecated': True},
5742 'audio_ext': {'type': 'alias', 'field': 'aext', 'deprecated': True},
5743 'video_codec': {'type': 'alias', 'field': 'vcodec', 'deprecated': True},
5744 'audio_codec': {'type': 'alias', 'field': 'acodec', 'deprecated': True},
5745 'video': {'type': 'alias', 'field': 'hasvid', 'deprecated': True},
5746 'has_video': {'type': 'alias', 'field': 'hasvid', 'deprecated': True},
5747 'audio': {'type': 'alias', 'field': 'hasaud', 'deprecated': True},
5748 'has_audio': {'type': 'alias', 'field': 'hasaud', 'deprecated': True},
5749 'extractor': {'type': 'alias', 'field': 'ie_pref', 'deprecated': True},
5750 'extractor_preference': {'type': 'alias', 'field': 'ie_pref', 'deprecated': True},
5751 }
5752
5753 def __init__(self, ydl, field_preference):
5754 self.ydl = ydl
5755 self._order = []
5756 self.evaluate_params(self.ydl.params, field_preference)
5757 if ydl.params.get('verbose'):
5758 self.print_verbose_info(self.ydl.write_debug)
5759
5760 def _get_field_setting(self, field, key):
5761 if field not in self.settings:
5762 if key in ('forced', 'priority'):
5763 return False
5764 self.ydl.deprecated_feature(f'Using arbitrary fields ({field}) for format sorting is '
5765 'deprecated and may be removed in a future version')
5766 self.settings[field] = {}
5767 propObj = self.settings[field]
5768 if key not in propObj:
5769 type = propObj.get('type')
5770 if key == 'field':
5771 default = 'preference' if type == 'extractor' else (field,) if type in ('combined', 'multiple') else field
5772 elif key == 'convert':
5773 default = 'order' if type == 'ordered' else 'float_string' if field else 'ignore'
5774 else:
5775 default = {'type': 'field', 'visible': True, 'order': [], 'not_in_list': (None,)}.get(key, None)
5776 propObj[key] = default
5777 return propObj[key]
5778
5779 def _resolve_field_value(self, field, value, convertNone=False):
5780 if value is None:
5781 if not convertNone:
5782 return None
5783 else:
5784 value = value.lower()
5785 conversion = self._get_field_setting(field, 'convert')
5786 if conversion == 'ignore':
5787 return None
5788 if conversion == 'string':
5789 return value
5790 elif conversion == 'float_none':
5791 return float_or_none(value)
5792 elif conversion == 'bytes':
5793 return parse_bytes(value)
5794 elif conversion == 'order':
5795 order_list = (self._use_free_order and self._get_field_setting(field, 'order_free')) or self._get_field_setting(field, 'order')
5796 use_regex = self._get_field_setting(field, 'regex')
5797 list_length = len(order_list)
5798 empty_pos = order_list.index('') if '' in order_list else list_length + 1
5799 if use_regex and value is not None:
5800 for i, regex in enumerate(order_list):
5801 if regex and re.match(regex, value):
5802 return list_length - i
5803 return list_length - empty_pos # not in list
5804 else: # not regex or value = None
5805 return list_length - (order_list.index(value) if value in order_list else empty_pos)
5806 else:
5807 if value.isnumeric():
5808 return float(value)
5809 else:
5810 self.settings[field]['convert'] = 'string'
5811 return value
5812
5813 def evaluate_params(self, params, sort_extractor):
5814 self._use_free_order = params.get('prefer_free_formats', False)
5815 self._sort_user = params.get('format_sort', [])
5816 self._sort_extractor = sort_extractor
5817
5818 def add_item(field, reverse, closest, limit_text):
5819 field = field.lower()
5820 if field in self._order:
5821 return
5822 self._order.append(field)
5823 limit = self._resolve_field_value(field, limit_text)
5824 data = {
5825 'reverse': reverse,
5826 'closest': False if limit is None else closest,
5827 'limit_text': limit_text,
5828 'limit': limit}
5829 if field in self.settings:
5830 self.settings[field].update(data)
5831 else:
5832 self.settings[field] = data
5833
5834 sort_list = (
5835 tuple(field for field in self.default if self._get_field_setting(field, 'forced'))
5836 + (tuple() if params.get('format_sort_force', False)
5837 else tuple(field for field in self.default if self._get_field_setting(field, 'priority')))
5838 + tuple(self._sort_user) + tuple(sort_extractor) + self.default)
5839
5840 for item in sort_list:
5841 match = re.match(self.regex, item)
5842 if match is None:
5843 raise ExtractorError('Invalid format sort string "%s" given by extractor' % item)
5844 field = match.group('field')
5845 if field is None:
5846 continue
5847 if self._get_field_setting(field, 'type') == 'alias':
5848 alias, field = field, self._get_field_setting(field, 'field')
5849 if self._get_field_setting(alias, 'deprecated'):
5850 self.ydl.deprecated_feature(f'Format sorting alias {alias} is deprecated and may '
5851 f'be removed in a future version. Please use {field} instead')
5852 reverse = match.group('reverse') is not None
5853 closest = match.group('separator') == '~'
5854 limit_text = match.group('limit')
5855
5856 has_limit = limit_text is not None
5857 has_multiple_fields = self._get_field_setting(field, 'type') == 'combined'
5858 has_multiple_limits = has_limit and has_multiple_fields and not self._get_field_setting(field, 'same_limit')
5859
5860 fields = self._get_field_setting(field, 'field') if has_multiple_fields else (field,)
5861 limits = limit_text.split(':') if has_multiple_limits else (limit_text,) if has_limit else tuple()
5862 limit_count = len(limits)
5863 for (i, f) in enumerate(fields):
5864 add_item(f, reverse, closest,
5865 limits[i] if i < limit_count
5866 else limits[0] if has_limit and not has_multiple_limits
5867 else None)
5868
5869 def print_verbose_info(self, write_debug):
5870 if self._sort_user:
5871 write_debug('Sort order given by user: %s' % ', '.join(self._sort_user))
5872 if self._sort_extractor:
5873 write_debug('Sort order given by extractor: %s' % ', '.join(self._sort_extractor))
5874 write_debug('Formats sorted by: %s' % ', '.join(['%s%s%s' % (
5875 '+' if self._get_field_setting(field, 'reverse') else '', field,
5876 '%s%s(%s)' % ('~' if self._get_field_setting(field, 'closest') else ':',
5877 self._get_field_setting(field, 'limit_text'),
5878 self._get_field_setting(field, 'limit'))
5879 if self._get_field_setting(field, 'limit_text') is not None else '')
5880 for field in self._order if self._get_field_setting(field, 'visible')]))
5881
5882 def _calculate_field_preference_from_value(self, format, field, type, value):
5883 reverse = self._get_field_setting(field, 'reverse')
5884 closest = self._get_field_setting(field, 'closest')
5885 limit = self._get_field_setting(field, 'limit')
5886
5887 if type == 'extractor':
5888 maximum = self._get_field_setting(field, 'max')
5889 if value is None or (maximum is not None and value >= maximum):
5890 value = -1
5891 elif type == 'boolean':
5892 in_list = self._get_field_setting(field, 'in_list')
5893 not_in_list = self._get_field_setting(field, 'not_in_list')
5894 value = 0 if ((in_list is None or value in in_list) and (not_in_list is None or value not in not_in_list)) else -1
5895 elif type == 'ordered':
5896 value = self._resolve_field_value(field, value, True)
5897
5898 # try to convert to number
5899 val_num = float_or_none(value, default=self._get_field_setting(field, 'default'))
5900 is_num = self._get_field_setting(field, 'convert') != 'string' and val_num is not None
5901 if is_num:
5902 value = val_num
5903
5904 return ((-10, 0) if value is None
5905 else (1, value, 0) if not is_num # if a field has mixed strings and numbers, strings are sorted higher
5906 else (0, -abs(value - limit), value - limit if reverse else limit - value) if closest
5907 else (0, value, 0) if not reverse and (limit is None or value <= limit)
5908 else (0, -value, 0) if limit is None or (reverse and value == limit) or value > limit
5909 else (-1, value, 0))
5910
5911 def _calculate_field_preference(self, format, field):
5912 type = self._get_field_setting(field, 'type') # extractor, boolean, ordered, field, multiple
5913 get_value = lambda f: format.get(self._get_field_setting(f, 'field'))
5914 if type == 'multiple':
5915 type = 'field' # Only 'field' is allowed in multiple for now
5916 actual_fields = self._get_field_setting(field, 'field')
5917
5918 value = self._get_field_setting(field, 'function')(get_value(f) for f in actual_fields)
5919 else:
5920 value = get_value(field)
5921 return self._calculate_field_preference_from_value(format, field, type, value)
5922
5923 def calculate_preference(self, format):
5924 # Determine missing protocol
5925 if not format.get('protocol'):
5926 format['protocol'] = determine_protocol(format)
5927
5928 # Determine missing ext
5929 if not format.get('ext') and 'url' in format:
5930 format['ext'] = determine_ext(format['url'])
5931 if format.get('vcodec') == 'none':
5932 format['audio_ext'] = format['ext'] if format.get('acodec') != 'none' else 'none'
5933 format['video_ext'] = 'none'
5934 else:
5935 format['video_ext'] = format['ext']
5936 format['audio_ext'] = 'none'
5937 # if format.get('preference') is None and format.get('ext') in ('f4f', 'f4m'): # Not supported?
5938 # format['preference'] = -1000
5939
5940 if format.get('preference') is None and format.get('ext') == 'flv' and re.match('[hx]265|he?vc?', format.get('vcodec') or ''):
5941 # HEVC-over-FLV is out-of-spec by FLV's original spec
5942 # ref. https://trac.ffmpeg.org/ticket/6389
5943 # ref. https://github.com/yt-dlp/yt-dlp/pull/5821
5944 format['preference'] = -100
5945
d0d74b71 5946 # Determine missing bitrates
5947 if format.get('tbr') is None:
5948 if format.get('vbr') is not None and format.get('abr') is not None:
5949 format['tbr'] = format.get('vbr', 0) + format.get('abr', 0)
5950 else:
5951 if format.get('vcodec') != 'none' and format.get('vbr') is None:
5952 format['vbr'] = format.get('tbr') - format.get('abr', 0)
5953 if format.get('acodec') != 'none' and format.get('abr') is None:
5954 format['abr'] = format.get('tbr') - format.get('vbr', 0)
5955
5956 return tuple(self._calculate_field_preference(format, field) for field in self._order)