import asyncio
import atexit
import base64
import binascii
import calendar
import codecs
import collections
import collections.abc
import contextlib
import datetime
import email.header
import email.utils
import errno
import gzip
import hashlib
import hmac
import html.entities
import html.parser
import http.client
import http.cookiejar
import inspect
import io
import itertools
import json
import locale
import math
import mimetypes
import operator
import os
import platform
import random
import re
import shlex
import socket
import ssl
import struct
import subprocess
import sys
import tempfile
import time
import traceback
import types
import unicodedata
import urllib.error
import urllib.parse
import urllib.request
import xml.etree.ElementTree
import zlib

from .compat import functools  # isort: split
from .compat import (
    compat_etree_fromstring,
    compat_expanduser,
    compat_HTMLParseError,
    compat_os_name,
    compat_shlex_quote,
)
from .dependencies import brotli, certifi, websockets, xattr
from .socks import ProxyType, sockssocket


def register_socks_protocols():
    # "Register" SOCKS protocols
    # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
    # URLs with protocols not in urlparse.uses_netloc are not handled correctly
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in urllib.parse.uses_netloc:
            urllib.parse.uses_netloc.append(scheme)


# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))


def random_user_agent():
    _USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
    _CHROME_VERSIONS = (
        '90.0.4430.212',
        '90.0.4430.24',
        '90.0.4430.70',
        '90.0.4430.72',
        '90.0.4430.85',
        '90.0.4430.93',
        '91.0.4472.101',
        '91.0.4472.106',
        '91.0.4472.114',
        '91.0.4472.124',
        '91.0.4472.164',
        '91.0.4472.19',
        '91.0.4472.77',
        '92.0.4515.107',
        '92.0.4515.115',
        '92.0.4515.131',
        '92.0.4515.159',
        '92.0.4515.43',
        '93.0.4556.0',
        '93.0.4577.15',
        '93.0.4577.63',
        '93.0.4577.82',
        '94.0.4606.41',
        '94.0.4606.54',
        '94.0.4606.61',
        '94.0.4606.71',
        '94.0.4606.81',
        '94.0.4606.85',
        '95.0.4638.17',
        '95.0.4638.50',
        '95.0.4638.54',
        '95.0.4638.69',
        '95.0.4638.74',
        '96.0.4664.18',
        '96.0.4664.45',
        '96.0.4664.55',
        '96.0.4664.93',
        '97.0.4692.20',
    )
    return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)


SUPPORTED_ENCODINGS = [
    'gzip', 'deflate'
]
if brotli:
    SUPPORTED_ENCODINGS.append('br')

std_headers = {
    'User-Agent': random_user_agent(),
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-us,en;q=0.5',
    'Sec-Fetch-Mode': 'navigate',
}


USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}


NO_DEFAULT = object()
IDENTITY = lambda x: x

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
    # these follow the genitive grammatical case (dopełniacz)
    # some websites might be using nominative, which will require another month list
    # https://en.wikibooks.org/wiki/Polish/Noun_cases
    'pl': ['stycznia', 'lutego', 'marca', 'kwietnia', 'maja', 'czerwca',
           'lipca', 'sierpnia', 'września', 'października', 'listopada', 'grudnia'],
}

# From https://github.com/python/cpython/blob/3.11/Lib/email/_parseaddr.py#L36-L42
TIMEZONE_NAMES = {
    'UT': 0, 'UTC': 0, 'GMT': 0, 'Z': 0,
    'AST': -4, 'ADT': -3,  # Atlantic (used in Canada)
    'EST': -5, 'EDT': -4,  # Eastern
    'CST': -6, 'CDT': -5,  # Central
    'MST': -7, 'MDT': -6,  # Mountain
    'PST': -8, 'PDT': -7   # Pacific
}

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))

DATE_FORMATS = (
    '%d %B %Y',
    '%d %b %Y',
    '%B %d %Y',
    '%B %dst %Y',
    '%B %dnd %Y',
    '%B %drd %Y',
    '%B %dth %Y',
    '%b %d %Y',
    '%b %dst %Y',
    '%b %dnd %Y',
    '%b %drd %Y',
    '%b %dth %Y',
    '%b %dst %Y %I:%M',
    '%b %dnd %Y %I:%M',
    '%b %drd %Y %I:%M',
    '%b %dth %Y %I:%M',
    '%Y %m %d',
    '%Y-%m-%d',
    '%Y.%m.%d.',
    '%Y/%m/%d',
    '%Y/%m/%d %H:%M',
    '%Y/%m/%d %H:%M:%S',
    '%Y%m%d%H%M',
    '%Y%m%d%H%M%S',
    '%Y%m%d',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S.%f',
    '%Y-%m-%d %H:%M:%S:%f',
    '%d.%m.%Y %H:%M',
    '%d.%m.%Y %H.%M',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%Y-%m-%dT%H:%M',
    '%b %d %Y at %H:%M',
    '%b %d %Y at %H:%M:%S',
    '%B %d %Y at %H:%M',
    '%B %d %Y at %H:%M:%S',
    '%H:%M %d-%b-%Y',
)

DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
    '%d-%m-%Y %H:%M',
])

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])

PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>\s*(?P<json_ld>{.+?}|\[.+?\])\s*</script>'

NUMBER_RE = r'\d+(?:\.\d+)?'


@functools.cache
def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref


def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    tf = tempfile.NamedTemporaryFile(
        prefix=f'{os.path.basename(fn)}.', dir=os.path.dirname(fn),
        suffix='.tmp', delete=False, mode='w', encoding='utf-8')

    try:
        with tf:
            json.dump(obj, tf, ensure_ascii=False)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            with contextlib.suppress(OSError):
                os.unlink(fn)
        with contextlib.suppress(OSError):
            mask = os.umask(0)
            os.umask(mask)
            os.chmod(tf.name, 0o666 & ~mask)
        os.rename(tf.name, fn)
    except Exception:
        with contextlib.suppress(OSError):
            os.remove(tf.name)
        raise


def find_xpath_attr(node, xpath, key, val=None):
    """ Find the xpath xpath[@key=val] """
    assert re.match(r'^[a-zA-Z_-]+$', key)
    expr = xpath + ('[@%s]' % key if val is None else f"[@{key}='{val}']")
    return node.find(expr)


# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter


def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)

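# Illustrative usage (a sketch, using a made-up 'm' prefix and namespace URI):
# xpath_with_ns() expands prefixed path components into ElementTree's {uri}tag form, e.g.
#   doc = xml.etree.ElementTree.fromstring('<root xmlns:m="urn:example"><m:title>x</m:title></root>')
#   doc.find(xpath_with_ns('m:title', {'m': 'urn:example'})).text  # -> 'x'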

def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(xpath)

    if isinstance(xpath, str):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n


def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text


def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = f'{xpath}[@{key}]' if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]


def get_element_by_id(id, html, **kwargs):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html, **kwargs)


def get_element_html_by_id(id, html, **kwargs):
    """Return the html of the tag with the specified ID in the passed HTML document"""
    return get_element_html_by_attribute('id', id, html, **kwargs)


def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_html_by_class(class_name, html):
    """Return the html of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_html_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_by_attribute(attribute, value, html, **kwargs):
    retval = get_elements_by_attribute(attribute, value, html, **kwargs)
    return retval[0] if retval else None


def get_element_html_by_attribute(attribute, value, html, **kargs):
    retval = get_elements_html_by_attribute(attribute, value, html, **kargs)
    return retval[0] if retval else None


def get_elements_by_class(class_name, html, **kargs):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_by_attribute(
        'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_html_by_class(class_name, html):
    """Return the html of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_html_by_attribute(
        'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_by_attribute(*args, **kwargs):
    """Return the content of the tag with the specified attribute in the passed HTML document"""
    return [content for content, _ in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_html_by_attribute(*args, **kwargs):
    """Return the html of the tag with the specified attribute in the passed HTML document"""
    return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_text_and_html_by_attribute(attribute, value, html, *, tag=r'[\w:.-]+', escape_value=True):
    """
    Return the text (content) and the html (whole) of the tag with the specified
    attribute in the passed HTML document
    """
    if not value:
        return

    quote = '' if re.match(r'''[\s"'`=<>]''', value) else '?'

    value = re.escape(value) if escape_value else value

    partial_element_re = rf'''(?x)
        <(?P<tag>{tag})
         (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
         \s{re.escape(attribute)}\s*=\s*(?P<_q>['"]{quote})(?-x:{value})(?P=_q)
        '''

    for m in re.finditer(partial_element_re, html):
        content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():])

        yield (
            unescapeHTML(re.sub(r'^(?P<q>["\'])(?P<content>.*)(?P=q)$', r'\g<content>', content, flags=re.DOTALL)),
            whole
        )


class HTMLBreakOnClosingTagParser(html.parser.HTMLParser):
    """
    HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
    closing tag for the first opening tag it has encountered, and can be used
    as a context manager
    """

    class HTMLBreakOnClosingTagException(Exception):
        pass

    def __init__(self):
        self.tagstack = collections.deque()
        html.parser.HTMLParser.__init__(self)

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def close(self):
        # handle_endtag does not return upon raising HTMLBreakOnClosingTagException,
        # so data remains buffered; we no longer have any interest in it, thus
        # override this method to discard it
        pass

    def handle_starttag(self, tag, _):
        self.tagstack.append(tag)

    def handle_endtag(self, tag):
        if not self.tagstack:
            raise compat_HTMLParseError('no tags in the stack')
        while self.tagstack:
            inner_tag = self.tagstack.pop()
            if inner_tag == tag:
                break
        else:
            raise compat_HTMLParseError(f'matching opening tag for closing {tag} tag not found')
        if not self.tagstack:
            raise self.HTMLBreakOnClosingTagException()


# XXX: This should be far less strict
def get_element_text_and_html_by_tag(tag, html):
    """
    For the first element with the specified tag in the passed HTML document
    return its content (text) and the whole element (html)
    """
    def find_or_raise(haystack, needle, exc):
        try:
            return haystack.index(needle)
        except ValueError:
            raise exc
    closing_tag = f'</{tag}>'
    whole_start = find_or_raise(
        html, f'<{tag}', compat_HTMLParseError(f'opening {tag} tag not found'))
    content_start = find_or_raise(
        html[whole_start:], '>', compat_HTMLParseError(f'malformed opening {tag} tag'))
    content_start += whole_start + 1
    with HTMLBreakOnClosingTagParser() as parser:
        parser.feed(html[whole_start:content_start])
        if not parser.tagstack or parser.tagstack[0] != tag:
            raise compat_HTMLParseError(f'parser did not match opening {tag} tag')
        offset = content_start
        while offset < len(html):
            next_closing_tag_start = find_or_raise(
                html[offset:], closing_tag,
                compat_HTMLParseError(f'closing {tag} tag not found'))
            next_closing_tag_end = next_closing_tag_start + len(closing_tag)
            try:
                parser.feed(html[offset:offset + next_closing_tag_end])
                offset += next_closing_tag_end
            except HTMLBreakOnClosingTagParser.HTMLBreakOnClosingTagException:
                return html[content_start:offset + next_closing_tag_start], \
                    html[whole_start:offset + next_closing_tag_end]
        raise compat_HTMLParseError('unexpected end of html')


class HTMLAttributeParser(html.parser.HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""

    def __init__(self):
        self.attrs = {}
        html.parser.HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)
        raise compat_HTMLParseError('done')


class HTMLListAttrsParser(html.parser.HTMLParser):
    """HTML parser to gather the attributes for the elements of a list"""

    def __init__(self):
        html.parser.HTMLParser.__init__(self)
        self.items = []
        self._level = 0

    def handle_starttag(self, tag, attrs):
        if tag == 'li' and self._level == 0:
            self.items.append(dict(attrs))
        self._level += 1

    def handle_endtag(self, tag):
        self._level -= 1


def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    """
    parser = HTMLAttributeParser()
    with contextlib.suppress(compat_HTMLParseError):
        parser.feed(html_element)
        parser.close()
    return parser.attrs

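# Illustrative usage (a sketch): html.parser lower-cases attribute names and decodes
# entity references in values, so e.g.
#   extract_attributes('<a HREF="https://example.com/x" data-id=3>')
#   # -> {'href': 'https://example.com/x', 'data-id': '3'}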

def parse_list(webpage):
    """Given a string for a series of HTML <li> elements,
    return a list of their attribute dictionaries"""
    parser = HTMLListAttrsParser()
    parser.feed(webpage)
    parser.close()
    return parser.items


def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    html = re.sub(r'\s+', ' ', html)
    html = re.sub(r'(?u)\s?<\s?br\s?/?\s?>\s?', '\n', html)
    html = re.sub(r'(?u)<\s?/\s?p\s?>\s?<\s?p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()


class LenientJSONDecoder(json.JSONDecoder):
    def __init__(self, *args, transform_source=None, ignore_extra=False, **kwargs):
        self.transform_source, self.ignore_extra = transform_source, ignore_extra
        super().__init__(*args, **kwargs)

    def decode(self, s):
        if self.transform_source:
            s = self.transform_source(s)
        try:
            if self.ignore_extra:
                return self.raw_decode(s.lstrip())[0]
            return super().decode(s)
        except json.JSONDecodeError as e:
            if e.pos is not None:
                raise type(e)(f'{e.msg} in {s[e.pos-10:e.pos+10]!r}', s, e.pos)
            raise

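# Illustrative usage (a sketch): json.loads() forwards extra keyword arguments to the
# decoder class, so trailing garbage after the JSON value can be ignored:
#   json.loads('{"ok": true});</script>', cls=LenientJSONDecoder, ignore_extra=True)
#   # -> {'ok': True}
# transform_source may be used to pre-process the string (e.g. strip a JS wrapper) before decoding.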

def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    if filename == '-':
        if sys.platform == 'win32':
            import msvcrt

            # stdout may be any IO stream, e.g. when using contextlib.redirect_stdout
            with contextlib.suppress(io.UnsupportedOperation):
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)

    for attempt in range(2):
        try:
            try:
                if sys.platform == 'win32':
                    # FIXME: An exclusive lock also locks the file from being read.
                    # Since windows locks are mandatory, don't lock the file on windows (for now).
                    # Ref: https://github.com/yt-dlp/yt-dlp/issues/3124
                    raise LockingUnsupportedError()
                stream = locked_file(filename, open_mode, block=False).__enter__()
            except OSError:
                stream = open(filename, open_mode)
            return stream, filename
        except OSError as err:
            if attempt or err.errno in (errno.EACCES,):
                raise
            old_filename, filename = filename, sanitize_path(filename)
            if old_filename == filename:
                raise


def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp

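# Illustrative usage (a sketch): RFC 2822 dates as found in HTTP headers and RSS feeds, e.g.
#   timeconvert('Wed, 01 Jan 2020 00:00:00 +0000')  # -> 1577836800
# Unparseable input simply yields None.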

def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
    """Sanitizes a string so it could be used as part of a filename.
    @param restricted   Use a stricter subset of allowed characters
    @param is_id        Whether this is an ID that should be kept unchanged if possible.
                        If unset, yt-dlp's new sanitization rules are in effect
    """
    if s == '':
        return ''

    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        elif not restricted and char == '\n':
            return '\0 '
        elif is_id is NO_DEFAULT and not restricted and char in '"*:<>?|/\\':
            # Replace with their full-width unicode counterparts
            return {'/': '\u29F8', '\\': '\u29f9'}.get(char, chr(ord(char) + 0xfee0))
        elif char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '\0_\0-' if restricted else '\0 \0-'
        elif char in '\\/|*<>':
            return '\0_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace() or ord(char) > 127):
            return '\0_'
        return char

    # Replace look-alike Unicode glyphs
    if restricted and (is_id is NO_DEFAULT or not is_id):
        s = unicodedata.normalize('NFKC', s)
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)  # Handle timestamps
    result = ''.join(map(replace_insane, s))
    if is_id is NO_DEFAULT:
        result = re.sub(r'(\0.)(?:(?=\1)..)+', r'\1', result)  # Remove repeated substitute chars
        STRIP_RE = r'(?:\0.|[ _-])*'
        result = re.sub(f'^\0.{STRIP_RE}|{STRIP_RE}\0.$', '', result)  # Remove substitute chars from start/end
    result = result.replace('\0', '') or '_'

    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result

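# Illustrative usage (a sketch; exact substitutions depend on the flags):
#   sanitize_filename('A/B: C?', restricted=True)  # forbidden chars become '_'/'-', roughly 'A_B_-_C'
#   sanitize_filename('A/B: C?')                   # default mode maps them to full-width look-alike glyphs instead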

def sanitize_path(s, force=False):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform == 'win32':
        force = False
        drive_or_unc, _ = os.path.splitdrive(s)
    elif force:
        drive_or_unc = ''
    else:
        return s

    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    elif force and s and s[0] == os.path.sep:
        sanitized_path.insert(0, os.path.sep)
    return os.path.join(*sanitized_path)


def sanitize_url(url, *, scheme='http'):
    # Prepend protocol-less URLs with `http:` scheme in order to mitigate
    # the number of unwanted failures due to missing protocol
    if url is None:
        return
    elif url.startswith('//'):
        return f'{scheme}:{url}'
    # Fix some common typos seen so far
    COMMON_TYPOS = (
        # https://github.com/ytdl-org/youtube-dl/issues/15649
        (r'^httpss://', r'https://'),
        # https://bx1.be/lives/direct-tv/
        (r'^rmtp([es]?)://', r'rtmp\1://'),
    )
    for mistake, fixup in COMMON_TYPOS:
        if re.match(mistake, url):
            return re.sub(mistake, fixup, url)
    return url

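# Illustrative usage (a sketch):
#   sanitize_url('//example.com/video')      # -> 'http://example.com/video'
#   sanitize_url('rmtp://example.com/live')  # -> 'rtmp://example.com/live'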

def extract_basic_auth(url):
    parts = urllib.parse.urlsplit(url)
    if parts.username is None:
        return url, None
    url = urllib.parse.urlunsplit(parts._replace(netloc=(
        parts.hostname if parts.port is None
        else '%s:%d' % (parts.hostname, parts.port))))
    auth_payload = base64.b64encode(
        ('%s:%s' % (parts.username, parts.password or '')).encode())
    return url, f'Basic {auth_payload.decode()}'

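# Illustrative usage (a sketch): credentials embedded in the URL are moved into an Authorization header value:
#   extract_basic_auth('http://user:pass@example.com/feed')
#   # -> ('http://example.com/feed', 'Basic dXNlcjpwYXNz')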

def sanitized_Request(url, *args, **kwargs):
    url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
    if auth_header is not None:
        headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
        headers['Authorization'] = auth_header
    return urllib.request.Request(url, *args, **kwargs)


def expand_path(s):
    """Expand shell variables and ~"""
    return os.path.expandvars(compat_expanduser(s))


def orderedSet(iterable, *, lazy=False):
    """Remove all duplicates from the input iterable"""
    def _iter():
        seen = []  # Do not use set since the items can be unhashable
        for x in iterable:
            if x not in seen:
                seen.append(x)
                yield x

    return _iter() if lazy else list(_iter())

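# Illustrative usage (a sketch): order-preserving de-duplication that also works for unhashable items:
#   orderedSet([1, 2, 1, 3, 2])             # -> [1, 2, 3]
#   orderedSet(([1], [2], [1]), lazy=True)  # generator yielding [1], [2]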

def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in html.entities.name2codepoint:
        return chr(html.entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon.
    # E.g. '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in html.entities.html5:
        return html.entities.html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/ytdl-org/youtube-dl/issues/7518
        with contextlib.suppress(ValueError):
            return chr(int(numstr, base))

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert isinstance(s, str)

    return re.sub(
        r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)


def escapeHTML(text):
    return (
        text
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;')
        .replace('"', '&quot;')
        .replace("'", '&#39;')
    )


def process_communicate_or_kill(p, *args, **kwargs):
    deprecation_warning(f'"{__name__}.process_communicate_or_kill" is deprecated and may be removed '
                        f'in a future version. Use "{__name__}.Popen.communicate_or_kill" instead')
    return Popen.communicate_or_kill(p, *args, **kwargs)


class Popen(subprocess.Popen):
    if sys.platform == 'win32':
        _startupinfo = subprocess.STARTUPINFO()
        _startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    else:
        _startupinfo = None

    @staticmethod
    def _fix_pyinstaller_ld_path(env):
        """Restore LD_LIBRARY_PATH when using PyInstaller
            Ref: https://github.com/pyinstaller/pyinstaller/blob/develop/doc/runtime-information.rst#ld_library_path--libpath-considerations
                 https://github.com/yt-dlp/yt-dlp/issues/4573
        """
        if not hasattr(sys, '_MEIPASS'):
            return

        def _fix(key):
            orig = env.get(f'{key}_ORIG')
            if orig is None:
                env.pop(key, None)
            else:
                env[key] = orig

        _fix('LD_LIBRARY_PATH')  # Linux
        _fix('DYLD_LIBRARY_PATH')  # macOS

    def __init__(self, *args, env=None, text=False, **kwargs):
        if env is None:
            env = os.environ.copy()
        self._fix_pyinstaller_ld_path(env)

        if text is True:
            kwargs['universal_newlines'] = True  # For 3.6 compatibility
            kwargs.setdefault('encoding', 'utf-8')
            kwargs.setdefault('errors', 'replace')
        super().__init__(*args, env=env, **kwargs, startupinfo=self._startupinfo)

    def communicate_or_kill(self, *args, **kwargs):
        try:
            return self.communicate(*args, **kwargs)
        except BaseException:  # Including KeyboardInterrupt
            self.kill(timeout=None)
            raise

    def kill(self, *, timeout=0):
        super().kill()
        if timeout != 0:
            self.wait(timeout=timeout)

    @classmethod
    def run(cls, *args, timeout=None, **kwargs):
        with cls(*args, **kwargs) as proc:
            default = '' if proc.text_mode else b''
            stdout, stderr = proc.communicate_or_kill(timeout=timeout)
            return stdout or default, stderr or default, proc.returncode

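# Illustrative usage (a sketch; assumes a POSIX `echo` binary is available on PATH):
#   stdout, stderr, returncode = Popen.run(
#       ['echo', 'hello'], text=True, stdout=subprocess.PIPE)
#   # stdout == 'hello\n', stderr == '' (never None), returncode == 0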

def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
        if encoding is None:
            encoding = 'utf-8'
    return encoding


def encodeFilename(s, for_subprocess=False):
    assert isinstance(s, str)
    return s


def decodeFilename(b, for_subprocess=False):
    return b


def encodeArgument(s):
    # Legacy code that uses byte strings
    # Uncomment the following line after fixing all post processors
    # assert isinstance(s, str), 'Internal error: %r should be of type %r, is %r' % (s, str, type(s))
    return s if isinstance(s, str) else s.decode('ascii')


def decodeArgument(b):
    return b


def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, str)
    return optval


_timetuple = collections.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))


def timetuple_from_msec(msec):
    secs, msec = divmod(msec, 1000)
    mins, secs = divmod(secs, 60)
    hrs, mins = divmod(mins, 60)
    return _timetuple(hrs, mins, secs, msec)


def formatSeconds(secs, delim=':', msec=False):
    time = timetuple_from_msec(secs * 1000)
    if time.hours:
        ret = '%d%s%02d%s%02d' % (time.hours, delim, time.minutes, delim, time.seconds)
    elif time.minutes:
        ret = '%d%s%02d' % (time.minutes, delim, time.seconds)
    else:
        ret = '%d' % time.seconds
    return '%s.%03d' % (ret, time.milliseconds) if msec else ret

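# Illustrative usage (a sketch):
#   timetuple_from_msec(61500)      # -> Time(hours=0, minutes=1, seconds=1, milliseconds=500)
#   formatSeconds(3661)             # -> '1:01:01'
#   formatSeconds(61.5, msec=True)  # -> '1:01.500'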

def _ssl_load_windows_store_certs(ssl_context, storename):
    # Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
    try:
        certs = [cert for cert, encoding, trust in ssl.enum_certificates(storename)
                 if encoding == 'x509_asn' and (
                     trust is True or ssl.Purpose.SERVER_AUTH.oid in trust)]
    except PermissionError:
        return
    for cert in certs:
        with contextlib.suppress(ssl.SSLError):
            ssl_context.load_verify_locations(cadata=cert)


def make_HTTPS_handler(params, **kwargs):
    opts_check_certificate = not params.get('nocheckcertificate')
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = opts_check_certificate
    if params.get('legacyserverconnect'):
        context.options |= 4  # SSL_OP_LEGACY_SERVER_CONNECT
        # Allow use of weaker ciphers in Python 3.10+. See https://bugs.python.org/issue43998
        context.set_ciphers('DEFAULT')
    elif (
        sys.version_info < (3, 10)
        and ssl.OPENSSL_VERSION_INFO >= (1, 1, 1)
        and not ssl.OPENSSL_VERSION.startswith('LibreSSL')
    ):
        # Backport the default SSL ciphers and minimum TLS version settings from Python 3.10 [1].
        # This is to ensure consistent behavior across Python versions, and help avoid fingerprinting
        # in some situations [2][3].
        # Python 3.10 only supports OpenSSL 1.1.1+ [4]. Because this change is likely
        # untested on older versions, we only apply this to OpenSSL 1.1.1+ to be safe.
        # LibreSSL is excluded until further investigation due to cipher support issues [5][6].
        # 1. https://github.com/python/cpython/commit/e983252b516edb15d4338b0a47631b59ef1e2536
        # 2. https://github.com/yt-dlp/yt-dlp/issues/4627
        # 3. https://github.com/yt-dlp/yt-dlp/pull/5294
        # 4. https://peps.python.org/pep-0644/
        # 5. https://peps.python.org/pep-0644/#libressl-support
        # 6. https://github.com/yt-dlp/yt-dlp/commit/5b9f253fa0aee996cf1ed30185d4b502e00609c4#commitcomment-89054368
        context.set_ciphers('@SECLEVEL=2:ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES:DHE+AES:!aNULL:!eNULL:!aDSS:!SHA1:!AESCCM')
        context.minimum_version = ssl.TLSVersion.TLSv1_2

    context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
    if opts_check_certificate:
        if has_certifi and 'no-certifi' not in params.get('compat_opts', []):
            context.load_verify_locations(cafile=certifi.where())
        else:
            try:
                context.load_default_certs()
                # Work around the issue in load_default_certs when there are bad certificates. See:
                # https://github.com/yt-dlp/yt-dlp/issues/1060,
                # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
            except ssl.SSLError:
                # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
                if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
                    for storename in ('CA', 'ROOT'):
                        _ssl_load_windows_store_certs(context, storename)
                context.set_default_verify_paths()

    client_certfile = params.get('client_certificate')
    if client_certfile:
        try:
            context.load_cert_chain(
                client_certfile, keyfile=params.get('client_certificate_key'),
                password=params.get('client_certificate_password'))
        except ssl.SSLError:
            raise YoutubeDLError('Unable to load client certificate')

    # Some servers may reject requests if ALPN extension is not sent. See:
    # https://github.com/python/cpython/issues/85140
    # https://github.com/yt-dlp/yt-dlp/issues/3878
    with contextlib.suppress(NotImplementedError):
        context.set_alpn_protocols(['http/1.1'])

    return YoutubeDLHTTPSHandler(params, context=context, **kwargs)


def bug_reports_message(before=';'):
    from .update import REPOSITORY

    msg = (f'please report this issue on https://github.com/{REPOSITORY}/issues?q= , '
           'filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U')

    before = before.rstrip()
    if not before or before.endswith(('.', '!', '?')):
        msg = msg[0].title() + msg[1:]

    return (before + ' ' if before else '') + msg


class YoutubeDLError(Exception):
    """Base exception for YoutubeDL errors."""
    msg = None

    def __init__(self, msg=None):
        if msg is not None:
            self.msg = msg
        elif self.msg is None:
            self.msg = type(self).__name__
        super().__init__(self.msg)


network_exceptions = [urllib.error.URLError, http.client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
    network_exceptions.append(ssl.CertificateError)
network_exceptions = tuple(network_exceptions)


class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
        """
        if sys.exc_info()[0] in network_exceptions:
            expected = True

        self.orig_msg = str(msg)
        self.traceback = tb
        self.expected = expected
        self.cause = cause
        self.video_id = video_id
        self.ie = ie
        self.exc_info = sys.exc_info()  # preserve original exception
        if isinstance(self.exc_info[1], ExtractorError):
            self.exc_info = self.exc_info[1].exc_info
        super().__init__(self.__msg)

    @property
    def __msg(self):
        return ''.join((
            format_field(self.ie, None, '[%s] '),
            format_field(self.video_id, None, '%s: '),
            self.orig_msg,
            format_field(self.cause, None, ' (caused by %r)'),
            '' if self.expected else bug_reports_message()))

    def format_traceback(self):
        return join_nonempty(
            self.traceback and ''.join(traceback.format_tb(self.traceback)),
            self.cause and ''.join(traceback.format_exception(None, self.cause, self.cause.__traceback__)[1:]),
            delim='\n') or None

    def __setattr__(self, name, value):
        super().__setattr__(name, value)
        if getattr(self, 'msg', None) and name not in ('msg', 'args'):
            self.msg = self.__msg or type(self).__name__
            self.args = (self.msg, )  # Cannot be property


class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super().__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class GeoRestrictedError(ExtractorError):
    """Geographic restriction Error exception.

    This exception may be thrown when a video is not available from your
    geographic location due to geographic restrictions imposed by a website.
    """

    def __init__(self, msg, countries=None, **kwargs):
        kwargs['expected'] = True
        super().__init__(msg, **kwargs)
        self.countries = countries


class UserNotLive(ExtractorError):
    """Error when a channel/user is not live"""

    def __init__(self, msg=None, **kwargs):
        kwargs['expected'] = True
        super().__init__(msg or 'The channel is not currently live', **kwargs)


class DownloadError(YoutubeDLError):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super().__init__(msg)
        self.exc_info = exc_info


class EntryNotInPlaylist(YoutubeDLError):
    """Entry not in playlist exception.

    This exception will be thrown by YoutubeDL when a requested entry
    is not found in the playlist info_dict
    """
    msg = 'Entry not found in info'


class SameFileError(YoutubeDLError):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    msg = 'Fixed output name but more than one file to download'

    def __init__(self, filename=None):
        if filename is not None:
            self.msg += f': {filename}'
        super().__init__(self.msg)


class PostProcessingError(YoutubeDLError):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """


class DownloadCancelled(YoutubeDLError):
    """ Exception raised when the download queue should be interrupted """
    msg = 'The download was cancelled'


class ExistingVideoReached(DownloadCancelled):
    """ --break-on-existing triggered """
    msg = 'Encountered a video that is already in the archive, stopping due to --break-on-existing'


class RejectedVideoReached(DownloadCancelled):
    """ --break-on-reject triggered """
    msg = 'Encountered a video that did not match filter, stopping due to --break-on-reject'


class MaxDownloadsReached(DownloadCancelled):
    """ --max-downloads limit has been reached. """
    msg = 'Maximum number of downloads reached, stopping due to --max-downloads'


class ReExtractInfo(YoutubeDLError):
    """ Video info needs to be re-extracted. """

    def __init__(self, msg, expected=False):
        super().__init__(msg)
        self.expected = expected


class ThrottledDownload(ReExtractInfo):
    """ Download speed below --throttled-rate. """
    msg = 'The download speed is below throttle limit'

    def __init__(self):
        super().__init__(self.msg, expected=False)


class UnavailableVideoError(YoutubeDLError):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    msg = 'Unable to download video'

    def __init__(self, err=None):
        if err is not None:
            self.msg += f': {err}'
        super().__init__(self.msg)


class ContentTooShortError(YoutubeDLError):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        super().__init__(f'Downloaded {downloaded} bytes, expected {expected} bytes')
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected


class XAttrMetadataError(YoutubeDLError):
    def __init__(self, code=None, msg='Unknown error'):
        super().__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        if (self.code in (errno.ENOSPC, errno.EDQUOT)
                or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'


class XAttrUnavailableError(YoutubeDLError):
    pass


def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    hc = http_class(*args, **kwargs)
    source_address = ydl_handler._params.get('source_address')

    if source_address is not None:
        # This is to workaround _create_connection() from socket where it will try all
        # address data from getaddrinfo() including IPv6. This filters the result from
        # getaddrinfo() based on the source_address value.
        # This is based on the cpython socket.create_connection() function.
        # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
        def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
            host, port = address
            err = None
            addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
            af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
            ip_addrs = [addr for addr in addrs if addr[0] == af]
            if addrs and not ip_addrs:
                ip_version = 'v4' if af == socket.AF_INET else 'v6'
                raise OSError(
                    "No remote IP%s addresses available for connect, can't use '%s' as source address"
                    % (ip_version, source_address[0]))
            for res in ip_addrs:
                af, socktype, proto, canonname, sa = res
                sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                        sock.settimeout(timeout)
                    sock.bind(source_address)
                    sock.connect(sa)
                    err = None  # Explicitly break reference cycle
                    return sock
                except OSError as _:
                    err = _
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            else:
                raise OSError('getaddrinfo returns an empty list')
        if hasattr(hc, '_create_connection'):
            hc._create_connection = _create_connection
        hc.source_address = (source_address, 0)

    return hc


def handle_youtubedl_headers(headers):
    filtered_headers = headers

    if 'Youtubedl-no-compression' in filtered_headers:
        filtered_headers = {k: v for k, v in filtered_headers.items() if k.lower() != 'accept-encoding'}
        del filtered_headers['Youtubedl-no-compression']

    return filtered_headers

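# Illustrative usage (a sketch): the internal 'Youtubedl-no-compression' marker drops any
# Accept-Encoding header before the request is actually sent, and is itself removed:
#   handle_youtubedl_headers({'Accept-Encoding': 'gzip', 'Youtubedl-no-compression': '1'})  # -> {}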

class YoutubeDLHandler(urllib.request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        urllib.request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        conn_class = http.client.HTTPConnection

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        if not data:
            return data
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def brotli(data):
        if not data:
            return data
        return brotli.decompress(data)

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters; however, this is not
        # always respected by websites: some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around the aforementioned issue we will replace the request's original URL with
        # a percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in self._params.get('http_headers', std_headers).items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        if 'Accept-encoding' not in req.headers:
            req.add_header('Accept-encoding', ', '.join(SUPPORTED_ENCODINGS))

        req.headers = handle_youtubedl_headers(req.headers)

        return super().do_request_(req)

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except OSError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except OSError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = urllib.request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = urllib.request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        # brotli
        if resp.headers.get('Content-encoding', '') == 'br':
            resp = urllib.request.addinfourl(
                io.BytesIO(self.brotli(resp.read())), old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/ytdl-org/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 the default charset is iso-8859-1, which is respected by Python 3
                location = location.encode('iso-8859-1').decode()
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response


def make_socks_conn_class(base_class, socks_proxy):
    assert issubclass(base_class, (
        http.client.HTTPConnection, http.client.HTTPSConnection))

    url_components = urllib.parse.urlparse(socks_proxy)
    if url_components.scheme.lower() == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif url_components.scheme.lower() in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif url_components.scheme.lower() == 'socks4a':
        socks_type = ProxyType.SOCKS4A

    def unquote_if_non_empty(s):
        if not s:
            return s
        return urllib.parse.unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if isinstance(self.timeout, (int, float)):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            if isinstance(self, http.client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection


class YoutubeDLHTTPSHandler(urllib.request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        urllib.request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or http.client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        conn_class = self._https_conn_class

        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        try:
            return self.do_open(
                functools.partial(_create_http_connection, self, conn_class, True), req, **kwargs)
        except urllib.error.URLError as e:
            if (isinstance(e.reason, ssl.SSLError)
                    and getattr(e.reason, 'reason', None) == 'SSLV3_ALERT_HANDSHAKE_FAILURE'):
                raise YoutubeDLError('SSLV3_ALERT_HANDSHAKE_FAILURE: Try using --legacy-server-connect')
            raise


def is_path_like(f):
    return isinstance(f, (str, bytes, os.PathLike))


class YoutubeDLCookieJar(http.cookiejar.MozillaCookieJar):
    """
    See [1] for cookie file format.

    1. https://curl.haxx.se/docs/http-cookies.html
    """
    _HTTPONLY_PREFIX = '#HttpOnly_'
    _ENTRY_LEN = 7
    _HEADER = '''# Netscape HTTP Cookie File
# This file is generated by yt-dlp. Do not edit.

'''
    _CookieFileEntry = collections.namedtuple(
        'CookieFileEntry',
        ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))

    def __init__(self, filename=None, *args, **kwargs):
        super().__init__(None, *args, **kwargs)
        if is_path_like(filename):
            filename = os.fspath(filename)
        self.filename = filename

    @staticmethod
    def _true_or_false(cndn):
        return 'TRUE' if cndn else 'FALSE'

    @contextlib.contextmanager
    def open(self, file, *, write=False):
        if is_path_like(file):
            with open(file, 'w' if write else 'r', encoding='utf-8') as f:
                yield f
        else:
            if write:
                file.truncate(0)
            yield file

    def _really_save(self, f, ignore_discard=False, ignore_expires=False):
        now = time.time()
        for cookie in self:
            if (not ignore_discard and cookie.discard
                    or not ignore_expires and cookie.is_expired(now)):
                continue
            name, value = cookie.name, cookie.value
            if value is None:
                # cookies.txt regards 'Set-Cookie: foo' as a cookie
                # with no name, whereas http.cookiejar regards it as a
                # cookie with no value.
                name, value = '', name
            f.write('%s\n' % '\t'.join((
                cookie.domain,
                self._true_or_false(cookie.domain.startswith('.')),
                cookie.path,
                self._true_or_false(cookie.secure),
                str_or_none(cookie.expires, default=''),
                name, value
            )))

    def save(self, filename=None, *args, **kwargs):
        """
        Save cookies to a file.
        Code is taken from CPython 3.6
        https://github.com/python/cpython/blob/8d999cbf4adea053be6dbb612b9844635c4dfb8e/Lib/http/cookiejar.py#L2091-L2117 """

        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(http.cookiejar.MISSING_FILENAME_TEXT)

        # Store session cookies with `expires` set to 0 instead of an empty string
        for cookie in self:
            if cookie.expires is None:
                cookie.expires = 0

        with self.open(filename, write=True) as f:
            f.write(self._HEADER)
24146491 1621 self._really_save(f, *args, **kwargs)
1bab3437
S
1622
1623 def load(self, filename=None, ignore_discard=False, ignore_expires=False):
e7e62441 1624 """Load cookies from a file."""
1625 if filename is None:
1626 if self.filename is not None:
1627 filename = self.filename
1628 else:
ac668111 1629 raise ValueError(http.cookiejar.MISSING_FILENAME_TEXT)
e7e62441 1630
c380cc28
S
1631 def prepare_line(line):
1632 if line.startswith(self._HTTPONLY_PREFIX):
1633 line = line[len(self._HTTPONLY_PREFIX):]
1634 # comments and empty lines are fine
1635 if line.startswith('#') or not line.strip():
1636 return line
1637 cookie_list = line.split('\t')
1638 if len(cookie_list) != self._ENTRY_LEN:
ac668111 1639 raise http.cookiejar.LoadError('invalid length %d' % len(cookie_list))
c380cc28
S
1640 cookie = self._CookieFileEntry(*cookie_list)
1641 if cookie.expires_at and not cookie.expires_at.isdigit():
ac668111 1642 raise http.cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
c380cc28
S
1643 return line
1644
e7e62441 1645 cf = io.StringIO()
d76fa1f3 1646 with self.open(filename) as f:
e7e62441 1647 for line in f:
c380cc28
S
1648 try:
1649 cf.write(prepare_line(line))
ac668111 1650 except http.cookiejar.LoadError as e:
94aa0644 1651 if f'{line.strip()} '[0] in '[{"':
ac668111 1652 raise http.cookiejar.LoadError(
94aa0644 1653 'Cookies file must be Netscape formatted, not JSON. See '
17ffed18 1654 'https://github.com/yt-dlp/yt-dlp/wiki/FAQ#how-do-i-pass-cookies-to-yt-dlp')
19a03940 1655 write_string(f'WARNING: skipping cookie file entry due to {e}: {line!r}\n')
c380cc28 1656 continue
e7e62441 1657 cf.seek(0)
1658 self._really_load(cf, filename, ignore_discard, ignore_expires)
1bab3437
S
1659 # Session cookies are denoted by the `expires` field set to either
1660 # an empty string or 0. MozillaCookieJar only recognizes the former
1661 # (see [1]), so we need to force the latter to be recognized as session
1662 # cookies on our own.
1663 # Session cookies may be important for cookies-based authentication,
1664 # e.g. usually, when a user does not check the 'Remember me' checkbox while
1665 # logging in on a site, some important cookies are stored as session
1666 # cookies, so failing to recognize them will result in a failed login.
1667 # 1. https://bugs.python.org/issue17164
1668 for cookie in self:
1669 # Treat `expires=0` cookies as session cookies
1670 if cookie.expires == 0:
1671 cookie.expires = None
1672 cookie.discard = True
1673
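# Illustrative usage (not part of the original file; the file name is a made-up
# example): round-tripping a Netscape-format cookies file.
#
#     jar = YoutubeDLCookieJar('cookies.txt')
#     jar.load(ignore_discard=True, ignore_expires=True)
#     jar.save()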
1674
ac668111 1675class YoutubeDLCookieProcessor(urllib.request.HTTPCookieProcessor):
a6420bf5 1676 def __init__(self, cookiejar=None):
ac668111 1677 urllib.request.HTTPCookieProcessor.__init__(self, cookiejar)
a6420bf5
S
1678
1679 def http_response(self, request, response):
ac668111 1680 return urllib.request.HTTPCookieProcessor.http_response(self, request, response)
a6420bf5 1681
ac668111 1682 https_request = urllib.request.HTTPCookieProcessor.http_request
a6420bf5
S
1683 https_response = http_response
1684
1685
ac668111 1686class YoutubeDLRedirectHandler(urllib.request.HTTPRedirectHandler):
201c1459 1687 """YoutubeDL redirect handler
1688
1689 The code is based on HTTPRedirectHandler implementation from CPython [1].
1690
1691 This redirect handler solves two issues:
1692 - ensures redirect URL is always unicode under python 2
1693 - introduces support for experimental HTTP response status code
1694 308 Permanent Redirect [2] used by some sites [3]
1695
1696 1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
1697 2. https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308
1698 3. https://github.com/ytdl-org/youtube-dl/issues/28768
1699 """
1700
ac668111 1701 http_error_301 = http_error_303 = http_error_307 = http_error_308 = urllib.request.HTTPRedirectHandler.http_error_302
201c1459 1702
1703 def redirect_request(self, req, fp, code, msg, headers, newurl):
1704 """Return a Request or None in response to a redirect.
1705
1706 This is called by the http_error_30x methods when a
1707 redirection response is received. If a redirection should
1708 take place, return a new Request to allow http_error_30x to
1709 perform the redirect. Otherwise, raise HTTPError if no-one
1710 else should try to handle this url. Return None if you can't
1711 but another Handler might.
1712 """
1713 m = req.get_method()
1714 if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
1715 or code in (301, 302, 303) and m == "POST")):
14f25df2 1716 raise urllib.error.HTTPError(req.full_url, code, msg, headers, fp)
201c1459 1717 # Strictly (according to RFC 2616), 301 or 302 in response to
1718 # a POST MUST NOT cause a redirection without confirmation
1719 # from the user (of urllib.request, in this case). In practice,
1720 # essentially all clients do redirect in this case, so we do
1721 # the same.
1722
201c1459 1723 # Be conciliant with URIs containing a space. This is mainly
1724 # redundant with the more complete encoding done in http_error_302(),
1725 # but it is kept for compatibility with other callers.
1726 newurl = newurl.replace(' ', '%20')
1727
1728 CONTENT_HEADERS = ("content-length", "content-type")
1729 # NB: don't use dict comprehension for python 2.6 compatibility
86e5f3ed 1730 newheaders = {k: v for k, v in req.headers.items() if k.lower() not in CONTENT_HEADERS}
afac4caa 1731
1732 # A 303 must either use GET or HEAD for subsequent request
1733 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.4
1734 if code == 303 and m != 'HEAD':
1735 m = 'GET'
1736 # 301 and 302 redirects are commonly turned into a GET from a POST
1737 # for subsequent requests by browsers, so we'll do the same.
1738 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.2
1739 # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.3
1740 if code in (301, 302) and m == 'POST':
1741 m = 'GET'
1742
ac668111 1743 return urllib.request.Request(
201c1459 1744 newurl, headers=newheaders, origin_req_host=req.origin_req_host,
afac4caa 1745 unverifiable=True, method=m)
fca6dba8
S
1746
1747
46f59e89
S
1748def extract_timezone(date_str):
1749 m = re.search(
f137e4c2 1750 r'''(?x)
1751 ^.{8,}? # >=8 char non-TZ prefix, if present
1752 (?P<tz>Z| # just the UTC Z, or
1753 (?:(?<=.\b\d{4}|\b\d{2}:\d\d)| # preceded by 4 digits or hh:mm or
1754 (?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d)) # not preceded by 3 alpha word or >= 4 alpha or 2 digits
1755 [ ]? # optional space
1756 (?P<sign>\+|-) # +/-
1757 (?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2}) # hh[:]mm
1758 $)
1759 ''', date_str)
46f59e89 1760 if not m:
8f53dc44 1761 m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
1762 timezone = TIMEZONE_NAMES.get(m and m.group('tz').strip())
1763 if timezone is not None:
1764 date_str = date_str[:-len(m.group('tz'))]
1765 timezone = datetime.timedelta(hours=timezone or 0)
46f59e89
S
1766 else:
1767 date_str = date_str[:-len(m.group('tz'))]
1768 if not m.group('sign'):
1769 timezone = datetime.timedelta()
1770 else:
1771 sign = 1 if m.group('sign') == '+' else -1
1772 timezone = datetime.timedelta(
1773 hours=sign * int(m.group('hours')),
1774 minutes=sign * int(m.group('minutes')))
1775 return timezone, date_str
1776
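# Illustrative example (not part of the original file; the sample value was
# checked by hand against the regexes above):
#
#     >>> extract_timezone('2023-01-15T10:00:00+0530')
#     (datetime.timedelta(seconds=19800), '2023-01-15T10:00:00')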
1777
08b38d54 1778def parse_iso8601(date_str, delimiter='T', timezone=None):
912b38b4
PH
1779 """ Return a UNIX timestamp from the given date """
1780
1781 if date_str is None:
1782 return None
1783
52c3a6e4
S
1784 date_str = re.sub(r'\.[0-9]+', '', date_str)
1785
08b38d54 1786 if timezone is None:
46f59e89
S
1787 timezone, date_str = extract_timezone(date_str)
1788
19a03940 1789 with contextlib.suppress(ValueError):
86e5f3ed 1790 date_format = f'%Y-%m-%d{delimiter}%H:%M:%S'
52c3a6e4
S
1791 dt = datetime.datetime.strptime(date_str, date_format) - timezone
1792 return calendar.timegm(dt.timetuple())
912b38b4
PH
1793
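# Illustrative example (not part of the original file): the +0100 offset is
# subtracted before converting to a UNIX timestamp.
#
#     >>> parse_iso8601('2014-03-23T23:04:26+0100')
#     1395612266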
1794
46f59e89
S
1795def date_formats(day_first=True):
1796 return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST
1797
1798
42bdd9d0 1799def unified_strdate(date_str, day_first=True):
bf50b038 1800 """Return a string with the date in the format YYYYMMDD"""
64e7ad60
PH
1801
1802 if date_str is None:
1803 return None
bf50b038 1804 upload_date = None
5f6a1245 1805 # Replace commas
026fcc04 1806 date_str = date_str.replace(',', ' ')
42bdd9d0 1807 # Remove AM/PM + timezone
9bb8e0a3 1808 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
46f59e89 1809 _, date_str = extract_timezone(date_str)
42bdd9d0 1810
46f59e89 1811 for expression in date_formats(day_first):
19a03940 1812 with contextlib.suppress(ValueError):
bf50b038 1813 upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
42393ce2
PH
1814 if upload_date is None:
1815 timetuple = email.utils.parsedate_tz(date_str)
1816 if timetuple:
19a03940 1817 with contextlib.suppress(ValueError):
c6b9cf05 1818 upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
6a750402 1819 if upload_date is not None:
14f25df2 1820 return str(upload_date)
bf50b038 1821
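# Illustrative examples (not part of the original file):
#
#     >>> unified_strdate('December 21, 2010')
#     '20101221'
#     >>> unified_strdate('8/7/2009')  # day_first=True by default
#     '20090708'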
5f6a1245 1822
46f59e89
S
1823def unified_timestamp(date_str, day_first=True):
1824 if date_str is None:
1825 return None
1826
8f53dc44 1827 date_str = re.sub(r'\s+', ' ', re.sub(
1828 r'(?i)[,|]|(mon|tues?|wed(nes)?|thu(rs)?|fri|sat(ur)?)(day)?', '', date_str))
46f59e89 1829
7dc2a74e 1830 pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
46f59e89
S
1831 timezone, date_str = extract_timezone(date_str)
1832
1833 # Remove AM/PM + timezone
1834 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
1835
deef3195
S
1836 # Remove unrecognized timezones from ISO 8601 alike timestamps
1837 m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
1838 if m:
1839 date_str = date_str[:-len(m.group('tz'))]
1840
f226880c
PH
1841 # Python only supports microseconds, so remove nanoseconds
1842 m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
1843 if m:
1844 date_str = m.group(1)
1845
46f59e89 1846 for expression in date_formats(day_first):
19a03940 1847 with contextlib.suppress(ValueError):
7dc2a74e 1848 dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
46f59e89 1849 return calendar.timegm(dt.timetuple())
8f53dc44 1850
46f59e89
S
1851 timetuple = email.utils.parsedate_tz(date_str)
1852 if timetuple:
8f53dc44 1853 return calendar.timegm(timetuple) + pm_delta * 3600 - timezone.total_seconds()
46f59e89
S
1854
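# Illustrative example (not part of the original file): unlike unified_strdate()
# this returns a UNIX timestamp rather than a YYYYMMDD string.
#
#     >>> unified_timestamp('December 21, 2010')
#     1292889600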
1855
28e614de 1856def determine_ext(url, default_ext='unknown_video'):
85750f89 1857 if url is None or '.' not in url:
f4776371 1858 return default_ext
9cb9a5df 1859 guess = url.partition('?')[0].rpartition('.')[2]
73e79f2a
PH
1860 if re.match(r'^[A-Za-z0-9]+$', guess):
1861 return guess
a7aaa398
S
1862 # Try to extract ext from URLs like http://example.com/foo/bar.mp4/?download
1863 elif guess.rstrip('/') in KNOWN_EXTENSIONS:
9cb9a5df 1864 return guess.rstrip('/')
73e79f2a 1865 else:
cbdbb766 1866 return default_ext
73e79f2a 1867
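# Illustrative examples (not part of the original file):
#
#     >>> determine_ext('http://example.com/video.mp4?download=1')
#     'mp4'
#     >>> determine_ext('http://example.com/foo/bar.mp4/?download')
#     'mp4'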
5f6a1245 1868
824fa511
S
1869def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
1870 return replace_extension(filename, sub_lang + '.' + sub_format, expected_real_ext)
d4051a8e 1871
5f6a1245 1872
9e62f283 1873def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
3d38b2d6 1874 R"""
1875 Return a datetime object from a string.
1876 Supported format:
1877 (now|today|yesterday|DATE)([+-]\d+(microsecond|second|minute|hour|day|week|month|year)s?)?
1878
1879 @param format strftime format of DATE
1880 @param precision Round the datetime object: auto|microsecond|second|minute|hour|day
1881 auto: round to the unit provided in date_str (if applicable).
9e62f283 1882 """
1883 auto_precision = False
1884 if precision == 'auto':
1885 auto_precision = True
1886 precision = 'microsecond'
396a76f7 1887 today = datetime_round(datetime.datetime.utcnow(), precision)
f8795e10 1888 if date_str in ('now', 'today'):
37254abc 1889 return today
f8795e10
PH
1890 if date_str == 'yesterday':
1891 return today - datetime.timedelta(days=1)
9e62f283 1892 match = re.match(
3d38b2d6 1893 r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?',
9e62f283 1894 date_str)
37254abc 1895 if match is not None:
9e62f283 1896 start_time = datetime_from_str(match.group('start'), precision, format)
1897 time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1)
37254abc 1898 unit = match.group('unit')
9e62f283 1899 if unit == 'month' or unit == 'year':
1900 new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time)
37254abc 1901 unit = 'day'
9e62f283 1902 else:
1903 if unit == 'week':
1904 unit = 'day'
1905 time *= 7
1906 delta = datetime.timedelta(**{unit + 's': time})
1907 new_date = start_time + delta
1908 if auto_precision:
1909 return datetime_round(new_date, unit)
1910 return new_date
1911
1912 return datetime_round(datetime.datetime.strptime(date_str, format), precision)
1913
1914
d49f8db3 1915def date_from_str(date_str, format='%Y%m%d', strict=False):
3d38b2d6 1916 R"""
1917 Return a date object from a string using datetime_from_str
9e62f283 1918
3d38b2d6 1919 @param strict Restrict allowed patterns to "YYYYMMDD" and
1920 (now|today|yesterday)(-\d+(day|week|month|year)s?)?
9e62f283 1921 """
3d38b2d6 1922 if strict and not re.fullmatch(r'\d{8}|(now|today|yesterday)(-\d+(day|week|month|year)s?)?', date_str):
1923 raise ValueError(f'Invalid date format "{date_str}"')
9e62f283 1924 return datetime_from_str(date_str, precision='microsecond', format=format).date()
1925
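# Illustrative examples (not part of the original file): absolute dates are
# parsed with the given format; relative forms like 'now-1week' are resolved
# against the current date, so their result is not shown here.
#
#     >>> date_from_str('20230115')
#     datetime.date(2023, 1, 15)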
1926
1927def datetime_add_months(dt, months):
1928 """Increment/Decrement a datetime object by months."""
1929 month = dt.month + months - 1
1930 year = dt.year + month // 12
1931 month = month % 12 + 1
1932 day = min(dt.day, calendar.monthrange(year, month)[1])
1933 return dt.replace(year, month, day)
1934
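# Illustrative example (not part of the original file): the day is clamped to
# the last day of the resulting month.
#
#     >>> datetime_add_months(datetime.datetime(2020, 1, 31), 1)
#     datetime.datetime(2020, 2, 29, 0, 0)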
1935
1936def datetime_round(dt, precision='day'):
1937 """
1938 Round a datetime object's time to a specific precision
1939 """
1940 if precision == 'microsecond':
1941 return dt
1942
1943 unit_seconds = {
1944 'day': 86400,
1945 'hour': 3600,
1946 'minute': 60,
1947 'second': 1,
1948 }
1949 roundto = lambda x, n: ((x + n / 2) // n) * n
1950 timestamp = calendar.timegm(dt.timetuple())
1951 return datetime.datetime.utcfromtimestamp(roundto(timestamp, unit_seconds[precision]))
5f6a1245
JW
1952
1953
e63fc1be 1954def hyphenate_date(date_str):
1955 """
1956 Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
1957 match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
1958 if match is not None:
1959 return '-'.join(match.groups())
1960 else:
1961 return date_str
1962
5f6a1245 1963
86e5f3ed 1964class DateRange:
bd558525 1965 """Represents a time interval between two dates"""
5f6a1245 1966
bd558525
JMF
1967 def __init__(self, start=None, end=None):
1968 """start and end must be strings in the format accepted by date"""
1969 if start is not None:
d49f8db3 1970 self.start = date_from_str(start, strict=True)
bd558525
JMF
1971 else:
1972 self.start = datetime.datetime.min.date()
1973 if end is not None:
d49f8db3 1974 self.end = date_from_str(end, strict=True)
bd558525
JMF
1975 else:
1976 self.end = datetime.datetime.max.date()
37254abc 1977 if self.start > self.end:
bd558525 1978 raise ValueError('Date range: "%s", the start date must be before the end date' % self)
5f6a1245 1979
bd558525
JMF
1980 @classmethod
1981 def day(cls, day):
1982 """Returns a range that only contains the given day"""
5f6a1245
JW
1983 return cls(day, day)
1984
bd558525
JMF
1985 def __contains__(self, date):
1986 """Check if the date is in the range"""
37254abc
JMF
1987 if not isinstance(date, datetime.date):
1988 date = date_from_str(date)
1989 return self.start <= date <= self.end
5f6a1245 1990
bd558525 1991 def __str__(self):
86e5f3ed 1992 return f'{self.start.isoformat()} - {self.end.isoformat()}'
c496ca96 1993
f2df4071 1994 def __eq__(self, other):
1995 return (isinstance(other, DateRange)
1996 and self.start == other.start and self.end == other.end)
1997
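# Illustrative examples (not part of the original file):
#
#     >>> '20230615' in DateRange('20230101', '20231231')
#     True
#     >>> str(DateRange('20230101', '20230131'))
#     '2023-01-01 - 2023-01-31'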
c496ca96
PH
1998
1999def platform_name():
14f25df2 2000 """ Returns the platform name as a str """
da4db748 2001 deprecation_warning(f'"{__name__}.platform_name" is deprecated, use "platform.platform" instead')
b1f94422 2002 return platform.platform()
c496ca96 2003
b1f94422 2004
2005@functools.cache
2006def system_identifier():
2007 python_implementation = platform.python_implementation()
2008 if python_implementation == 'PyPy' and hasattr(sys, 'pypy_version_info'):
2009 python_implementation += ' version %d.%d.%d' % sys.pypy_version_info[:3]
dab284f8 2010 libc_ver = []
2011 with contextlib.suppress(OSError): # We may not have access to the executable
2012 libc_ver = platform.libc_ver()
b1f94422 2013
17fc3dc4 2014 return 'Python %s (%s %s %s) - %s (%s%s)' % (
b1f94422 2015 platform.python_version(),
2016 python_implementation,
17fc3dc4 2017 platform.machine(),
b1f94422 2018 platform.architecture()[0],
2019 platform.platform(),
5b9f253f
M
2020 ssl.OPENSSL_VERSION,
2021 format_field(join_nonempty(*libc_ver, delim=' '), None, ', %s'),
b1f94422 2022 )
c257baff
PH
2023
2024
0b9c08b4 2025@functools.cache
49fa4d9a 2026def get_windows_version():
8a82af35 2027 ''' Get Windows version. Returns () if it's not running on Windows '''
49fa4d9a
N
2028 if compat_os_name == 'nt':
2029 return version_tuple(platform.win32_ver()[1])
2030 else:
8a82af35 2031 return ()
49fa4d9a
N
2032
2033
734f90bb 2034def write_string(s, out=None, encoding=None):
19a03940 2035 assert isinstance(s, str)
2036 out = out or sys.stderr
7459e3a2 2037
fe1daad3 2038 if compat_os_name == 'nt' and supports_terminal_sequences(out):
3fe75fdc 2039 s = re.sub(r'([\r\n]+)', r' \1', s)
59f943cd 2040
8a82af35 2041 enc, buffer = None, out
cfb0511d 2042 if 'b' in getattr(out, 'mode', ''):
c487cf00 2043 enc = encoding or preferredencoding()
104aa738 2044 elif hasattr(out, 'buffer'):
8a82af35 2045 buffer = out.buffer
104aa738 2046 enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
c487cf00 2047
8a82af35 2048 buffer.write(s.encode(enc, 'ignore') if enc else s)
7459e3a2
PH
2049 out.flush()
2050
2051
da4db748 2052def deprecation_warning(msg, *, printer=None, stacklevel=0, **kwargs):
2053 from . import _IN_CLI
2054 if _IN_CLI:
2055 if msg in deprecation_warning._cache:
2056 return
2057 deprecation_warning._cache.add(msg)
2058 if printer:
2059 return printer(f'{msg}{bug_reports_message()}', **kwargs)
2060 return write_string(f'ERROR: {msg}{bug_reports_message()}\n', **kwargs)
2061 else:
2062 import warnings
2063 warnings.warn(DeprecationWarning(msg), stacklevel=stacklevel + 3)
2064
2065
2066deprecation_warning._cache = set()
2067
2068
48ea9cea
PH
2069def bytes_to_intlist(bs):
2070 if not bs:
2071 return []
2072 if isinstance(bs[0], int): # Python 3
2073 return list(bs)
2074 else:
2075 return [ord(c) for c in bs]
2076
c257baff 2077
cba892fa 2078def intlist_to_bytes(xs):
2079 if not xs:
2080 return b''
ac668111 2081 return struct.pack('%dB' % len(xs), *xs)
c38b1e77
PH
2082
2083
8a82af35 2084class LockingUnsupportedError(OSError):
1890fc63 2085 msg = 'File locking is not supported'
0edb3e33 2086
2087 def __init__(self):
2088 super().__init__(self.msg)
2089
2090
c1c9a79c
PH
2091# Cross-platform file locking
2092if sys.platform == 'win32':
fe0918bb 2093 import ctypes
c1c9a79c
PH
2094 import ctypes.wintypes
2095 import msvcrt
2096
2097 class OVERLAPPED(ctypes.Structure):
2098 _fields_ = [
2099 ('Internal', ctypes.wintypes.LPVOID),
2100 ('InternalHigh', ctypes.wintypes.LPVOID),
2101 ('Offset', ctypes.wintypes.DWORD),
2102 ('OffsetHigh', ctypes.wintypes.DWORD),
2103 ('hEvent', ctypes.wintypes.HANDLE),
2104 ]
2105
37e325b9 2106 kernel32 = ctypes.WinDLL('kernel32')
c1c9a79c
PH
2107 LockFileEx = kernel32.LockFileEx
2108 LockFileEx.argtypes = [
2109 ctypes.wintypes.HANDLE, # hFile
2110 ctypes.wintypes.DWORD, # dwFlags
2111 ctypes.wintypes.DWORD, # dwReserved
2112 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
2113 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
2114 ctypes.POINTER(OVERLAPPED) # Overlapped
2115 ]
2116 LockFileEx.restype = ctypes.wintypes.BOOL
2117 UnlockFileEx = kernel32.UnlockFileEx
2118 UnlockFileEx.argtypes = [
2119 ctypes.wintypes.HANDLE, # hFile
2120 ctypes.wintypes.DWORD, # dwReserved
2121 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
2122 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
2123 ctypes.POINTER(OVERLAPPED) # Overlapped
2124 ]
2125 UnlockFileEx.restype = ctypes.wintypes.BOOL
2126 whole_low = 0xffffffff
2127 whole_high = 0x7fffffff
2128
747c0bd1 2129 def _lock_file(f, exclusive, block):
c1c9a79c
PH
2130 overlapped = OVERLAPPED()
2131 overlapped.Offset = 0
2132 overlapped.OffsetHigh = 0
2133 overlapped.hEvent = 0
2134 f._lock_file_overlapped_p = ctypes.pointer(overlapped)
747c0bd1 2135
2136 if not LockFileEx(msvcrt.get_osfhandle(f.fileno()),
2137 (0x2 if exclusive else 0x0) | (0x0 if block else 0x1),
2138 0, whole_low, whole_high, f._lock_file_overlapped_p):
2cb19820 2139 # NB: The no-argument form of "ctypes.FormatError" does not work on PyPy
2140 raise BlockingIOError(f'Locking file failed: {ctypes.FormatError(ctypes.GetLastError())!r}')
c1c9a79c
PH
2141
2142 def _unlock_file(f):
2143 assert f._lock_file_overlapped_p
2144 handle = msvcrt.get_osfhandle(f.fileno())
747c0bd1 2145 if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p):
c1c9a79c
PH
2146 raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
2147
2148else:
399a76e6
YCH
2149 try:
2150 import fcntl
c1c9a79c 2151
a3125791 2152 def _lock_file(f, exclusive, block):
b63837bc 2153 flags = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
2154 if not block:
2155 flags |= fcntl.LOCK_NB
acea8d7c 2156 try:
b63837bc 2157 fcntl.flock(f, flags)
acea8d7c
JK
2158 except BlockingIOError:
2159 raise
2160 except OSError: # AOSP does not have flock()
b63837bc 2161 fcntl.lockf(f, flags)
c1c9a79c 2162
399a76e6 2163 def _unlock_file(f):
acea8d7c
JK
2164 try:
2165 fcntl.flock(f, fcntl.LOCK_UN)
2166 except OSError:
2167 fcntl.lockf(f, fcntl.LOCK_UN)
a3125791 2168
399a76e6 2169 except ImportError:
399a76e6 2170
a3125791 2171 def _lock_file(f, exclusive, block):
0edb3e33 2172 raise LockingUnsupportedError()
399a76e6
YCH
2173
2174 def _unlock_file(f):
0edb3e33 2175 raise LockingUnsupportedError()
c1c9a79c
PH
2176
2177
86e5f3ed 2178class locked_file:
0edb3e33 2179 locked = False
747c0bd1 2180
a3125791 2181 def __init__(self, filename, mode, block=True, encoding=None):
fcfa8853
JK
2182 if mode not in {'r', 'rb', 'a', 'ab', 'w', 'wb'}:
2183 raise NotImplementedError(mode)
2184 self.mode, self.block = mode, block
2185
2186 writable = any(f in mode for f in 'wax+')
2187 readable = any(f in mode for f in 'r+')
2188 flags = functools.reduce(operator.ior, (
2189 getattr(os, 'O_CLOEXEC', 0), # UNIX only
2190 getattr(os, 'O_BINARY', 0), # Windows only
2191 getattr(os, 'O_NOINHERIT', 0), # Windows only
2192 os.O_CREAT if writable else 0, # O_TRUNC only after locking
2193 os.O_APPEND if 'a' in mode else 0,
2194 os.O_EXCL if 'x' in mode else 0,
2195 os.O_RDONLY if not writable else os.O_RDWR if readable else os.O_WRONLY,
2196 ))
2197
98804d03 2198 self.f = os.fdopen(os.open(filename, flags, 0o666), mode, encoding=encoding)
c1c9a79c
PH
2199
2200 def __enter__(self):
a3125791 2201 exclusive = 'r' not in self.mode
c1c9a79c 2202 try:
a3125791 2203 _lock_file(self.f, exclusive, self.block)
0edb3e33 2204 self.locked = True
86e5f3ed 2205 except OSError:
c1c9a79c
PH
2206 self.f.close()
2207 raise
fcfa8853 2208 if 'w' in self.mode:
131e14dc
JK
2209 try:
2210 self.f.truncate()
2211 except OSError as e:
1890fc63 2212 if e.errno not in (
2213 errno.ESPIPE, # Illegal seek - expected for FIFO
2214 errno.EINVAL, # Invalid argument - expected for /dev/null
2215 ):
2216 raise
c1c9a79c
PH
2217 return self
2218
0edb3e33 2219 def unlock(self):
2220 if not self.locked:
2221 return
c1c9a79c 2222 try:
0edb3e33 2223 _unlock_file(self.f)
c1c9a79c 2224 finally:
0edb3e33 2225 self.locked = False
c1c9a79c 2226
0edb3e33 2227 def __exit__(self, *_):
2228 try:
2229 self.unlock()
2230 finally:
2231 self.f.close()
4eb7f1d1 2232
0edb3e33 2233 open = __enter__
2234 close = __exit__
a3125791 2235
0edb3e33 2236 def __getattr__(self, attr):
2237 return getattr(self.f, attr)
a3125791 2238
0edb3e33 2239 def __iter__(self):
2240 return iter(self.f)
a3125791 2241
4eb7f1d1 2242
0b9c08b4 2243@functools.cache
4644ac55
S
2244def get_filesystem_encoding():
2245 encoding = sys.getfilesystemencoding()
2246 return encoding if encoding is not None else 'utf-8'
2247
2248
4eb7f1d1 2249def shell_quote(args):
a6a173c2 2250 quoted_args = []
4644ac55 2251 encoding = get_filesystem_encoding()
a6a173c2
JMF
2252 for a in args:
2253 if isinstance(a, bytes):
2254 # We may get a filename encoded with 'encodeFilename'
2255 a = a.decode(encoding)
aefce8e6 2256 quoted_args.append(compat_shlex_quote(a))
28e614de 2257 return ' '.join(quoted_args)
9d4660ca
PH
2258
2259
2260def smuggle_url(url, data):
2261 """ Pass additional data in a URL for internal use. """
2262
81953d1a
RA
2263 url, idata = unsmuggle_url(url, {})
2264 data.update(idata)
14f25df2 2265 sdata = urllib.parse.urlencode(
28e614de
PH
2266 {'__youtubedl_smuggle': json.dumps(data)})
2267 return url + '#' + sdata
9d4660ca
PH
2268
2269
79f82953 2270def unsmuggle_url(smug_url, default=None):
83e865a3 2271 if '#__youtubedl_smuggle' not in smug_url:
79f82953 2272 return smug_url, default
28e614de 2273 url, _, sdata = smug_url.rpartition('#')
14f25df2 2274 jsond = urllib.parse.parse_qs(sdata)['__youtubedl_smuggle'][0]
9d4660ca
PH
2275 data = json.loads(jsond)
2276 return url, data
02dbf93f
PH
2277
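# Illustrative example (not part of the original file): smuggled data survives a
# round trip through the URL fragment. The URL and payload are made-up samples.
#
#     >>> smuggled = smuggle_url('https://example.com/watch', {'referer': 'https://example.com/'})
#     >>> unsmuggle_url(smuggled)
#     ('https://example.com/watch', {'referer': 'https://example.com/'})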
2278
e0fd9573 2279def format_decimal_suffix(num, fmt='%d%s', *, factor=1000):
2280 """ Formats numbers with decimal sufixes like K, M, etc """
2281 num, factor = float_or_none(num), float(factor)
4c3f8c3f 2282 if num is None or num < 0:
e0fd9573 2283 return None
eeb2a770 2284 POSSIBLE_SUFFIXES = 'kMGTPEZY'
2285 exponent = 0 if num == 0 else min(int(math.log(num, factor)), len(POSSIBLE_SUFFIXES))
2286 suffix = ['', *POSSIBLE_SUFFIXES][exponent]
abbeeebc 2287 if factor == 1024:
2288 suffix = {'k': 'Ki', '': ''}.get(suffix, f'{suffix}i')
e0fd9573 2289 converted = num / (factor ** exponent)
abbeeebc 2290 return fmt % (converted, suffix)
e0fd9573 2291
2292
02dbf93f 2293def format_bytes(bytes):
f02d24d8 2294 return format_decimal_suffix(bytes, '%.2f%sB', factor=1024) or 'N/A'
f53c966a 2295
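# Illustrative examples (not part of the original file):
#
#     >>> format_bytes(1536)
#     '1.50KiB'
#     >>> format_decimal_suffix(1500000, '%.1f%s')
#     '1.5M'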
1c088fa8 2296
64c464a1 2297def lookup_unit_table(unit_table, s, strict=False):
2298 num_re = NUMBER_RE if strict else NUMBER_RE.replace(R'\.', '[,.]')
fb47597b 2299 units_re = '|'.join(re.escape(u) for u in unit_table)
64c464a1 2300 m = (re.fullmatch if strict else re.match)(
2301 rf'(?P<num>{num_re})\s*(?P<unit>{units_re})\b', s)
fb47597b
S
2302 if not m:
2303 return None
64c464a1 2304
2305 num = float(m.group('num').replace(',', '.'))
fb47597b 2306 mult = unit_table[m.group('unit')]
64c464a1 2307 return round(num * mult)
2308
2309
2310def parse_bytes(s):
2311 """Parse a string indicating a byte quantity into an integer"""
2312 return lookup_unit_table(
2313 {u: 1024**i for i, u in enumerate(['', *'KMGTPEZY'])},
2314 s.upper(), strict=True)
fb47597b
S
2315
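# Illustrative example (not part of the original file): parse_bytes() always
# uses binary (1024-based) multipliers.
#
#     >>> parse_bytes('10M')
#     10485760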
2316
be64b5b0
PH
2317def parse_filesize(s):
2318 if s is None:
2319 return None
2320
dfb1b146 2321 # The lower-case forms are of course incorrect and unofficial,
be64b5b0
PH
2322 # but we support those too
2323 _UNIT_TABLE = {
2324 'B': 1,
2325 'b': 1,
70852b47 2326 'bytes': 1,
be64b5b0
PH
2327 'KiB': 1024,
2328 'KB': 1000,
2329 'kB': 1024,
2330 'Kb': 1000,
13585d76 2331 'kb': 1000,
70852b47
YCH
2332 'kilobytes': 1000,
2333 'kibibytes': 1024,
be64b5b0
PH
2334 'MiB': 1024 ** 2,
2335 'MB': 1000 ** 2,
2336 'mB': 1024 ** 2,
2337 'Mb': 1000 ** 2,
13585d76 2338 'mb': 1000 ** 2,
70852b47
YCH
2339 'megabytes': 1000 ** 2,
2340 'mebibytes': 1024 ** 2,
be64b5b0
PH
2341 'GiB': 1024 ** 3,
2342 'GB': 1000 ** 3,
2343 'gB': 1024 ** 3,
2344 'Gb': 1000 ** 3,
13585d76 2345 'gb': 1000 ** 3,
70852b47
YCH
2346 'gigabytes': 1000 ** 3,
2347 'gibibytes': 1024 ** 3,
be64b5b0
PH
2348 'TiB': 1024 ** 4,
2349 'TB': 1000 ** 4,
2350 'tB': 1024 ** 4,
2351 'Tb': 1000 ** 4,
13585d76 2352 'tb': 1000 ** 4,
70852b47
YCH
2353 'terabytes': 1000 ** 4,
2354 'tebibytes': 1024 ** 4,
be64b5b0
PH
2355 'PiB': 1024 ** 5,
2356 'PB': 1000 ** 5,
2357 'pB': 1024 ** 5,
2358 'Pb': 1000 ** 5,
13585d76 2359 'pb': 1000 ** 5,
70852b47
YCH
2360 'petabytes': 1000 ** 5,
2361 'pebibytes': 1024 ** 5,
be64b5b0
PH
2362 'EiB': 1024 ** 6,
2363 'EB': 1000 ** 6,
2364 'eB': 1024 ** 6,
2365 'Eb': 1000 ** 6,
13585d76 2366 'eb': 1000 ** 6,
70852b47
YCH
2367 'exabytes': 1000 ** 6,
2368 'exbibytes': 1024 ** 6,
be64b5b0
PH
2369 'ZiB': 1024 ** 7,
2370 'ZB': 1000 ** 7,
2371 'zB': 1024 ** 7,
2372 'Zb': 1000 ** 7,
13585d76 2373 'zb': 1000 ** 7,
70852b47
YCH
2374 'zettabytes': 1000 ** 7,
2375 'zebibytes': 1024 ** 7,
be64b5b0
PH
2376 'YiB': 1024 ** 8,
2377 'YB': 1000 ** 8,
2378 'yB': 1024 ** 8,
2379 'Yb': 1000 ** 8,
13585d76 2380 'yb': 1000 ** 8,
70852b47
YCH
2381 'yottabytes': 1000 ** 8,
2382 'yobibytes': 1024 ** 8,
be64b5b0
PH
2383 }
2384
fb47597b
S
2385 return lookup_unit_table(_UNIT_TABLE, s)
2386
2387
2388def parse_count(s):
2389 if s is None:
be64b5b0
PH
2390 return None
2391
352d5da8 2392 s = re.sub(r'^[^\d]+\s', '', s).strip()
fb47597b
S
2393
2394 if re.match(r'^[\d,.]+$', s):
2395 return str_to_int(s)
2396
2397 _UNIT_TABLE = {
2398 'k': 1000,
2399 'K': 1000,
2400 'm': 1000 ** 2,
2401 'M': 1000 ** 2,
2402 'kk': 1000 ** 2,
2403 'KK': 1000 ** 2,
352d5da8 2404 'b': 1000 ** 3,
2405 'B': 1000 ** 3,
fb47597b 2406 }
be64b5b0 2407
352d5da8 2408 ret = lookup_unit_table(_UNIT_TABLE, s)
2409 if ret is not None:
2410 return ret
2411
2412 mobj = re.match(r'([\d,.]+)(?:$|\s)', s)
2413 if mobj:
2414 return str_to_int(mobj.group(1))
be64b5b0 2415
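# Illustrative examples (not part of the original file):
#
#     >>> parse_filesize('1.2MiB')
#     1258291
#     >>> parse_count('1.5M')
#     1500000
#     >>> parse_count('1,234 views')
#     1234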
2f7ae819 2416
5d45484c 2417def parse_resolution(s, *, lenient=False):
b871d7e9
S
2418 if s is None:
2419 return {}
2420
5d45484c
LNO
2421 if lenient:
2422 mobj = re.search(r'(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)', s)
2423 else:
2424 mobj = re.search(r'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s)
b871d7e9
S
2425 if mobj:
2426 return {
2427 'width': int(mobj.group('w')),
2428 'height': int(mobj.group('h')),
2429 }
2430
17ec8bcf 2431 mobj = re.search(r'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s)
b871d7e9
S
2432 if mobj:
2433 return {'height': int(mobj.group(1))}
2434
2435 mobj = re.search(r'\b([48])[kK]\b', s)
2436 if mobj:
2437 return {'height': int(mobj.group(1)) * 540}
2438
2439 return {}
2440
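# Illustrative examples (not part of the original file):
#
#     >>> parse_resolution('1920x1080')
#     {'width': 1920, 'height': 1080}
#     >>> parse_resolution('720p')
#     {'height': 720}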
2441
0dc41787 2442def parse_bitrate(s):
14f25df2 2443 if not isinstance(s, str):
0dc41787
S
2444 return
2445 mobj = re.search(r'\b(\d+)\s*kbps', s)
2446 if mobj:
2447 return int(mobj.group(1))
2448
2449
a942d6cb 2450def month_by_name(name, lang='en'):
caefb1de
PH
2451 """ Return the number of a month by (locale-independently) English name """
2452
f6717dec 2453 month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
a942d6cb 2454
caefb1de 2455 try:
f6717dec 2456 return month_names.index(name) + 1
7105440c
YCH
2457 except ValueError:
2458 return None
2459
2460
2461def month_by_abbreviation(abbrev):
2462 """ Return the number of a month by (locale-independently) English
2463 abbreviations """
2464
2465 try:
2466 return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
caefb1de
PH
2467 except ValueError:
2468 return None
18258362
JMF
2469
2470
5aafe895 2471def fix_xml_ampersands(xml_str):
18258362 2472 """Replace all the '&' by '&amp;' in XML"""
5aafe895
PH
2473 return re.sub(
2474 r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
28e614de 2475 '&amp;',
5aafe895 2476 xml_str)
e3946f98
PH
2477
2478
2479def setproctitle(title):
14f25df2 2480 assert isinstance(title, str)
c1c05c67 2481
fe0918bb 2482 # Workaround for https://github.com/yt-dlp/yt-dlp/issues/4541
2483 try:
2484 import ctypes
2485 except ImportError:
c1c05c67
YCH
2486 return
2487
e3946f98 2488 try:
611c1dd9 2489 libc = ctypes.cdll.LoadLibrary('libc.so.6')
e3946f98
PH
2490 except OSError:
2491 return
2f49bcd6
RC
2492 except TypeError:
2493 # LoadLibrary in Windows Python 2.7.13 only expects
2494 # a bytestring, but since unicode_literals turns
2495 # every string into a unicode string, it fails.
2496 return
0f06bcd7 2497 title_bytes = title.encode()
6eefe533
PH
2498 buf = ctypes.create_string_buffer(len(title_bytes))
2499 buf.value = title_bytes
e3946f98 2500 try:
6eefe533 2501 libc.prctl(15, buf, 0, 0, 0)
e3946f98
PH
2502 except AttributeError:
2503 return # Strange libc, just skip this
d7dda168
PH
2504
2505
2506def remove_start(s, start):
46bc9b7d 2507 return s[len(start):] if s is not None and s.startswith(start) else s
29eb5174
PH
2508
2509
2b9faf55 2510def remove_end(s, end):
46bc9b7d 2511 return s[:-len(end)] if s is not None and s.endswith(end) else s
2b9faf55
PH
2512
2513
31b2051e
S
2514def remove_quotes(s):
2515 if s is None or len(s) < 2:
2516 return s
2517 for quote in ('"', "'", ):
2518 if s[0] == quote and s[-1] == quote:
2519 return s[1:-1]
2520 return s
2521
2522
b6e0c7d2 2523def get_domain(url):
ebf99aaf 2524 """
2525 This implementation is inconsistent, but is kept for compatibility.
2526 Use this only for "webpage_url_domain"
2527 """
2528 return remove_start(urllib.parse.urlparse(url).netloc, 'www.') or None
b6e0c7d2
U
2529
2530
29eb5174 2531def url_basename(url):
14f25df2 2532 path = urllib.parse.urlparse(url).path
28e614de 2533 return path.strip('/').split('/')[-1]
aa94a6d3
PH
2534
2535
02dc0a36 2536def base_url(url):
7657ec7e 2537 return re.match(r'https?://[^?#]+/', url).group()
02dc0a36
S
2538
2539
e34c3361 2540def urljoin(base, path):
4b5de77b 2541 if isinstance(path, bytes):
0f06bcd7 2542 path = path.decode()
14f25df2 2543 if not isinstance(path, str) or not path:
e34c3361 2544 return None
fad4ceb5 2545 if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
e34c3361 2546 return path
4b5de77b 2547 if isinstance(base, bytes):
0f06bcd7 2548 base = base.decode()
14f25df2 2549 if not isinstance(base, str) or not re.match(
4b5de77b 2550 r'^(?:https?:)?//', base):
e34c3361 2551 return None
14f25df2 2552 return urllib.parse.urljoin(base, path)
e34c3361
S
2553
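# Illustrative examples (not part of the original file; the URLs are made-up
# samples):
#
#     >>> url_basename('https://example.com/a/b/video.mp4?x=1')
#     'video.mp4'
#     >>> base_url('https://example.com/a/b/video.mp4?x=1')
#     'https://example.com/a/b/'
#     >>> urljoin('https://example.com/a/b/', '../c.mp4')
#     'https://example.com/a/c.mp4'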
2554
ac668111 2555class HEADRequest(urllib.request.Request):
aa94a6d3 2556 def get_method(self):
611c1dd9 2557 return 'HEAD'
7217e148
PH
2558
2559
ac668111 2560class PUTRequest(urllib.request.Request):
95cf60e8
S
2561 def get_method(self):
2562 return 'PUT'
2563
2564
9732d77e 2565def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
9e907ebd 2566 if get_attr and v is not None:
2567 v = getattr(v, get_attr, None)
1812afb7
S
2568 try:
2569 return int(v) * invscale // scale
31c49255 2570 except (ValueError, TypeError, OverflowError):
af98f8ff 2571 return default
9732d77e 2572
9572013d 2573
40a90862 2574def str_or_none(v, default=None):
14f25df2 2575 return default if v is None else str(v)
40a90862 2576
9732d77e
PH
2577
2578def str_to_int(int_str):
48d4681e 2579 """ A more relaxed version of int_or_none """
f9934b96 2580 if isinstance(int_str, int):
348c6bf1 2581 return int_str
14f25df2 2582 elif isinstance(int_str, str):
42db58ec
S
2583 int_str = re.sub(r'[,\.\+]', '', int_str)
2584 return int_or_none(int_str)
608d11f5
PH
2585
2586
9732d77e 2587def float_or_none(v, scale=1, invscale=1, default=None):
caf80631
S
2588 if v is None:
2589 return default
2590 try:
2591 return float(v) * invscale / scale
5e1271c5 2592 except (ValueError, TypeError):
caf80631 2593 return default
43f775e4
PH
2594
2595
c7e327c4
S
2596def bool_or_none(v, default=None):
2597 return v if isinstance(v, bool) else default
2598
2599
53cd37ba 2600def strip_or_none(v, default=None):
14f25df2 2601 return v.strip() if isinstance(v, str) else default
b72b4431
S
2602
2603
af03000a 2604def url_or_none(url):
14f25df2 2605 if not url or not isinstance(url, str):
af03000a
S
2606 return None
2607 url = url.strip()
29f7c58a 2608 return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None
af03000a
S
2609
2610
3e9b66d7 2611def request_to_url(req):
ac668111 2612 if isinstance(req, urllib.request.Request):
3e9b66d7
LNO
2613 return req.get_full_url()
2614 else:
2615 return req
2616
2617
e29663c6 2618def strftime_or_none(timestamp, date_format, default=None):
2619 datetime_object = None
2620 try:
f9934b96 2621 if isinstance(timestamp, (int, float)): # unix timestamp
d509c1f5 2622 # Using naive datetime here can break timestamp() in Windows
2623 # Ref: https://github.com/yt-dlp/yt-dlp/issues/5185, https://github.com/python/cpython/issues/94414
2624 datetime_object = datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc)
14f25df2 2625 elif isinstance(timestamp, str): # assume YYYYMMDD
e29663c6 2626 datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
9665f15a 2627 date_format = re.sub( # Support %s on windows
2628 r'(?<!%)(%%)*%s', rf'\g<1>{int(datetime_object.timestamp())}', date_format)
e29663c6 2629 return datetime_object.strftime(date_format)
2630 except (ValueError, TypeError, AttributeError):
2631 return default
2632
2633
608d11f5 2634def parse_duration(s):
f9934b96 2635 if not isinstance(s, str):
608d11f5 2636 return None
ca7b3246 2637 s = s.strip()
38d79fd1 2638 if not s:
2639 return None
ca7b3246 2640
acaff495 2641 days, hours, mins, secs, ms = [None] * 5
8bd1c00b 2642 m = re.match(r'''(?x)
2643 (?P<before_secs>
2644 (?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?
2645 (?P<secs>(?(before_secs)[0-9]{1,2}|[0-9]+))
2646 (?P<ms>[.:][0-9]+)?Z?$
2647 ''', s)
acaff495 2648 if m:
8bd1c00b 2649 days, hours, mins, secs, ms = m.group('days', 'hours', 'mins', 'secs', 'ms')
acaff495 2650 else:
2651 m = re.match(
056653bb
S
2652 r'''(?ix)(?:P?
2653 (?:
1c1b2f96 2654 [0-9]+\s*y(?:ears?)?,?\s*
056653bb
S
2655 )?
2656 (?:
1c1b2f96 2657 [0-9]+\s*m(?:onths?)?,?\s*
056653bb
S
2658 )?
2659 (?:
1c1b2f96 2660 [0-9]+\s*w(?:eeks?)?,?\s*
056653bb 2661 )?
8f4b58d7 2662 (?:
1c1b2f96 2663 (?P<days>[0-9]+)\s*d(?:ays?)?,?\s*
8f4b58d7 2664 )?
056653bb 2665 T)?
acaff495 2666 (?:
1c1b2f96 2667 (?P<hours>[0-9]+)\s*h(?:ours?)?,?\s*
acaff495 2668 )?
2669 (?:
1c1b2f96 2670 (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?,?\s*
acaff495 2671 )?
2672 (?:
2673 (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
15846398 2674 )?Z?$''', s)
acaff495 2675 if m:
2676 days, hours, mins, secs, ms = m.groups()
2677 else:
15846398 2678 m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
acaff495 2679 if m:
2680 hours, mins = m.groups()
2681 else:
2682 return None
2683
acaff495 2684 if ms:
19a03940 2685 ms = ms.replace(':', '.')
2686 return sum(float(part or 0) * mult for part, mult in (
2687 (days, 86400), (hours, 3600), (mins, 60), (secs, 1), (ms, 1)))
91d7d0b3
JMF
2688
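# Illustrative examples (not part of the original file): the result is always a
# float number of seconds.
#
#     >>> parse_duration('1:02:03')
#     3723.0
#     >>> parse_duration('1h 30m')
#     5400.0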
2689
e65e4c88 2690def prepend_extension(filename, ext, expected_real_ext=None):
5f6a1245 2691 name, real_ext = os.path.splitext(filename)
e65e4c88 2692 return (
86e5f3ed 2693 f'{name}.{ext}{real_ext}'
e65e4c88 2694 if not expected_real_ext or real_ext[1:] == expected_real_ext
86e5f3ed 2695 else f'{filename}.{ext}')
d70ad093
PH
2696
2697
b3ed15b7
S
2698def replace_extension(filename, ext, expected_real_ext=None):
2699 name, real_ext = os.path.splitext(filename)
86e5f3ed 2700 return '{}.{}'.format(
b3ed15b7
S
2701 name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
2702 ext)
2703
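# Illustrative examples (not part of the original file):
#
#     >>> prepend_extension('video.mp4', 'temp')
#     'video.temp.mp4'
#     >>> replace_extension('video.mp4', 'mkv')
#     'video.mkv'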
2704
d70ad093
PH
2705def check_executable(exe, args=[]):
2706 """ Checks if the given binary is installed somewhere in PATH, and returns its name.
2707 args can be a list of arguments for a short output (like -version) """
2708 try:
f0c9fb96 2709 Popen.run([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
d70ad093
PH
2710 except OSError:
2711 return False
2712 return exe
b7ab0590
PH
2713
2714
7aaf4cd2 2715def _get_exe_version_output(exe, args):
95807118 2716 try:
b64d04c1 2717 # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
7a5c1cfe 2718 # SIGTTOU if yt-dlp is run in the background.
067aa17e 2719 # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
1cdda329 2720 stdout, _, ret = Popen.run([encodeArgument(exe)] + args, text=True,
2721 stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
2722 if ret:
2723 return None
95807118
PH
2724 except OSError:
2725 return False
f0c9fb96 2726 return stdout
cae97f65
PH
2727
2728
2729def detect_exe_version(output, version_re=None, unrecognized='present'):
14f25df2 2730 assert isinstance(output, str)
cae97f65
PH
2731 if version_re is None:
2732 version_re = r'version\s+([-0-9._a-zA-Z]+)'
2733 m = re.search(version_re, output)
95807118
PH
2734 if m:
2735 return m.group(1)
2736 else:
2737 return unrecognized
2738
2739
9af98e17 2740def get_exe_version(exe, args=['--version'],
1cdda329 2741 version_re=None, unrecognized=('present', 'broken')):
9af98e17 2742 """ Returns the version of the specified executable,
2743 or False if the executable is not present """
1cdda329 2744 unrecognized = variadic(unrecognized)
2745 assert len(unrecognized) in (1, 2)
9af98e17 2746 out = _get_exe_version_output(exe, args)
1cdda329 2747 if out is None:
2748 return unrecognized[-1]
2749 return out and detect_exe_version(out, version_re, unrecognized[0])
9af98e17 2750
2751
7e88d7d7 2752def frange(start=0, stop=None, step=1):
2753 """Float range"""
2754 if stop is None:
2755 start, stop = 0, start
2756 sign = [-1, 1][step > 0] if step else 0
2757 while sign * start < sign * stop:
2758 yield start
2759 start += step
2760
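# Illustrative examples (not part of the original file):
#
#     >>> list(frange(3))
#     [0, 1, 2]
#     >>> list(frange(0, 1, 0.25))
#     [0, 0.25, 0.5, 0.75]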
2761
cb89cfc1 2762class LazyList(collections.abc.Sequence):
0f06bcd7 2763 """Lazy immutable list from an iterable
2764 Note that slices of a LazyList are lists and not LazyList"""
483336e7 2765
8e5fecc8 2766 class IndexError(IndexError):
2767 pass
2768
282f5709 2769 def __init__(self, iterable, *, reverse=False, _cache=None):
0f06bcd7 2770 self._iterable = iter(iterable)
2771 self._cache = [] if _cache is None else _cache
2772 self._reversed = reverse
483336e7 2773
2774 def __iter__(self):
0f06bcd7 2775 if self._reversed:
28419ca2 2776 # We need to consume the entire iterable to iterate in reverse
981052c9 2777 yield from self.exhaust()
28419ca2 2778 return
0f06bcd7 2779 yield from self._cache
2780 for item in self._iterable:
2781 self._cache.append(item)
483336e7 2782 yield item
2783
0f06bcd7 2784 def _exhaust(self):
2785 self._cache.extend(self._iterable)
2786 self._iterable = [] # Discard the emptied iterable to make it pickle-able
2787 return self._cache
28419ca2 2788
981052c9 2789 def exhaust(self):
0f06bcd7 2790 """Evaluate the entire iterable"""
2791 return self._exhaust()[::-1 if self._reversed else 1]
981052c9 2792
28419ca2 2793 @staticmethod
0f06bcd7 2794 def _reverse_index(x):
f2df4071 2795 return None if x is None else ~x
483336e7 2796
2797 def __getitem__(self, idx):
2798 if isinstance(idx, slice):
0f06bcd7 2799 if self._reversed:
2800 idx = slice(self._reverse_index(idx.start), self._reverse_index(idx.stop), -(idx.step or 1))
e0f2b4b4 2801 start, stop, step = idx.start, idx.stop, idx.step or 1
483336e7 2802 elif isinstance(idx, int):
0f06bcd7 2803 if self._reversed:
2804 idx = self._reverse_index(idx)
e0f2b4b4 2805 start, stop, step = idx, idx, 0
483336e7 2806 else:
2807 raise TypeError('indices must be integers or slices')
e0f2b4b4 2808 if ((start or 0) < 0 or (stop or 0) < 0
2809 or (start is None and step < 0)
2810 or (stop is None and step > 0)):
483336e7 2811 # We need to consume the entire iterable to be able to slice from the end
2812 # Obviously, never use this with infinite iterables
0f06bcd7 2813 self._exhaust()
8e5fecc8 2814 try:
0f06bcd7 2815 return self._cache[idx]
8e5fecc8 2816 except IndexError as e:
2817 raise self.IndexError(e) from e
0f06bcd7 2818 n = max(start or 0, stop or 0) - len(self._cache) + 1
28419ca2 2819 if n > 0:
0f06bcd7 2820 self._cache.extend(itertools.islice(self._iterable, n))
8e5fecc8 2821 try:
0f06bcd7 2822 return self._cache[idx]
8e5fecc8 2823 except IndexError as e:
2824 raise self.IndexError(e) from e
483336e7 2825
2826 def __bool__(self):
2827 try:
0f06bcd7 2828 self[-1] if self._reversed else self[0]
8e5fecc8 2829 except self.IndexError:
483336e7 2830 return False
2831 return True
2832
2833 def __len__(self):
0f06bcd7 2834 self._exhaust()
2835 return len(self._cache)
483336e7 2836
282f5709 2837 def __reversed__(self):
0f06bcd7 2838 return type(self)(self._iterable, reverse=not self._reversed, _cache=self._cache)
282f5709 2839
2840 def __copy__(self):
0f06bcd7 2841 return type(self)(self._iterable, reverse=self._reversed, _cache=self._cache)
282f5709 2842
28419ca2 2843 def __repr__(self):
2844 # repr and str should mimic a list. So we exhaust the iterable
2845 return repr(self.exhaust())
2846
2847 def __str__(self):
2848 return repr(self.exhaust())
2849
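# Illustrative example (not part of the original file): items are pulled from
# the underlying iterator only as far as needed, and slices are plain lists.
#
#     >>> lazy = LazyList(itertools.count())
#     >>> lazy[3]
#     3
#     >>> lazy[:3]
#     [0, 1, 2]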
483336e7 2850
7be9ccff 2851class PagedList:
c07a39ae 2852
2853 class IndexError(IndexError):
2854 pass
2855
dd26ced1
PH
2856 def __len__(self):
2857 # This is only useful for tests
2858 return len(self.getslice())
2859
7be9ccff 2860 def __init__(self, pagefunc, pagesize, use_cache=True):
2861 self._pagefunc = pagefunc
2862 self._pagesize = pagesize
f1d13090 2863 self._pagecount = float('inf')
7be9ccff 2864 self._use_cache = use_cache
2865 self._cache = {}
2866
2867 def getpage(self, pagenum):
d8cf8d97 2868 page_results = self._cache.get(pagenum)
2869 if page_results is None:
f1d13090 2870 page_results = [] if pagenum > self._pagecount else list(self._pagefunc(pagenum))
7be9ccff 2871 if self._use_cache:
2872 self._cache[pagenum] = page_results
2873 return page_results
2874
2875 def getslice(self, start=0, end=None):
2876 return list(self._getslice(start, end))
2877
2878 def _getslice(self, start, end):
55575225 2879 raise NotImplementedError('This method must be implemented by subclasses')
2880
2881 def __getitem__(self, idx):
f1d13090 2882 assert self._use_cache, 'Indexing PagedList requires cache'
55575225 2883 if not isinstance(idx, int) or idx < 0:
2884 raise TypeError('indices must be non-negative integers')
2885 entries = self.getslice(idx, idx + 1)
d8cf8d97 2886 if not entries:
c07a39ae 2887 raise self.IndexError()
d8cf8d97 2888 return entries[0]
55575225 2889
9c44d242
PH
2890
2891class OnDemandPagedList(PagedList):
a44ca5a4 2892 """Download pages until a page with less than maximum results"""
86e5f3ed 2893
7be9ccff 2894 def _getslice(self, start, end):
b7ab0590
PH
2895 for pagenum in itertools.count(start // self._pagesize):
2896 firstid = pagenum * self._pagesize
2897 nextfirstid = pagenum * self._pagesize + self._pagesize
2898 if start >= nextfirstid:
2899 continue
2900
b7ab0590
PH
2901 startv = (
2902 start % self._pagesize
2903 if firstid <= start < nextfirstid
2904 else 0)
b7ab0590
PH
2905 endv = (
2906 ((end - 1) % self._pagesize) + 1
2907 if (end is not None and firstid <= end <= nextfirstid)
2908 else None)
2909
f1d13090 2910 try:
2911 page_results = self.getpage(pagenum)
2912 except Exception:
2913 self._pagecount = pagenum - 1
2914 raise
b7ab0590
PH
2915 if startv != 0 or endv is not None:
2916 page_results = page_results[startv:endv]
7be9ccff 2917 yield from page_results
b7ab0590
PH
2918
2919 # A little optimization - if the current page is not "full", i.e. does
2920 # not contain page_size videos, then we can assume that this page
2921 # is the last one - there are no more ids on further pages,
2922 # so there is no need to query again.
2923 if len(page_results) + startv < self._pagesize:
2924 break
2925
2926 # If we got the whole page, but the next page is not interesting,
2927 # break out early as well
2928 if end == nextfirstid:
2929 break
81c2f20b
PH
2930
2931
9c44d242 2932class InAdvancePagedList(PagedList):
a44ca5a4 2933 """PagedList with total number of pages known in advance"""
86e5f3ed 2934
9c44d242 2935 def __init__(self, pagefunc, pagecount, pagesize):
7be9ccff 2936 PagedList.__init__(self, pagefunc, pagesize, True)
f1d13090 2937 self._pagecount = pagecount
9c44d242 2938
7be9ccff 2939 def _getslice(self, start, end):
9c44d242 2940 start_page = start // self._pagesize
d37707bd 2941 end_page = self._pagecount if end is None else min(self._pagecount, end // self._pagesize + 1)
9c44d242
PH
2942 skip_elems = start - start_page * self._pagesize
2943 only_more = None if end is None else end - start
2944 for pagenum in range(start_page, end_page):
7be9ccff 2945 page_results = self.getpage(pagenum)
9c44d242 2946 if skip_elems:
7be9ccff 2947 page_results = page_results[skip_elems:]
9c44d242
PH
2948 skip_elems = None
2949 if only_more is not None:
7be9ccff 2950 if len(page_results) < only_more:
2951 only_more -= len(page_results)
9c44d242 2952 else:
7be9ccff 2953 yield from page_results[:only_more]
9c44d242 2954 break
7be9ccff 2955 yield from page_results
9c44d242
PH
2956
2957
7e88d7d7 2958class PlaylistEntries:
2959 MissingEntry = object()
2960 is_exhausted = False
2961
2962 def __init__(self, ydl, info_dict):
7e9a6125 2963 self.ydl = ydl
2964
2965 # _entries must be assigned now since infodict can change during iteration
2966 entries = info_dict.get('entries')
2967 if entries is None:
2968 raise EntryNotInPlaylist('There are no entries')
2969 elif isinstance(entries, list):
2970 self.is_exhausted = True
2971
2972 requested_entries = info_dict.get('requested_entries')
bc5c2f8a 2973 self.is_incomplete = requested_entries is not None
7e9a6125 2974 if self.is_incomplete:
2975 assert self.is_exhausted
bc5c2f8a 2976 self._entries = [self.MissingEntry] * max(requested_entries or [0])
7e9a6125 2977 for i, entry in zip(requested_entries, entries):
2978 self._entries[i - 1] = entry
2979 elif isinstance(entries, (list, PagedList, LazyList)):
2980 self._entries = entries
2981 else:
2982 self._entries = LazyList(entries)
7e88d7d7 2983
2984 PLAYLIST_ITEMS_RE = re.compile(r'''(?x)
2985 (?P<start>[+-]?\d+)?
2986 (?P<range>[:-]
2987 (?P<end>[+-]?\d+|inf(?:inite)?)?
2988 (?::(?P<step>[+-]?\d+))?
2989 )?''')
2990
2991 @classmethod
2992 def parse_playlist_items(cls, string):
2993 for segment in string.split(','):
2994 if not segment:
2995 raise ValueError('There are two or more consecutive commas')
2996 mobj = cls.PLAYLIST_ITEMS_RE.fullmatch(segment)
2997 if not mobj:
2998 raise ValueError(f'{segment!r} is not a valid specification')
2999 start, end, step, has_range = mobj.group('start', 'end', 'step', 'range')
3000 if int_or_none(step) == 0:
3001 raise ValueError(f'Step in {segment!r} cannot be zero')
3002 yield slice(int_or_none(start), float_or_none(end), int_or_none(step)) if has_range else int(start)
3003
3004 def get_requested_items(self):
3005 playlist_items = self.ydl.params.get('playlist_items')
3006 playlist_start = self.ydl.params.get('playliststart', 1)
3007 playlist_end = self.ydl.params.get('playlistend')
3008 # For backwards compatibility, interpret -1 as whole list
3009 if playlist_end in (-1, None):
3010 playlist_end = ''
3011 if not playlist_items:
3012 playlist_items = f'{playlist_start}:{playlist_end}'
3013 elif playlist_start != 1 or playlist_end:
3014 self.ydl.report_warning('Ignoring playliststart and playlistend because playlistitems was given', only_once=True)
3015
3016 for index in self.parse_playlist_items(playlist_items):
3017 for i, entry in self[index]:
3018 yield i, entry
1ac4fd80 3019 if not entry:
3020 continue
7e88d7d7 3021 try:
3022 # TODO: Add auto-generated fields
3023 self.ydl._match_entry(entry, incomplete=True, silent=True)
3024 except (ExistingVideoReached, RejectedVideoReached):
3025 return
3026
7e9a6125 3027 def get_full_count(self):
3028 if self.is_exhausted and not self.is_incomplete:
7e88d7d7 3029 return len(self)
3030 elif isinstance(self._entries, InAdvancePagedList):
3031 if self._entries._pagesize == 1:
3032 return self._entries._pagecount
3033
7e88d7d7 3034 @functools.cached_property
3035 def _getter(self):
3036 if isinstance(self._entries, list):
3037 def get_entry(i):
3038 try:
3039 entry = self._entries[i]
3040 except IndexError:
3041 entry = self.MissingEntry
3042 if not self.is_incomplete:
3043 raise self.IndexError()
3044 if entry is self.MissingEntry:
bc5c2f8a 3045 raise EntryNotInPlaylist(f'Entry {i + 1} cannot be found')
7e88d7d7 3046 return entry
3047 else:
3048 def get_entry(i):
3049 try:
3050 return type(self.ydl)._handle_extraction_exceptions(lambda _, i: self._entries[i])(self.ydl, i)
3051 except (LazyList.IndexError, PagedList.IndexError):
3052 raise self.IndexError()
3053 return get_entry
3054
3055 def __getitem__(self, idx):
3056 if isinstance(idx, int):
3057 idx = slice(idx, idx)
3058
3059 # NB: PlaylistEntries[1:10] => (0, 1, ... 9)
3060 step = 1 if idx.step is None else idx.step
3061 if idx.start is None:
3062 start = 0 if step > 0 else len(self) - 1
3063 else:
3064 start = idx.start - 1 if idx.start >= 0 else len(self) + idx.start
3065
3066 # NB: Do not call len(self) when idx == [:]
3067 if idx.stop is None:
3068 stop = 0 if step < 0 else float('inf')
3069 else:
3070 stop = idx.stop - 1 if idx.stop >= 0 else len(self) + idx.stop
3071 stop += [-1, 1][step > 0]
3072
3073 for i in frange(start, stop, step):
3074 if i < 0:
3075 continue
3076 try:
7e9a6125 3077 entry = self._getter(i)
3078 except self.IndexError:
3079 self.is_exhausted = True
3080 if step > 0:
7e88d7d7 3081 break
7e9a6125 3082 continue
7e88d7d7 3083 yield i + 1, entry
3084
3085 def __len__(self):
3086 return len(tuple(self[:]))
3087
3088 class IndexError(IndexError):
3089 pass
3090
3091
81c2f20b 3092def uppercase_escape(s):
676eb3f2 3093 unicode_escape = codecs.getdecoder('unicode_escape')
81c2f20b 3094 return re.sub(
a612753d 3095 r'\\U[0-9a-fA-F]{8}',
676eb3f2
PH
3096 lambda m: unicode_escape(m.group(0))[0],
3097 s)
0fe2ff78
YCH
3098
3099
3100def lowercase_escape(s):
3101 unicode_escape = codecs.getdecoder('unicode_escape')
3102 return re.sub(
3103 r'\\u[0-9a-fA-F]{4}',
3104 lambda m: unicode_escape(m.group(0))[0],
3105 s)
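# Illustrative examples for the two escape helpers above:
#   >>> uppercase_escape(r'\U0001d11e')
#   '𝄞'
#   >>> lowercase_escape(r'Fran\u00e7ais')
#   'Français'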
b53466e1 3106
d05cfe06
S
3107
3108def escape_rfc3986(s):
3109 """Escape non-ASCII characters as suggested by RFC 3986"""
f9934b96 3110 return urllib.parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
d05cfe06
S
3111
3112
3113def escape_url(url):
3114 """Escape URL as suggested by RFC 3986"""
14f25df2 3115 url_parsed = urllib.parse.urlparse(url)
d05cfe06 3116 return url_parsed._replace(
efbed08d 3117 netloc=url_parsed.netloc.encode('idna').decode('ascii'),
d05cfe06
S
3118 path=escape_rfc3986(url_parsed.path),
3119 params=escape_rfc3986(url_parsed.params),
3120 query=escape_rfc3986(url_parsed.query),
3121 fragment=escape_rfc3986(url_parsed.fragment)
3122 ).geturl()
3123
62e609ab 3124
96b9e9cf 3125def parse_qs(url, **kwargs):
3126 return urllib.parse.parse_qs(urllib.parse.urlparse(url).query, **kwargs)
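# Illustrative example: parse_qs extracts the query string of a full URL, e.g.
#   >>> parse_qs('https://example.com/watch?v=abc&t=10')
#   {'v': ['abc'], 't': ['10']}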
4dfbf869 3127
3128
62e609ab
PH
3129def read_batch_urls(batch_fd):
3130 def fixup(url):
14f25df2 3131 if not isinstance(url, str):
62e609ab 3132 url = url.decode('utf-8', 'replace')
8c04f0be 3133 BOM_UTF8 = ('\xef\xbb\xbf', '\ufeff')
3134 for bom in BOM_UTF8:
3135 if url.startswith(bom):
3136 url = url[len(bom):]
3137 url = url.lstrip()
3138 if not url or url.startswith(('#', ';', ']')):
62e609ab 3139 return False
8c04f0be 3140 # "#" cannot be stripped out since it is part of the URI
962ffcf8 3141 # However, it can be safely stripped out when it follows whitespace
8c04f0be 3142 return re.split(r'\s#', url, 1)[0].rstrip()
62e609ab
PH
3143
3144 with contextlib.closing(batch_fd) as fd:
3145 return [url for url in map(fixup, fd) if url]
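# Illustrative example (hypothetical batch-file contents): comment lines are
# dropped and trailing " #..." annotations are stripped, e.g.
#   >>> import io
#   >>> read_batch_urls(io.StringIO('# a comment\nhttps://example.com/v1  # note\n'))
#   ['https://example.com/v1']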
b74fa8cd
JMF
3146
3147
3148def urlencode_postdata(*args, **kargs):
14f25df2 3149 return urllib.parse.urlencode(*args, **kargs).encode('ascii')
bcf89ce6
PH
3150
3151
38f9ef31 3152def update_url_query(url, query):
cacd9966
YCH
3153 if not query:
3154 return url
14f25df2 3155 parsed_url = urllib.parse.urlparse(url)
3156 qs = urllib.parse.parse_qs(parsed_url.query)
38f9ef31 3157 qs.update(query)
14f25df2 3158 return urllib.parse.urlunparse(parsed_url._replace(
3159 query=urllib.parse.urlencode(qs, True)))
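# Illustrative example: existing query parameters are preserved and new ones
# are appended, e.g.
#   >>> update_url_query('https://example.com/path?a=1', {'b': 2})
#   'https://example.com/path?a=1&b=2'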
16392824 3160
8e60dc75 3161
c043c246 3162def update_Request(req, url=None, data=None, headers=None, query=None):
ed0291d1 3163 req_headers = req.headers.copy()
c043c246 3164 req_headers.update(headers or {})
ed0291d1
S
3165 req_data = data or req.data
3166 req_url = update_url_query(url or req.get_full_url(), query)
95cf60e8
S
3167 req_get_method = req.get_method()
3168 if req_get_method == 'HEAD':
3169 req_type = HEADRequest
3170 elif req_get_method == 'PUT':
3171 req_type = PUTRequest
3172 else:
ac668111 3173 req_type = urllib.request.Request
ed0291d1
S
3174 new_req = req_type(
3175 req_url, data=req_data, headers=req_headers,
3176 origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
3177 if hasattr(req, 'timeout'):
3178 new_req.timeout = req.timeout
3179 return new_req
3180
3181
10c87c15 3182def _multipart_encode_impl(data, boundary):
0c265486
YCH
3183 content_type = 'multipart/form-data; boundary=%s' % boundary
3184
3185 out = b''
3186 for k, v in data.items():
3187 out += b'--' + boundary.encode('ascii') + b'\r\n'
14f25df2 3188 if isinstance(k, str):
0f06bcd7 3189 k = k.encode()
14f25df2 3190 if isinstance(v, str):
0f06bcd7 3191 v = v.encode()
0c265486
YCH
3192 # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
3193 # suggests sending UTF-8 directly. Firefox sends UTF-8, too
b2ad479d 3194 content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
0c265486
YCH
3195 if boundary.encode('ascii') in content:
3196 raise ValueError('Boundary overlaps with data')
3197 out += content
3198
3199 out += b'--' + boundary.encode('ascii') + b'--\r\n'
3200
3201 return out, content_type
3202
3203
3204def multipart_encode(data, boundary=None):
3205 '''
3206 Encode a dict to RFC 7578-compliant form-data
3207
3208 data:
3209 A dict where keys and values can be either Unicode or bytes-like
3210 objects.
3211 boundary:
3213 If specified, a Unicode object to use as the boundary. Otherwise,
3214 a random boundary is generated.
3214
3215 Reference: https://tools.ietf.org/html/rfc7578
3216 '''
3217 has_specified_boundary = boundary is not None
3218
3219 while True:
3220 if boundary is None:
3221 boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
3222
3223 try:
10c87c15 3224 out, content_type = _multipart_encode_impl(data, boundary)
0c265486
YCH
3225 break
3226 except ValueError:
3227 if has_specified_boundary:
3228 raise
3229 boundary = None
3230
3231 return out, content_type
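# Illustrative example (fixed boundary 'X' chosen only for readability):
#   >>> body, ctype = multipart_encode({'field': 'value'}, boundary='X')
#   >>> ctype
#   'multipart/form-data; boundary=X'
#   >>> body
#   b'--X\r\nContent-Disposition: form-data; name="field"\r\n\r\nvalue\r\n--X--\r\n'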
3232
3233
304ad45a 3234def variadic(x, allowed_types=(str, bytes, dict)):
3235 return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,)
3236
3237
86296ad2 3238def dict_get(d, key_or_keys, default=None, skip_false_values=True):
a44ca5a4 3239 for val in map(d.get, variadic(key_or_keys)):
3240 if val is not None and (val or not skip_false_values):
3241 return val
3242 return default
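# Illustrative examples for the two helpers above:
#   >>> variadic('abc'), variadic([1, 2])
#   (('abc',), [1, 2])
#   >>> dict_get({'a': '', 'b': 'x'}, ('a', 'b'))   # '' is skipped as falsy
#   'x'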
cbecc9b9
S
3243
3244
c4f60dd7 3245def try_call(*funcs, expected_type=None, args=[], kwargs={}):
3246 for f in funcs:
a32a9a7e 3247 try:
c4f60dd7 3248 val = f(*args, **kwargs)
ab029d7e 3249 except (AttributeError, KeyError, TypeError, IndexError, ValueError, ZeroDivisionError):
a32a9a7e
S
3250 pass
3251 else:
c4f60dd7 3252 if expected_type is None or isinstance(val, expected_type):
3253 return val
3254
3255
3256def try_get(src, getter, expected_type=None):
3257 return try_call(*variadic(getter), args=(src,), expected_type=expected_type)
329ca3be
S
3258
3259
90137ca4 3260def filter_dict(dct, cndn=lambda _, v: v is not None):
3261 return {k: v for k, v in dct.items() if cndn(k, v)}
3262
3263
6cc62232
S
3264def merge_dicts(*dicts):
3265 merged = {}
3266 for a_dict in dicts:
3267 for k, v in a_dict.items():
90137ca4 3268 if (v is not None and k not in merged
3269 or isinstance(v, str) and merged[k] == ''):
6cc62232
S
3270 merged[k] = v
3271 return merged
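# Illustrative example: earlier dicts win, except that an empty string can be
# overridden by a later non-empty string; None values are ignored:
#   >>> merge_dicts({'a': 1, 'b': ''}, {'a': 2, 'b': 'x', 'c': None})
#   {'a': 1, 'b': 'x'}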
3272
3273
8e60dc75 3274def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
14f25df2 3275 return string if isinstance(string, str) else str(string, encoding, errors)
8e60dc75 3276
16392824 3277
a1a530b0
PH
3278US_RATINGS = {
3279 'G': 0,
3280 'PG': 10,
3281 'PG-13': 13,
3282 'R': 16,
3283 'NC': 18,
3284}
fac55558
PH
3285
3286
a8795327 3287TV_PARENTAL_GUIDELINES = {
5a16c9d9
RA
3288 'TV-Y': 0,
3289 'TV-Y7': 7,
3290 'TV-G': 0,
3291 'TV-PG': 0,
3292 'TV-14': 14,
3293 'TV-MA': 17,
a8795327
S
3294}
3295
3296
146c80e2 3297def parse_age_limit(s):
19a03940 3298 # isinstance(False, int) is True. So type() must be used instead
c487cf00 3299 if type(s) is int: # noqa: E721
a8795327 3300 return s if 0 <= s <= 21 else None
19a03940 3301 elif not isinstance(s, str):
d838b1bd 3302 return None
146c80e2 3303 m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
a8795327
S
3304 if m:
3305 return int(m.group('age'))
5c5fae6d 3306 s = s.upper()
a8795327
S
3307 if s in US_RATINGS:
3308 return US_RATINGS[s]
5a16c9d9 3309 m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
b8361187 3310 if m:
5a16c9d9 3311 return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
b8361187 3312 return None
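# Illustrative example covering the US_RATINGS and TV_PARENTAL_GUIDELINES
# tables above, as well as plain integer input:
#   >>> parse_age_limit('PG-13'), parse_age_limit('TV-MA'), parse_age_limit(18)
#   (13, 17, 18)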
146c80e2
S
3313
3314
fac55558 3315def strip_jsonp(code):
609a61e3 3316 return re.sub(
5552c9eb 3317 r'''(?sx)^
e9c671d5 3318 (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
5552c9eb
YCH
3319 (?:\s*&&\s*(?P=func_name))?
3320 \s*\(\s*(?P<callback_data>.*)\);?
3321 \s*?(?://[^\n]*)*$''',
3322 r'\g<callback_data>', code)
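# Illustrative example: the JSONP wrapper is stripped, leaving only the payload:
#   >>> strip_jsonp('callback({"id": "x"});')
#   '{"id": "x"}'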
478c2c61
PH
3323
3324
8f53dc44 3325def js_to_json(code, vars={}, *, strict=False):
5c610515 3326 # vars is a dict of var, val pairs to substitute
a71b812f
SS
3327 STRING_QUOTES = '\'"'
3328 STRING_RE = '|'.join(rf'{q}(?:\\.|[^\\{q}])*{q}' for q in STRING_QUOTES)
c843e685 3329 COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
86e5f3ed 3330 SKIP_RE = fr'\s*(?:{COMMENT_RE})?\s*'
4195096e 3331 INTEGER_TABLE = (
86e5f3ed 3332 (fr'(?s)^(0[xX][0-9a-fA-F]+){SKIP_RE}:?$', 16),
3333 (fr'(?s)^(0+[0-7]+){SKIP_RE}:?$', 8),
4195096e
S
3334 )
3335
a71b812f
SS
3336 def process_escape(match):
3337 JSON_PASSTHROUGH_ESCAPES = R'"\bfnrtu'
3338 escape = match.group(1) or match.group(2)
3339
3340 return (Rf'\{escape}' if escape in JSON_PASSTHROUGH_ESCAPES
3341 else R'\u00' if escape == 'x'
3342 else '' if escape == '\n'
3343 else escape)
3344
e05f6939 3345 def fix_kv(m):
e7b6d122
PH
3346 v = m.group(0)
3347 if v in ('true', 'false', 'null'):
3348 return v
421ddcb8
C
3349 elif v in ('undefined', 'void 0'):
3350 return 'null'
8bdd16b4 3351 elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
a71b812f
SS
3352 return ''
3353
3354 if v[0] in STRING_QUOTES:
3355 escaped = re.sub(r'(?s)(")|\\(.)', process_escape, v[1:-1])
3356 return f'"{escaped}"'
3357
3358 for regex, base in INTEGER_TABLE:
3359 im = re.match(regex, v)
3360 if im:
3361 i = int(im.group(1), base)
3362 return f'"{i}":' if v.endswith(':') else str(i)
3363
3364 if v in vars:
d5f043d1
C
3365 try:
3366 if not strict:
3367 json.loads(vars[v])
08e29b9f 3368 except json.JSONDecodeError:
d5f043d1
C
3369 return json.dumps(vars[v])
3370 else:
3371 return vars[v]
89ac4a19 3372
a71b812f
SS
3373 if not strict:
3374 return f'"{v}"'
5c610515 3375
a71b812f 3376 raise ValueError(f'Unknown value: {v}')
e05f6939 3377
8072ef2b 3378 def create_map(mobj):
3379 return json.dumps(dict(json.loads(js_to_json(mobj.group(1) or '[]', vars=vars))))
3380
8072ef2b 3381 code = re.sub(r'new Map\((\[.*?\])?\)', create_map, code)
8f53dc44 3382 if not strict:
3383 code = re.sub(r'new Date\((".+")\)', r'\g<1>', code)
f55523cf 3384 code = re.sub(r'new \w+\((.*?)\)', lambda m: json.dumps(m.group(0)), code)
389896df 3385 code = re.sub(r'parseInt\([^\d]+(\d+)[^\d]+\)', r'\1', code)
3386 code = re.sub(r'\(function\([^)]*\)\s*\{[^}]*\}\s*\)\s*\(\s*(["\'][^)]*["\'])\s*\)', r'\1', code)
febff4c1 3387
a71b812f
SS
3388 return re.sub(rf'''(?sx)
3389 {STRING_RE}|
3390 {COMMENT_RE}|,(?={SKIP_RE}[\]}}])|
421ddcb8 3391 void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
a71b812f
SS
3392 \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{SKIP_RE}:)?|
3393 [0-9]+(?={SKIP_RE}:)|
8bdd16b4 3394 !+
a71b812f 3395 ''', fix_kv, code)
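# Illustrative example (hedged): unquoted keys are quoted, single-quoted strings
# are converted, hex literals are decoded, and undefined values and trailing
# commas are normalized, e.g.
#   >>> js_to_json("{abc: 'val', count: 0x10, extra: undefined, }")
#   '{"abc": "val", "count": 16, "extra": null }'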
e05f6939
PH
3396
3397
478c2c61
PH
3398def qualities(quality_ids):
3399 """ Get a numeric quality value out of a list of possible values """
3400 def q(qid):
3401 try:
3402 return quality_ids.index(qid)
3403 except ValueError:
3404 return -1
3405 return q
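# Illustrative example: unknown quality ids sort below all known ones:
#   >>> q = qualities(['360p', '720p', '1080p'])
#   >>> q('720p'), q('unknown')
#   (1, -1)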
3406
acd69589 3407
119e40ef 3408POSTPROCESS_WHEN = ('pre_process', 'after_filter', 'video', 'before_dl', 'post_process', 'after_move', 'after_video', 'playlist')
1e43a6f7 3409
3410
de6000d9 3411DEFAULT_OUTTMPL = {
3412 'default': '%(title)s [%(id)s].%(ext)s',
72755351 3413 'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
de6000d9 3414}
3415OUTTMPL_TYPES = {
72755351 3416 'chapter': None,
de6000d9 3417 'subtitle': None,
3418 'thumbnail': None,
3419 'description': 'description',
3420 'annotation': 'annotations.xml',
3421 'infojson': 'info.json',
08438d2c 3422 'link': None,
3b603dbd 3423 'pl_video': None,
5112f26a 3424 'pl_thumbnail': None,
de6000d9 3425 'pl_description': 'description',
3426 'pl_infojson': 'info.json',
3427}
0a871f68 3428
143db31d 3429# As of [1] format syntax is:
3430# %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
3431# 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
901130bb 3432STR_FORMAT_RE_TMPL = r'''(?x)
3433 (?<!%)(?P<prefix>(?:%%)*)
143db31d 3434 %
524e2e4f 3435 (?P<has_key>\((?P<key>{0})\))?
752cda38 3436 (?P<format>
524e2e4f 3437 (?P<conversion>[#0\-+ ]+)?
3438 (?P<min_width>\d+)?
3439 (?P<precision>\.\d+)?
3440 (?P<len_mod>[hlL])? # unused in python
901130bb 3441 {1} # conversion type
752cda38 3442 )
143db31d 3443'''
3444
7d1eb38a 3445
901130bb 3446STR_FORMAT_TYPES = 'diouxXeEfFgGcrs'
a020a0dc 3447
7d1eb38a 3448
a020a0dc
PH
3449def limit_length(s, length):
3450 """ Add ellipses to overly long strings """
3451 if s is None:
3452 return None
3453 ELLIPSES = '...'
3454 if len(s) > length:
3455 return s[:length - len(ELLIPSES)] + ELLIPSES
3456 return s
48844745
PH
3457
3458
3459def version_tuple(v):
5f9b8394 3460 return tuple(int(e) for e in re.split(r'[-.]', v))
48844745
PH
3461
3462
3463def is_outdated_version(version, limit, assume_new=True):
3464 if not version:
3465 return not assume_new
3466 try:
3467 return version_tuple(version) < version_tuple(limit)
3468 except ValueError:
3469 return not assume_new
732ea2f0
PH
3470
3471
3472def ytdl_is_updateable():
7a5c1cfe 3473 """ Returns if yt-dlp can be updated with -U """
735d865e 3474
5d535b4a 3475 from .update import is_non_updateable
732ea2f0 3476
5d535b4a 3477 return not is_non_updateable()
7d4111ed
PH
3478
3479
3480def args_to_str(args):
3481 # Get a short string representation for a subprocess command
702ccf2d 3482 return ' '.join(compat_shlex_quote(a) for a in args)
2ccd1b10
PH
3483
3484
9b9c5355 3485def error_to_compat_str(err):
cfb0511d 3486 return str(err)
fdae2358
S
3487
3488
a44ca5a4 3489def error_to_str(err):
3490 return f'{type(err).__name__}: {err}'
3491
3492
2647c933 3493def mimetype2ext(mt, default=NO_DEFAULT):
3494 if not isinstance(mt, str):
3495 if default is not NO_DEFAULT:
3496 return default
eb9ee194
S
3497 return None
3498
2647c933 3499 MAP = {
3500 # video
f6861ec9 3501 '3gpp': '3gp',
2647c933 3502 'mp2t': 'ts',
3503 'mp4': 'mp4',
3504 'mpeg': 'mpeg',
3505 'mpegurl': 'm3u8',
3506 'quicktime': 'mov',
3507 'webm': 'webm',
3508 'vp9': 'vp9',
f6861ec9 3509 'x-flv': 'flv',
2647c933 3510 'x-m4v': 'm4v',
3511 'x-matroska': 'mkv',
3512 'x-mng': 'mng',
a0d8d704 3513 'x-mp4-fragmented': 'mp4',
2647c933 3514 'x-ms-asf': 'asf',
a0d8d704 3515 'x-ms-wmv': 'wmv',
2647c933 3516 'x-msvideo': 'avi',
3517
3518 # application (streaming playlists)
b4173f15 3519 'dash+xml': 'mpd',
b4173f15 3520 'f4m+xml': 'f4m',
f164b971 3521 'hds+xml': 'f4m',
2647c933 3522 'vnd.apple.mpegurl': 'm3u8',
e910fe2f 3523 'vnd.ms-sstr+xml': 'ism',
2647c933 3524 'x-mpegurl': 'm3u8',
3525
3526 # audio
3527 'audio/mp4': 'm4a',
3528 # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3.
3529 # Using .mp3 as it's the most popular one
3530 'audio/mpeg': 'mp3',
d80ca5de 3531 'audio/webm': 'webm',
2647c933 3532 'audio/x-matroska': 'mka',
3533 'audio/x-mpegurl': 'm3u',
3534 'midi': 'mid',
3535 'ogg': 'ogg',
3536 'wav': 'wav',
3537 'wave': 'wav',
3538 'x-aac': 'aac',
3539 'x-flac': 'flac',
3540 'x-m4a': 'm4a',
3541 'x-realaudio': 'ra',
39e7107d 3542 'x-wav': 'wav',
9359f3d4 3543
2647c933 3544 # image
3545 'avif': 'avif',
3546 'bmp': 'bmp',
3547 'gif': 'gif',
3548 'jpeg': 'jpg',
3549 'png': 'png',
3550 'svg+xml': 'svg',
3551 'tiff': 'tif',
3552 'vnd.wap.wbmp': 'wbmp',
3553 'webp': 'webp',
3554 'x-icon': 'ico',
3555 'x-jng': 'jng',
3556 'x-ms-bmp': 'bmp',
3557
3558 # caption
3559 'filmstrip+json': 'fs',
3560 'smptett+xml': 'tt',
3561 'ttaf+xml': 'dfxp',
3562 'ttml+xml': 'ttml',
3563 'x-ms-sami': 'sami',
9359f3d4 3564
2647c933 3565 # misc
3566 'gzip': 'gz',
9359f3d4
F
3567 'json': 'json',
3568 'xml': 'xml',
3569 'zip': 'zip',
9359f3d4
F
3570 }
3571
2647c933 3572 mimetype = mt.partition(';')[0].strip().lower()
3573 _, _, subtype = mimetype.rpartition('/')
9359f3d4 3574
2647c933 3575 ext = traverse_obj(MAP, mimetype, subtype, subtype.rsplit('+')[-1])
3576 if ext:
3577 return ext
3578 elif default is not NO_DEFAULT:
3579 return default
9359f3d4 3580 return subtype.replace('+', '.')
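# Illustrative example: parameters after ';' are ignored and the subtype is
# looked up in the table above:
#   >>> mimetype2ext('video/mp4; codecs="avc1.42E01E"')
#   'mp4'
#   >>> mimetype2ext('application/vnd.apple.mpegurl')
#   'm3u8'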
c460bdd5
PH
3581
3582
2814f12b
THD
3583def ext2mimetype(ext_or_url):
3584 if not ext_or_url:
3585 return None
3586 if '.' not in ext_or_url:
3587 ext_or_url = f'file.{ext_or_url}'
3588 return mimetypes.guess_type(ext_or_url)[0]
3589
3590
4f3c5e06 3591def parse_codecs(codecs_str):
3592 # http://tools.ietf.org/html/rfc6381
3593 if not codecs_str:
3594 return {}
a0566bbf 3595 split_codecs = list(filter(None, map(
dbf5416a 3596 str.strip, codecs_str.strip().strip(',').split(','))))
3fe75fdc 3597 vcodec, acodec, scodec, hdr = None, None, None, None
a0566bbf 3598 for full_codec in split_codecs:
d816f61f 3599 parts = re.sub(r'0+(?=\d)', '', full_codec).split('.')
3600 if parts[0] in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
3601 'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
3602 if vcodec:
3603 continue
3604 vcodec = full_codec
3605 if parts[0] in ('dvh1', 'dvhe'):
3606 hdr = 'DV'
3607 elif parts[0] == 'av1' and traverse_obj(parts, 3) == '10':
3608 hdr = 'HDR10'
3609 elif parts[:2] == ['vp9', '2']:
3610 hdr = 'HDR10'
71082216 3611 elif parts[0] in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-4',
d816f61f 3612 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
3613 acodec = acodec or full_codec
3614 elif parts[0] in ('stpp', 'wvtt'):
3615 scodec = scodec or full_codec
4f3c5e06 3616 else:
19a03940 3617 write_string(f'WARNING: Unknown codec {full_codec}\n')
3fe75fdc 3618 if vcodec or acodec or scodec:
4f3c5e06 3619 return {
3620 'vcodec': vcodec or 'none',
3621 'acodec': acodec or 'none',
176f1866 3622 'dynamic_range': hdr,
3fe75fdc 3623 **({'scodec': scodec} if scodec is not None else {}),
4f3c5e06 3624 }
b69fd25c 3625 elif len(split_codecs) == 2:
3626 return {
3627 'vcodec': split_codecs[0],
3628 'acodec': split_codecs[1],
3629 }
4f3c5e06 3630 return {}
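# Illustrative example (hedged) for an RFC 6381 codecs string:
#   >>> parse_codecs('avc1.42E01E, mp4a.40.2')
#   {'vcodec': 'avc1.42E01E', 'acodec': 'mp4a.40.2', 'dynamic_range': None}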
3631
3632
fc61aff4
LL
3633def get_compatible_ext(*, vcodecs, acodecs, vexts, aexts, preferences=None):
3634 assert len(vcodecs) == len(vexts) and len(acodecs) == len(aexts)
3635
3636 allow_mkv = not preferences or 'mkv' in preferences
3637
3638 if allow_mkv and max(len(acodecs), len(vcodecs)) > 1:
3639 return 'mkv' # TODO: any other format allows this?
3640
3641 # TODO: Not all codecs supported by parse_codecs are handled here
3642 COMPATIBLE_CODECS = {
3643 'mp4': {
71082216 3644 'av1', 'hevc', 'avc1', 'mp4a', 'ac-4', # fourcc (m3u8, mpd)
81b6102d 3645 'h264', 'aacl', 'ec-3', # Set in ISM
fc61aff4
LL
3646 },
3647 'webm': {
3648 'av1', 'vp9', 'vp8', 'opus', 'vrbs',
3649 'vp9x', 'vp8x', # in the webm spec
3650 },
3651 }
3652
8f84770a 3653 sanitize_codec = functools.partial(try_get, getter=lambda x: x[0].split('.')[0].replace('0', ''))
3654 vcodec, acodec = sanitize_codec(vcodecs), sanitize_codec(acodecs)
fc61aff4
LL
3655
3656 for ext in preferences or COMPATIBLE_CODECS.keys():
3657 codec_set = COMPATIBLE_CODECS.get(ext, set())
3658 if ext == 'mkv' or codec_set.issuperset((vcodec, acodec)):
3659 return ext
3660
3661 COMPATIBLE_EXTS = (
3662 {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma', 'mov'},
fbb73833 3663 {'webm', 'weba'},
fc61aff4
LL
3664 )
3665 for ext in preferences or vexts:
3666 current_exts = {ext, *vexts, *aexts}
3667 if ext == 'mkv' or current_exts == {ext} or any(
3668 ext_sets.issuperset(current_exts) for ext_sets in COMPATIBLE_EXTS):
3669 return ext
3670 return 'mkv' if allow_mkv else preferences[-1]
3671
3672
2647c933 3673def urlhandle_detect_ext(url_handle, default=NO_DEFAULT):
79298173 3674 getheader = url_handle.headers.get
2ccd1b10 3675
b55ee18f
PH
3676 cd = getheader('Content-Disposition')
3677 if cd:
3678 m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
3679 if m:
3680 e = determine_ext(m.group('filename'), default_ext=None)
3681 if e:
3682 return e
3683
2647c933 3684 meta_ext = getheader('x-amz-meta-name')
3685 if meta_ext:
3686 e = meta_ext.rpartition('.')[2]
3687 if e:
3688 return e
3689
3690 return mimetype2ext(getheader('Content-Type'), default=default)
05900629
PH
3691
3692
1e399778
YCH
3693def encode_data_uri(data, mime_type):
3694 return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
3695
3696
05900629 3697def age_restricted(content_limit, age_limit):
6ec6cb4e 3698 """ Returns True iff the content should be blocked """
05900629
PH
3699
3700 if age_limit is None: # No limit set
3701 return False
3702 if content_limit is None:
3703 return False # Content available for everyone
3704 return age_limit < content_limit
61ca9a80
PH
3705
3706
88f60feb 3707# List of known byte-order-marks (BOM)
a904a7f8
L
3708BOMS = [
3709 (b'\xef\xbb\xbf', 'utf-8'),
3710 (b'\x00\x00\xfe\xff', 'utf-32-be'),
3711 (b'\xff\xfe\x00\x00', 'utf-32-le'),
3712 (b'\xff\xfe', 'utf-16-le'),
3713 (b'\xfe\xff', 'utf-16-be'),
3714]
a904a7f8
L
3715
3716
61ca9a80
PH
3717def is_html(first_bytes):
3718 """ Detect whether a file contains HTML by examining its first bytes. """
3719
80e8493e 3720 encoding = 'utf-8'
61ca9a80 3721 for bom, enc in BOMS:
80e8493e 3722 while first_bytes.startswith(bom):
3723 encoding, first_bytes = enc, first_bytes[len(bom):]
61ca9a80 3724
80e8493e 3725 return re.match(r'^\s*<', first_bytes.decode(encoding, 'replace'))
a055469f
PH
3726
3727
3728def determine_protocol(info_dict):
3729 protocol = info_dict.get('protocol')
3730 if protocol is not None:
3731 return protocol
3732
7de837a5 3733 url = sanitize_url(info_dict['url'])
a055469f
PH
3734 if url.startswith('rtmp'):
3735 return 'rtmp'
3736 elif url.startswith('mms'):
3737 return 'mms'
3738 elif url.startswith('rtsp'):
3739 return 'rtsp'
3740
3741 ext = determine_ext(url)
3742 if ext == 'm3u8':
deae7c17 3743 return 'm3u8' if info_dict.get('is_live') else 'm3u8_native'
a055469f
PH
3744 elif ext == 'f4m':
3745 return 'f4m'
3746
14f25df2 3747 return urllib.parse.urlparse(url).scheme
cfb56d1a
PH
3748
3749
c5e3f849 3750def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False):
3751 """ Render a list of rows, each as a list of values.
3752 Text after a \t will be right aligned """
ec11a9f4 3753 def width(string):
c5e3f849 3754 return len(remove_terminal_sequences(string).replace('\t', ''))
76d321f6 3755
3756 def get_max_lens(table):
ec11a9f4 3757 return [max(width(str(v)) for v in col) for col in zip(*table)]
76d321f6 3758
3759 def filter_using_list(row, filterArray):
d16df59d 3760 return [col for take, col in itertools.zip_longest(filterArray, row, fillvalue=True) if take]
76d321f6 3761
d16df59d 3762 max_lens = get_max_lens(data) if hide_empty else []
3763 header_row = filter_using_list(header_row, max_lens)
3764 data = [filter_using_list(row, max_lens) for row in data]
76d321f6 3765
cfb56d1a 3766 table = [header_row] + data
76d321f6 3767 max_lens = get_max_lens(table)
c5e3f849 3768 extra_gap += 1
76d321f6 3769 if delim:
c5e3f849 3770 table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data
1ed7953a 3771 table[1][-1] = table[1][-1][:-extra_gap * len(delim)] # Remove extra_gap from end of delimiter
ec11a9f4 3772 for row in table:
3773 for pos, text in enumerate(map(str, row)):
c5e3f849 3774 if '\t' in text:
3775 row[pos] = text.replace('\t', ' ' * (max_lens[pos] - width(text))) + ' ' * extra_gap
3776 else:
3777 row[pos] = text + ' ' * (max_lens[pos] - width(text) + extra_gap)
3778 ret = '\n'.join(''.join(row).rstrip() for row in table)
ec11a9f4 3779 return ret
347de493
PH
3780
3781
8f18aca8 3782def _match_one(filter_part, dct, incomplete):
77b87f05 3783 # TODO: Generalize code with YoutubeDL._build_format_filter
a047eeb6 3784 STRING_OPERATORS = {
3785 '*=': operator.contains,
3786 '^=': lambda attr, value: attr.startswith(value),
3787 '$=': lambda attr, value: attr.endswith(value),
3788 '~=': lambda attr, value: re.search(value, attr),
3789 }
347de493 3790 COMPARISON_OPERATORS = {
a047eeb6 3791 **STRING_OPERATORS,
3792 '<=': operator.le, # "<=" must be defined above "<"
347de493 3793 '<': operator.lt,
347de493 3794 '>=': operator.ge,
a047eeb6 3795 '>': operator.gt,
347de493 3796 '=': operator.eq,
347de493 3797 }
a047eeb6 3798
6db9c4d5 3799 if isinstance(incomplete, bool):
3800 is_incomplete = lambda _: incomplete
3801 else:
3802 is_incomplete = lambda k: k in incomplete
3803
64fa820c 3804 operator_rex = re.compile(r'''(?x)
347de493 3805 (?P<key>[a-z_]+)
77b87f05 3806 \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
347de493 3807 (?:
a047eeb6 3808 (?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
3809 (?P<strval>.+?)
347de493 3810 )
347de493 3811 ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
64fa820c 3812 m = operator_rex.fullmatch(filter_part.strip())
347de493 3813 if m:
18f96d12 3814 m = m.groupdict()
3815 unnegated_op = COMPARISON_OPERATORS[m['op']]
3816 if m['negation']:
77b87f05
MT
3817 op = lambda attr, value: not unnegated_op(attr, value)
3818 else:
3819 op = unnegated_op
18f96d12 3820 comparison_value = m['quotedstrval'] or m['strval']  # the regex above defines no 'intval' group
3821 if m['quote']:
3822 comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote'])
3823 actual_value = dct.get(m['key'])
3824 numeric_comparison = None
f9934b96 3825 if isinstance(actual_value, (int, float)):
e5a088dc
S
3826 # If the original field is a string and the matching comparison value is
3827 # a number, we should respect the origin of the original field
3828 # and process the comparison value as a string (see
18f96d12 3829 # https://github.com/ytdl-org/youtube-dl/issues/11082)
347de493 3830 try:
18f96d12 3831 numeric_comparison = int(comparison_value)
347de493 3832 except ValueError:
18f96d12 3833 numeric_comparison = parse_filesize(comparison_value)
3834 if numeric_comparison is None:
3835 numeric_comparison = parse_filesize(f'{comparison_value}B')
3836 if numeric_comparison is None:
3837 numeric_comparison = parse_duration(comparison_value)
3838 if numeric_comparison is not None and m['op'] in STRING_OPERATORS:
3839 raise ValueError('Operator %s only supports string values!' % m['op'])
347de493 3840 if actual_value is None:
6db9c4d5 3841 return is_incomplete(m['key']) or m['none_inclusive']
18f96d12 3842 return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)
347de493
PH
3843
3844 UNARY_OPERATORS = {
1cc47c66
S
3845 '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
3846 '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
347de493 3847 }
64fa820c 3848 operator_rex = re.compile(r'''(?x)
347de493 3849 (?P<op>%s)\s*(?P<key>[a-z_]+)
347de493 3850 ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
64fa820c 3851 m = operator_rex.fullmatch(filter_part.strip())
347de493
PH
3852 if m:
3853 op = UNARY_OPERATORS[m.group('op')]
3854 actual_value = dct.get(m.group('key'))
6db9c4d5 3855 if is_incomplete(m.group('key')) and actual_value is None:
8f18aca8 3856 return True
347de493
PH
3857 return op(actual_value)
3858
3859 raise ValueError('Invalid filter part %r' % filter_part)
3860
3861
8f18aca8 3862def match_str(filter_str, dct, incomplete=False):
6db9c4d5 3863 """ Filter a dictionary with a simple string syntax.
3864 @returns Whether the filter passes
3865 @param incomplete Set of keys that is expected to be missing from dct.
3866 Can be True/False to indicate all/none of the keys may be missing.
3867 All conditions on incomplete keys pass if the key is missing
8f18aca8 3868 """
347de493 3869 return all(
8f18aca8 3870 _match_one(filter_part.replace(r'\&', '&'), dct, incomplete)
a047eeb6 3871 for filter_part in re.split(r'(?<!\\)&', filter_str))
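# Illustrative example of the match-filter string syntax implemented above
# ('&' separates conditions, '~=' is a regex match):
#   >>> match_str('like_count > 100 & title ~= (?i)dog', {'like_count': 150, 'title': 'My Dog'})
#   True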
347de493
PH
3872
3873
b1a7cd05 3874def match_filter_func(filters):
3875 if not filters:
d1b5f70b 3876 return None
492272fe 3877 filters = set(variadic(filters))
d1b5f70b 3878
492272fe 3879 interactive = '-' in filters
3880 if interactive:
3881 filters.remove('-')
3882
3883 def _match_func(info_dict, incomplete=False):
3884 if not filters or any(match_str(f, info_dict, incomplete) for f in filters):
3885 return NO_DEFAULT if interactive and not incomplete else None
347de493 3886 else:
3bec830a 3887 video_title = info_dict.get('title') or info_dict.get('id') or 'entry'
b1a7cd05 3888 filter_str = ') | ('.join(map(str.strip, filters))
3889 return f'{video_title} does not pass filter ({filter_str}), skipping ..'
347de493 3890 return _match_func
91410c9b
PH
3891
3892
f2df4071 3893class download_range_func:
3894 def __init__(self, chapters, ranges):
3895 self.chapters, self.ranges = chapters, ranges
3896
3897 def __call__(self, info_dict, ydl):
0500ee3d 3898 if not self.ranges and not self.chapters:
3899 yield {}
3900
5ec1b6b7 3901 warning = ('There are no chapters matching the regex' if info_dict.get('chapters')
56ba69e4 3902 else 'Cannot match chapters since chapter information is unavailable')
f2df4071 3903 for regex in self.chapters or []:
5ec1b6b7 3904 for i, chapter in enumerate(info_dict.get('chapters') or []):
3905 if re.search(regex, chapter['title']):
3906 warning = None
3907 yield {**chapter, 'index': i}
f2df4071 3908 if self.chapters and warning:
5ec1b6b7 3909 ydl.to_screen(f'[info] {info_dict["id"]}: {warning}')
3910
f2df4071 3911 yield from ({'start_time': start, 'end_time': end} for start, end in self.ranges or [])
5ec1b6b7 3912
f2df4071 3913 def __eq__(self, other):
3914 return (isinstance(other, download_range_func)
3915 and self.chapters == other.chapters and self.ranges == other.ranges)
5ec1b6b7 3916
71df9b7f 3917 def __repr__(self):
3918 return f'{type(self).__name__}({self.chapters}, {self.ranges})'
3919
5ec1b6b7 3920
bf6427d2
YCH
3921def parse_dfxp_time_expr(time_expr):
3922 if not time_expr:
d631d5f9 3923 return
bf6427d2 3924
1d485a1a 3925 mobj = re.match(rf'^(?P<time_offset>{NUMBER_RE})s?$', time_expr)
bf6427d2
YCH
3926 if mobj:
3927 return float(mobj.group('time_offset'))
3928
db2fe38b 3929 mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
bf6427d2 3930 if mobj:
db2fe38b 3931 return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
bf6427d2
YCH
3932
3933
c1c924ab 3934def srt_subtitles_timecode(seconds):
aa7785f8 3935 return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)
3936
3937
3938def ass_subtitles_timecode(seconds):
3939 time = timetuple_from_msec(seconds * 1000)
3940 return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)
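# Illustrative examples for the time helpers above:
#   >>> parse_dfxp_time_expr('00:01:02.5')
#   62.5
#   >>> srt_subtitles_timecode(62.5)
#   '00:01:02,500'
#   >>> ass_subtitles_timecode(62.5)
#   '0:01:02.50'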
bf6427d2
YCH
3941
3942
3943def dfxp2srt(dfxp_data):
3869028f
YCH
3944 '''
3945 @param dfxp_data A bytes-like object containing DFXP data
3946 @returns A unicode object containing converted SRT data
3947 '''
5b995f71 3948 LEGACY_NAMESPACES = (
3869028f
YCH
3949 (b'http://www.w3.org/ns/ttml', [
3950 b'http://www.w3.org/2004/11/ttaf1',
3951 b'http://www.w3.org/2006/04/ttaf1',
3952 b'http://www.w3.org/2006/10/ttaf1',
5b995f71 3953 ]),
3869028f
YCH
3954 (b'http://www.w3.org/ns/ttml#styling', [
3955 b'http://www.w3.org/ns/ttml#style',
5b995f71
RA
3956 ]),
3957 )
3958
3959 SUPPORTED_STYLING = [
3960 'color',
3961 'fontFamily',
3962 'fontSize',
3963 'fontStyle',
3964 'fontWeight',
3965 'textDecoration'
3966 ]
3967
4e335771 3968 _x = functools.partial(xpath_with_ns, ns_map={
261f4730 3969 'xml': 'http://www.w3.org/XML/1998/namespace',
4e335771 3970 'ttml': 'http://www.w3.org/ns/ttml',
5b995f71 3971 'tts': 'http://www.w3.org/ns/ttml#styling',
4e335771 3972 })
bf6427d2 3973
5b995f71
RA
3974 styles = {}
3975 default_style = {}
3976
86e5f3ed 3977 class TTMLPElementParser:
5b995f71
RA
3978 _out = ''
3979 _unclosed_elements = []
3980 _applied_styles = []
bf6427d2 3981
2b14cb56 3982 def start(self, tag, attrib):
5b995f71
RA
3983 if tag in (_x('ttml:br'), 'br'):
3984 self._out += '\n'
3985 else:
3986 unclosed_elements = []
3987 style = {}
3988 element_style_id = attrib.get('style')
3989 if default_style:
3990 style.update(default_style)
3991 if element_style_id:
3992 style.update(styles.get(element_style_id, {}))
3993 for prop in SUPPORTED_STYLING:
3994 prop_val = attrib.get(_x('tts:' + prop))
3995 if prop_val:
3996 style[prop] = prop_val
3997 if style:
3998 font = ''
3999 for k, v in sorted(style.items()):
4000 if self._applied_styles and self._applied_styles[-1].get(k) == v:
4001 continue
4002 if k == 'color':
4003 font += ' color="%s"' % v
4004 elif k == 'fontSize':
4005 font += ' size="%s"' % v
4006 elif k == 'fontFamily':
4007 font += ' face="%s"' % v
4008 elif k == 'fontWeight' and v == 'bold':
4009 self._out += '<b>'
4010 unclosed_elements.append('b')
4011 elif k == 'fontStyle' and v == 'italic':
4012 self._out += '<i>'
4013 unclosed_elements.append('i')
4014 elif k == 'textDecoration' and v == 'underline':
4015 self._out += '<u>'
4016 unclosed_elements.append('u')
4017 if font:
4018 self._out += '<font' + font + '>'
4019 unclosed_elements.append('font')
4020 applied_style = {}
4021 if self._applied_styles:
4022 applied_style.update(self._applied_styles[-1])
4023 applied_style.update(style)
4024 self._applied_styles.append(applied_style)
4025 self._unclosed_elements.append(unclosed_elements)
bf6427d2 4026
2b14cb56 4027 def end(self, tag):
5b995f71
RA
4028 if tag not in (_x('ttml:br'), 'br'):
4029 unclosed_elements = self._unclosed_elements.pop()
4030 for element in reversed(unclosed_elements):
4031 self._out += '</%s>' % element
4032 if unclosed_elements and self._applied_styles:
4033 self._applied_styles.pop()
bf6427d2 4034
2b14cb56 4035 def data(self, data):
5b995f71 4036 self._out += data
2b14cb56 4037
4038 def close(self):
5b995f71 4039 return self._out.strip()
2b14cb56 4040
4041 def parse_node(node):
4042 target = TTMLPElementParser()
4043 parser = xml.etree.ElementTree.XMLParser(target=target)
4044 parser.feed(xml.etree.ElementTree.tostring(node))
4045 return parser.close()
bf6427d2 4046
5b995f71
RA
4047 for k, v in LEGACY_NAMESPACES:
4048 for ns in v:
4049 dfxp_data = dfxp_data.replace(ns, k)
4050
3869028f 4051 dfxp = compat_etree_fromstring(dfxp_data)
bf6427d2 4052 out = []
5b995f71 4053 paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
1b0427e6
YCH
4054
4055 if not paras:
4056 raise ValueError('Invalid dfxp/TTML subtitle')
bf6427d2 4057
5b995f71
RA
4058 repeat = False
4059 while True:
4060 for style in dfxp.findall(_x('.//ttml:style')):
261f4730
RA
4061 style_id = style.get('id') or style.get(_x('xml:id'))
4062 if not style_id:
4063 continue
5b995f71
RA
4064 parent_style_id = style.get('style')
4065 if parent_style_id:
4066 if parent_style_id not in styles:
4067 repeat = True
4068 continue
4069 styles[style_id] = styles[parent_style_id].copy()
4070 for prop in SUPPORTED_STYLING:
4071 prop_val = style.get(_x('tts:' + prop))
4072 if prop_val:
4073 styles.setdefault(style_id, {})[prop] = prop_val
4074 if repeat:
4075 repeat = False
4076 else:
4077 break
4078
4079 for p in ('body', 'div'):
4080 ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
4081 if ele is None:
4082 continue
4083 style = styles.get(ele.get('style'))
4084 if not style:
4085 continue
4086 default_style.update(style)
4087
bf6427d2 4088 for para, index in zip(paras, itertools.count(1)):
d631d5f9 4089 begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
7dff0363 4090 end_time = parse_dfxp_time_expr(para.attrib.get('end'))
d631d5f9
YCH
4091 dur = parse_dfxp_time_expr(para.attrib.get('dur'))
4092 if begin_time is None:
4093 continue
7dff0363 4094 if not end_time:
d631d5f9
YCH
4095 if not dur:
4096 continue
4097 end_time = begin_time + dur
bf6427d2
YCH
4098 out.append('%d\n%s --> %s\n%s\n\n' % (
4099 index,
c1c924ab
YCH
4100 srt_subtitles_timecode(begin_time),
4101 srt_subtitles_timecode(end_time),
bf6427d2
YCH
4102 parse_node(para)))
4103
4104 return ''.join(out)
4105
4106
c487cf00 4107def cli_option(params, command_option, param, separator=None):
66e289ba 4108 param = params.get(param)
c487cf00 4109 return ([] if param is None
4110 else [command_option, str(param)] if separator is None
4111 else [f'{command_option}{separator}{param}'])
66e289ba
S
4112
4113
4114def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
4115 param = params.get(param)
c487cf00 4116 assert param in (True, False, None)
4117 return cli_option({True: true_value, False: false_value}, command_option, param, separator)
66e289ba
S
4118
4119
4120def cli_valueless_option(params, command_option, param, expected_value=True):
c487cf00 4121 return [command_option] if params.get(param) == expected_value else []
66e289ba
S
4122
4123
e92caff5 4124def cli_configuration_args(argdict, keys, default=[], use_compat=True):
eab9b2bc 4125 if isinstance(argdict, (list, tuple)): # for backward compatibility
e92caff5 4126 if use_compat:
5b1ecbb3 4127 return argdict
4128 else:
4129 argdict = None
eab9b2bc 4130 if argdict is None:
5b1ecbb3 4131 return default
eab9b2bc 4132 assert isinstance(argdict, dict)
4133
e92caff5 4134 assert isinstance(keys, (list, tuple))
4135 for key_list in keys:
e92caff5 4136 arg_list = list(filter(
4137 lambda x: x is not None,
6606817a 4138 [argdict.get(key.lower()) for key in variadic(key_list)]))
e92caff5 4139 if arg_list:
4140 return [arg for args in arg_list for arg in args]
4141 return default
66e289ba 4142
6251555f 4143
330690a2 4144def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compat=True):
4145 main_key, exe = main_key.lower(), exe.lower()
4146 root_key = exe if main_key == exe else f'{main_key}+{exe}'
4147 keys = [f'{root_key}{k}' for k in (keys or [''])]
4148 if root_key in keys:
4149 if main_key != exe:
4150 keys.append((main_key, exe))
4151 keys.append('default')
4152 else:
4153 use_compat = False
4154 return cli_configuration_args(argdict, keys, default, use_compat)
4155
66e289ba 4156
86e5f3ed 4157class ISO639Utils:
39672624
YCH
4158 # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
4159 _lang_map = {
4160 'aa': 'aar',
4161 'ab': 'abk',
4162 'ae': 'ave',
4163 'af': 'afr',
4164 'ak': 'aka',
4165 'am': 'amh',
4166 'an': 'arg',
4167 'ar': 'ara',
4168 'as': 'asm',
4169 'av': 'ava',
4170 'ay': 'aym',
4171 'az': 'aze',
4172 'ba': 'bak',
4173 'be': 'bel',
4174 'bg': 'bul',
4175 'bh': 'bih',
4176 'bi': 'bis',
4177 'bm': 'bam',
4178 'bn': 'ben',
4179 'bo': 'bod',
4180 'br': 'bre',
4181 'bs': 'bos',
4182 'ca': 'cat',
4183 'ce': 'che',
4184 'ch': 'cha',
4185 'co': 'cos',
4186 'cr': 'cre',
4187 'cs': 'ces',
4188 'cu': 'chu',
4189 'cv': 'chv',
4190 'cy': 'cym',
4191 'da': 'dan',
4192 'de': 'deu',
4193 'dv': 'div',
4194 'dz': 'dzo',
4195 'ee': 'ewe',
4196 'el': 'ell',
4197 'en': 'eng',
4198 'eo': 'epo',
4199 'es': 'spa',
4200 'et': 'est',
4201 'eu': 'eus',
4202 'fa': 'fas',
4203 'ff': 'ful',
4204 'fi': 'fin',
4205 'fj': 'fij',
4206 'fo': 'fao',
4207 'fr': 'fra',
4208 'fy': 'fry',
4209 'ga': 'gle',
4210 'gd': 'gla',
4211 'gl': 'glg',
4212 'gn': 'grn',
4213 'gu': 'guj',
4214 'gv': 'glv',
4215 'ha': 'hau',
4216 'he': 'heb',
b7acc835 4217 'iw': 'heb', # Replaced by he in 1989 revision
39672624
YCH
4218 'hi': 'hin',
4219 'ho': 'hmo',
4220 'hr': 'hrv',
4221 'ht': 'hat',
4222 'hu': 'hun',
4223 'hy': 'hye',
4224 'hz': 'her',
4225 'ia': 'ina',
4226 'id': 'ind',
b7acc835 4227 'in': 'ind', # Replaced by id in 1989 revision
39672624
YCH
4228 'ie': 'ile',
4229 'ig': 'ibo',
4230 'ii': 'iii',
4231 'ik': 'ipk',
4232 'io': 'ido',
4233 'is': 'isl',
4234 'it': 'ita',
4235 'iu': 'iku',
4236 'ja': 'jpn',
4237 'jv': 'jav',
4238 'ka': 'kat',
4239 'kg': 'kon',
4240 'ki': 'kik',
4241 'kj': 'kua',
4242 'kk': 'kaz',
4243 'kl': 'kal',
4244 'km': 'khm',
4245 'kn': 'kan',
4246 'ko': 'kor',
4247 'kr': 'kau',
4248 'ks': 'kas',
4249 'ku': 'kur',
4250 'kv': 'kom',
4251 'kw': 'cor',
4252 'ky': 'kir',
4253 'la': 'lat',
4254 'lb': 'ltz',
4255 'lg': 'lug',
4256 'li': 'lim',
4257 'ln': 'lin',
4258 'lo': 'lao',
4259 'lt': 'lit',
4260 'lu': 'lub',
4261 'lv': 'lav',
4262 'mg': 'mlg',
4263 'mh': 'mah',
4264 'mi': 'mri',
4265 'mk': 'mkd',
4266 'ml': 'mal',
4267 'mn': 'mon',
4268 'mr': 'mar',
4269 'ms': 'msa',
4270 'mt': 'mlt',
4271 'my': 'mya',
4272 'na': 'nau',
4273 'nb': 'nob',
4274 'nd': 'nde',
4275 'ne': 'nep',
4276 'ng': 'ndo',
4277 'nl': 'nld',
4278 'nn': 'nno',
4279 'no': 'nor',
4280 'nr': 'nbl',
4281 'nv': 'nav',
4282 'ny': 'nya',
4283 'oc': 'oci',
4284 'oj': 'oji',
4285 'om': 'orm',
4286 'or': 'ori',
4287 'os': 'oss',
4288 'pa': 'pan',
4289 'pi': 'pli',
4290 'pl': 'pol',
4291 'ps': 'pus',
4292 'pt': 'por',
4293 'qu': 'que',
4294 'rm': 'roh',
4295 'rn': 'run',
4296 'ro': 'ron',
4297 'ru': 'rus',
4298 'rw': 'kin',
4299 'sa': 'san',
4300 'sc': 'srd',
4301 'sd': 'snd',
4302 'se': 'sme',
4303 'sg': 'sag',
4304 'si': 'sin',
4305 'sk': 'slk',
4306 'sl': 'slv',
4307 'sm': 'smo',
4308 'sn': 'sna',
4309 'so': 'som',
4310 'sq': 'sqi',
4311 'sr': 'srp',
4312 'ss': 'ssw',
4313 'st': 'sot',
4314 'su': 'sun',
4315 'sv': 'swe',
4316 'sw': 'swa',
4317 'ta': 'tam',
4318 'te': 'tel',
4319 'tg': 'tgk',
4320 'th': 'tha',
4321 'ti': 'tir',
4322 'tk': 'tuk',
4323 'tl': 'tgl',
4324 'tn': 'tsn',
4325 'to': 'ton',
4326 'tr': 'tur',
4327 'ts': 'tso',
4328 'tt': 'tat',
4329 'tw': 'twi',
4330 'ty': 'tah',
4331 'ug': 'uig',
4332 'uk': 'ukr',
4333 'ur': 'urd',
4334 'uz': 'uzb',
4335 've': 'ven',
4336 'vi': 'vie',
4337 'vo': 'vol',
4338 'wa': 'wln',
4339 'wo': 'wol',
4340 'xh': 'xho',
4341 'yi': 'yid',
e9a50fba 4342 'ji': 'yid', # Replaced by yi in 1989 revision
39672624
YCH
4343 'yo': 'yor',
4344 'za': 'zha',
4345 'zh': 'zho',
4346 'zu': 'zul',
4347 }
4348
4349 @classmethod
4350 def short2long(cls, code):
4351 """Convert language code from ISO 639-1 to ISO 639-2/T"""
4352 return cls._lang_map.get(code[:2])
4353
4354 @classmethod
4355 def long2short(cls, code):
4356 """Convert language code from ISO 639-2/T to ISO 639-1"""
4357 for short_name, long_name in cls._lang_map.items():
4358 if long_name == code:
4359 return short_name
4360
4361
86e5f3ed 4362class ISO3166Utils:
4eb10f66
YCH
4363 # From http://data.okfn.org/data/core/country-list
4364 _country_map = {
4365 'AF': 'Afghanistan',
4366 'AX': 'Åland Islands',
4367 'AL': 'Albania',
4368 'DZ': 'Algeria',
4369 'AS': 'American Samoa',
4370 'AD': 'Andorra',
4371 'AO': 'Angola',
4372 'AI': 'Anguilla',
4373 'AQ': 'Antarctica',
4374 'AG': 'Antigua and Barbuda',
4375 'AR': 'Argentina',
4376 'AM': 'Armenia',
4377 'AW': 'Aruba',
4378 'AU': 'Australia',
4379 'AT': 'Austria',
4380 'AZ': 'Azerbaijan',
4381 'BS': 'Bahamas',
4382 'BH': 'Bahrain',
4383 'BD': 'Bangladesh',
4384 'BB': 'Barbados',
4385 'BY': 'Belarus',
4386 'BE': 'Belgium',
4387 'BZ': 'Belize',
4388 'BJ': 'Benin',
4389 'BM': 'Bermuda',
4390 'BT': 'Bhutan',
4391 'BO': 'Bolivia, Plurinational State of',
4392 'BQ': 'Bonaire, Sint Eustatius and Saba',
4393 'BA': 'Bosnia and Herzegovina',
4394 'BW': 'Botswana',
4395 'BV': 'Bouvet Island',
4396 'BR': 'Brazil',
4397 'IO': 'British Indian Ocean Territory',
4398 'BN': 'Brunei Darussalam',
4399 'BG': 'Bulgaria',
4400 'BF': 'Burkina Faso',
4401 'BI': 'Burundi',
4402 'KH': 'Cambodia',
4403 'CM': 'Cameroon',
4404 'CA': 'Canada',
4405 'CV': 'Cape Verde',
4406 'KY': 'Cayman Islands',
4407 'CF': 'Central African Republic',
4408 'TD': 'Chad',
4409 'CL': 'Chile',
4410 'CN': 'China',
4411 'CX': 'Christmas Island',
4412 'CC': 'Cocos (Keeling) Islands',
4413 'CO': 'Colombia',
4414 'KM': 'Comoros',
4415 'CG': 'Congo',
4416 'CD': 'Congo, the Democratic Republic of the',
4417 'CK': 'Cook Islands',
4418 'CR': 'Costa Rica',
4419 'CI': 'Côte d\'Ivoire',
4420 'HR': 'Croatia',
4421 'CU': 'Cuba',
4422 'CW': 'Curaçao',
4423 'CY': 'Cyprus',
4424 'CZ': 'Czech Republic',
4425 'DK': 'Denmark',
4426 'DJ': 'Djibouti',
4427 'DM': 'Dominica',
4428 'DO': 'Dominican Republic',
4429 'EC': 'Ecuador',
4430 'EG': 'Egypt',
4431 'SV': 'El Salvador',
4432 'GQ': 'Equatorial Guinea',
4433 'ER': 'Eritrea',
4434 'EE': 'Estonia',
4435 'ET': 'Ethiopia',
4436 'FK': 'Falkland Islands (Malvinas)',
4437 'FO': 'Faroe Islands',
4438 'FJ': 'Fiji',
4439 'FI': 'Finland',
4440 'FR': 'France',
4441 'GF': 'French Guiana',
4442 'PF': 'French Polynesia',
4443 'TF': 'French Southern Territories',
4444 'GA': 'Gabon',
4445 'GM': 'Gambia',
4446 'GE': 'Georgia',
4447 'DE': 'Germany',
4448 'GH': 'Ghana',
4449 'GI': 'Gibraltar',
4450 'GR': 'Greece',
4451 'GL': 'Greenland',
4452 'GD': 'Grenada',
4453 'GP': 'Guadeloupe',
4454 'GU': 'Guam',
4455 'GT': 'Guatemala',
4456 'GG': 'Guernsey',
4457 'GN': 'Guinea',
4458 'GW': 'Guinea-Bissau',
4459 'GY': 'Guyana',
4460 'HT': 'Haiti',
4461 'HM': 'Heard Island and McDonald Islands',
4462 'VA': 'Holy See (Vatican City State)',
4463 'HN': 'Honduras',
4464 'HK': 'Hong Kong',
4465 'HU': 'Hungary',
4466 'IS': 'Iceland',
4467 'IN': 'India',
4468 'ID': 'Indonesia',
4469 'IR': 'Iran, Islamic Republic of',
4470 'IQ': 'Iraq',
4471 'IE': 'Ireland',
4472 'IM': 'Isle of Man',
4473 'IL': 'Israel',
4474 'IT': 'Italy',
4475 'JM': 'Jamaica',
4476 'JP': 'Japan',
4477 'JE': 'Jersey',
4478 'JO': 'Jordan',
4479 'KZ': 'Kazakhstan',
4480 'KE': 'Kenya',
4481 'KI': 'Kiribati',
4482 'KP': 'Korea, Democratic People\'s Republic of',
4483 'KR': 'Korea, Republic of',
4484 'KW': 'Kuwait',
4485 'KG': 'Kyrgyzstan',
4486 'LA': 'Lao People\'s Democratic Republic',
4487 'LV': 'Latvia',
4488 'LB': 'Lebanon',
4489 'LS': 'Lesotho',
4490 'LR': 'Liberia',
4491 'LY': 'Libya',
4492 'LI': 'Liechtenstein',
4493 'LT': 'Lithuania',
4494 'LU': 'Luxembourg',
4495 'MO': 'Macao',
4496 'MK': 'Macedonia, the Former Yugoslav Republic of',
4497 'MG': 'Madagascar',
4498 'MW': 'Malawi',
4499 'MY': 'Malaysia',
4500 'MV': 'Maldives',
4501 'ML': 'Mali',
4502 'MT': 'Malta',
4503 'MH': 'Marshall Islands',
4504 'MQ': 'Martinique',
4505 'MR': 'Mauritania',
4506 'MU': 'Mauritius',
4507 'YT': 'Mayotte',
4508 'MX': 'Mexico',
4509 'FM': 'Micronesia, Federated States of',
4510 'MD': 'Moldova, Republic of',
4511 'MC': 'Monaco',
4512 'MN': 'Mongolia',
4513 'ME': 'Montenegro',
4514 'MS': 'Montserrat',
4515 'MA': 'Morocco',
4516 'MZ': 'Mozambique',
4517 'MM': 'Myanmar',
4518 'NA': 'Namibia',
4519 'NR': 'Nauru',
4520 'NP': 'Nepal',
4521 'NL': 'Netherlands',
4522 'NC': 'New Caledonia',
4523 'NZ': 'New Zealand',
4524 'NI': 'Nicaragua',
4525 'NE': 'Niger',
4526 'NG': 'Nigeria',
4527 'NU': 'Niue',
4528 'NF': 'Norfolk Island',
4529 'MP': 'Northern Mariana Islands',
4530 'NO': 'Norway',
4531 'OM': 'Oman',
4532 'PK': 'Pakistan',
4533 'PW': 'Palau',
4534 'PS': 'Palestine, State of',
4535 'PA': 'Panama',
4536 'PG': 'Papua New Guinea',
4537 'PY': 'Paraguay',
4538 'PE': 'Peru',
4539 'PH': 'Philippines',
4540 'PN': 'Pitcairn',
4541 'PL': 'Poland',
4542 'PT': 'Portugal',
4543 'PR': 'Puerto Rico',
4544 'QA': 'Qatar',
4545 'RE': 'Réunion',
4546 'RO': 'Romania',
4547 'RU': 'Russian Federation',
4548 'RW': 'Rwanda',
4549 'BL': 'Saint Barthélemy',
4550 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
4551 'KN': 'Saint Kitts and Nevis',
4552 'LC': 'Saint Lucia',
4553 'MF': 'Saint Martin (French part)',
4554 'PM': 'Saint Pierre and Miquelon',
4555 'VC': 'Saint Vincent and the Grenadines',
4556 'WS': 'Samoa',
4557 'SM': 'San Marino',
4558 'ST': 'Sao Tome and Principe',
4559 'SA': 'Saudi Arabia',
4560 'SN': 'Senegal',
4561 'RS': 'Serbia',
4562 'SC': 'Seychelles',
4563 'SL': 'Sierra Leone',
4564 'SG': 'Singapore',
4565 'SX': 'Sint Maarten (Dutch part)',
4566 'SK': 'Slovakia',
4567 'SI': 'Slovenia',
4568 'SB': 'Solomon Islands',
4569 'SO': 'Somalia',
4570 'ZA': 'South Africa',
4571 'GS': 'South Georgia and the South Sandwich Islands',
4572 'SS': 'South Sudan',
4573 'ES': 'Spain',
4574 'LK': 'Sri Lanka',
4575 'SD': 'Sudan',
4576 'SR': 'Suriname',
4577 'SJ': 'Svalbard and Jan Mayen',
4578 'SZ': 'Swaziland',
4579 'SE': 'Sweden',
4580 'CH': 'Switzerland',
4581 'SY': 'Syrian Arab Republic',
4582 'TW': 'Taiwan, Province of China',
4583 'TJ': 'Tajikistan',
4584 'TZ': 'Tanzania, United Republic of',
4585 'TH': 'Thailand',
4586 'TL': 'Timor-Leste',
4587 'TG': 'Togo',
4588 'TK': 'Tokelau',
4589 'TO': 'Tonga',
4590 'TT': 'Trinidad and Tobago',
4591 'TN': 'Tunisia',
4592 'TR': 'Turkey',
4593 'TM': 'Turkmenistan',
4594 'TC': 'Turks and Caicos Islands',
4595 'TV': 'Tuvalu',
4596 'UG': 'Uganda',
4597 'UA': 'Ukraine',
4598 'AE': 'United Arab Emirates',
4599 'GB': 'United Kingdom',
4600 'US': 'United States',
4601 'UM': 'United States Minor Outlying Islands',
4602 'UY': 'Uruguay',
4603 'UZ': 'Uzbekistan',
4604 'VU': 'Vanuatu',
4605 'VE': 'Venezuela, Bolivarian Republic of',
4606 'VN': 'Viet Nam',
4607 'VG': 'Virgin Islands, British',
4608 'VI': 'Virgin Islands, U.S.',
4609 'WF': 'Wallis and Futuna',
4610 'EH': 'Western Sahara',
4611 'YE': 'Yemen',
4612 'ZM': 'Zambia',
4613 'ZW': 'Zimbabwe',
2f97cc61 4614 # Not ISO 3166 codes, but used for IP blocks
4615 'AP': 'Asia/Pacific Region',
4616 'EU': 'Europe',
4eb10f66
YCH
4617 }
4618
4619 @classmethod
4620 def short2full(cls, code):
4621 """Convert an ISO 3166-2 country code to the corresponding full name"""
4622 return cls._country_map.get(code.upper())
4623
4624
86e5f3ed 4625class GeoUtils:
773f291d
S
4626 # Major IPv4 address blocks per country
4627 _country_ip_map = {
53896ca5 4628 'AD': '46.172.224.0/19',
773f291d
S
4629 'AE': '94.200.0.0/13',
4630 'AF': '149.54.0.0/17',
4631 'AG': '209.59.64.0/18',
4632 'AI': '204.14.248.0/21',
4633 'AL': '46.99.0.0/16',
4634 'AM': '46.70.0.0/15',
4635 'AO': '105.168.0.0/13',
53896ca5
S
4636 'AP': '182.50.184.0/21',
4637 'AQ': '23.154.160.0/24',
773f291d
S
4638 'AR': '181.0.0.0/12',
4639 'AS': '202.70.112.0/20',
53896ca5 4640 'AT': '77.116.0.0/14',
773f291d
S
4641 'AU': '1.128.0.0/11',
4642 'AW': '181.41.0.0/18',
53896ca5
S
4643 'AX': '185.217.4.0/22',
4644 'AZ': '5.197.0.0/16',
773f291d
S
4645 'BA': '31.176.128.0/17',
4646 'BB': '65.48.128.0/17',
4647 'BD': '114.130.0.0/16',
4648 'BE': '57.0.0.0/8',
53896ca5 4649 'BF': '102.178.0.0/15',
773f291d
S
4650 'BG': '95.42.0.0/15',
4651 'BH': '37.131.0.0/17',
4652 'BI': '154.117.192.0/18',
4653 'BJ': '137.255.0.0/16',
53896ca5 4654 'BL': '185.212.72.0/23',
773f291d
S
4655 'BM': '196.12.64.0/18',
4656 'BN': '156.31.0.0/16',
4657 'BO': '161.56.0.0/16',
4658 'BQ': '161.0.80.0/20',
53896ca5 4659 'BR': '191.128.0.0/12',
773f291d
S
4660 'BS': '24.51.64.0/18',
4661 'BT': '119.2.96.0/19',
4662 'BW': '168.167.0.0/16',
4663 'BY': '178.120.0.0/13',
4664 'BZ': '179.42.192.0/18',
4665 'CA': '99.224.0.0/11',
4666 'CD': '41.243.0.0/16',
53896ca5
S
4667 'CF': '197.242.176.0/21',
4668 'CG': '160.113.0.0/16',
773f291d 4669 'CH': '85.0.0.0/13',
53896ca5 4670 'CI': '102.136.0.0/14',
773f291d
S
4671 'CK': '202.65.32.0/19',
4672 'CL': '152.172.0.0/14',
53896ca5 4673 'CM': '102.244.0.0/14',
773f291d
S
4674 'CN': '36.128.0.0/10',
4675 'CO': '181.240.0.0/12',
4676 'CR': '201.192.0.0/12',
4677 'CU': '152.206.0.0/15',
4678 'CV': '165.90.96.0/19',
4679 'CW': '190.88.128.0/17',
53896ca5 4680 'CY': '31.153.0.0/16',
773f291d
S
4681 'CZ': '88.100.0.0/14',
4682 'DE': '53.0.0.0/8',
4683 'DJ': '197.241.0.0/17',
4684 'DK': '87.48.0.0/12',
4685 'DM': '192.243.48.0/20',
4686 'DO': '152.166.0.0/15',
4687 'DZ': '41.96.0.0/12',
4688 'EC': '186.68.0.0/15',
4689 'EE': '90.190.0.0/15',
4690 'EG': '156.160.0.0/11',
4691 'ER': '196.200.96.0/20',
4692 'ES': '88.0.0.0/11',
4693 'ET': '196.188.0.0/14',
4694 'EU': '2.16.0.0/13',
4695 'FI': '91.152.0.0/13',
4696 'FJ': '144.120.0.0/16',
53896ca5 4697 'FK': '80.73.208.0/21',
773f291d
S
4698 'FM': '119.252.112.0/20',
4699 'FO': '88.85.32.0/19',
4700 'FR': '90.0.0.0/9',
4701 'GA': '41.158.0.0/15',
4702 'GB': '25.0.0.0/8',
4703 'GD': '74.122.88.0/21',
4704 'GE': '31.146.0.0/16',
4705 'GF': '161.22.64.0/18',
4706 'GG': '62.68.160.0/19',
53896ca5
S
4707 'GH': '154.160.0.0/12',
4708 'GI': '95.164.0.0/16',
773f291d
S
4709 'GL': '88.83.0.0/19',
4710 'GM': '160.182.0.0/15',
4711 'GN': '197.149.192.0/18',
4712 'GP': '104.250.0.0/19',
4713 'GQ': '105.235.224.0/20',
4714 'GR': '94.64.0.0/13',
4715 'GT': '168.234.0.0/16',
4716 'GU': '168.123.0.0/16',
4717 'GW': '197.214.80.0/20',
4718 'GY': '181.41.64.0/18',
4719 'HK': '113.252.0.0/14',
4720 'HN': '181.210.0.0/16',
4721 'HR': '93.136.0.0/13',
4722 'HT': '148.102.128.0/17',
4723 'HU': '84.0.0.0/14',
4724 'ID': '39.192.0.0/10',
4725 'IE': '87.32.0.0/12',
4726 'IL': '79.176.0.0/13',
4727 'IM': '5.62.80.0/20',
4728 'IN': '117.192.0.0/10',
4729 'IO': '203.83.48.0/21',
4730 'IQ': '37.236.0.0/14',
4731 'IR': '2.176.0.0/12',
4732 'IS': '82.221.0.0/16',
4733 'IT': '79.0.0.0/10',
4734 'JE': '87.244.64.0/18',
4735 'JM': '72.27.0.0/17',
4736 'JO': '176.29.0.0/16',
53896ca5 4737 'JP': '133.0.0.0/8',
773f291d
S
4738 'KE': '105.48.0.0/12',
4739 'KG': '158.181.128.0/17',
4740 'KH': '36.37.128.0/17',
4741 'KI': '103.25.140.0/22',
4742 'KM': '197.255.224.0/20',
53896ca5 4743 'KN': '198.167.192.0/19',
773f291d
S
4744 'KP': '175.45.176.0/22',
4745 'KR': '175.192.0.0/10',
4746 'KW': '37.36.0.0/14',
4747 'KY': '64.96.0.0/15',
4748 'KZ': '2.72.0.0/13',
4749 'LA': '115.84.64.0/18',
4750 'LB': '178.135.0.0/16',
53896ca5 4751 'LC': '24.92.144.0/20',
773f291d
S
4752 'LI': '82.117.0.0/19',
4753 'LK': '112.134.0.0/15',
53896ca5 4754 'LR': '102.183.0.0/16',
773f291d
S
4755 'LS': '129.232.0.0/17',
4756 'LT': '78.56.0.0/13',
4757 'LU': '188.42.0.0/16',
4758 'LV': '46.109.0.0/16',
4759 'LY': '41.252.0.0/14',
4760 'MA': '105.128.0.0/11',
4761 'MC': '88.209.64.0/18',
4762 'MD': '37.246.0.0/16',
4763 'ME': '178.175.0.0/17',
4764 'MF': '74.112.232.0/21',
4765 'MG': '154.126.0.0/17',
4766 'MH': '117.103.88.0/21',
4767 'MK': '77.28.0.0/15',
4768 'ML': '154.118.128.0/18',
4769 'MM': '37.111.0.0/17',
4770 'MN': '49.0.128.0/17',
4771 'MO': '60.246.0.0/16',
4772 'MP': '202.88.64.0/20',
4773 'MQ': '109.203.224.0/19',
4774 'MR': '41.188.64.0/18',
4775 'MS': '208.90.112.0/22',
4776 'MT': '46.11.0.0/16',
4777 'MU': '105.16.0.0/12',
4778 'MV': '27.114.128.0/18',
53896ca5 4779 'MW': '102.70.0.0/15',
773f291d
S
4780 'MX': '187.192.0.0/11',
4781 'MY': '175.136.0.0/13',
4782 'MZ': '197.218.0.0/15',
4783 'NA': '41.182.0.0/16',
4784 'NC': '101.101.0.0/18',
4785 'NE': '197.214.0.0/18',
4786 'NF': '203.17.240.0/22',
4787 'NG': '105.112.0.0/12',
4788 'NI': '186.76.0.0/15',
4789 'NL': '145.96.0.0/11',
4790 'NO': '84.208.0.0/13',
4791 'NP': '36.252.0.0/15',
4792 'NR': '203.98.224.0/19',
4793 'NU': '49.156.48.0/22',
4794 'NZ': '49.224.0.0/14',
4795 'OM': '5.36.0.0/15',
4796 'PA': '186.72.0.0/15',
4797 'PE': '186.160.0.0/14',
4798 'PF': '123.50.64.0/18',
4799 'PG': '124.240.192.0/19',
4800 'PH': '49.144.0.0/13',
4801 'PK': '39.32.0.0/11',
4802 'PL': '83.0.0.0/11',
4803 'PM': '70.36.0.0/20',
4804 'PR': '66.50.0.0/16',
4805 'PS': '188.161.0.0/16',
4806 'PT': '85.240.0.0/13',
4807 'PW': '202.124.224.0/20',
4808 'PY': '181.120.0.0/14',
4809 'QA': '37.210.0.0/15',
53896ca5 4810 'RE': '102.35.0.0/16',
773f291d 4811 'RO': '79.112.0.0/13',
53896ca5 4812 'RS': '93.86.0.0/15',
773f291d 4813 'RU': '5.136.0.0/13',
53896ca5 4814 'RW': '41.186.0.0/16',
773f291d
S
4815 'SA': '188.48.0.0/13',
4816 'SB': '202.1.160.0/19',
4817 'SC': '154.192.0.0/11',
53896ca5 4818 'SD': '102.120.0.0/13',
773f291d 4819 'SE': '78.64.0.0/12',
53896ca5 4820 'SG': '8.128.0.0/10',
773f291d
S
4821 'SI': '188.196.0.0/14',
4822 'SK': '78.98.0.0/15',
53896ca5 4823 'SL': '102.143.0.0/17',
773f291d
S
4824 'SM': '89.186.32.0/19',
4825 'SN': '41.82.0.0/15',
53896ca5 4826 'SO': '154.115.192.0/18',
773f291d
S
4827 'SR': '186.179.128.0/17',
4828 'SS': '105.235.208.0/21',
4829 'ST': '197.159.160.0/19',
4830 'SV': '168.243.0.0/16',
4831 'SX': '190.102.0.0/20',
4832 'SY': '5.0.0.0/16',
4833 'SZ': '41.84.224.0/19',
4834 'TC': '65.255.48.0/20',
4835 'TD': '154.68.128.0/19',
4836 'TG': '196.168.0.0/14',
4837 'TH': '171.96.0.0/13',
4838 'TJ': '85.9.128.0/18',
4839 'TK': '27.96.24.0/21',
4840 'TL': '180.189.160.0/20',
4841 'TM': '95.85.96.0/19',
4842 'TN': '197.0.0.0/11',
4843 'TO': '175.176.144.0/21',
4844 'TR': '78.160.0.0/11',
4845 'TT': '186.44.0.0/15',
4846 'TV': '202.2.96.0/19',
4847 'TW': '120.96.0.0/11',
4848 'TZ': '156.156.0.0/14',
53896ca5
S
4849 'UA': '37.52.0.0/14',
4850 'UG': '102.80.0.0/13',
4851 'US': '6.0.0.0/8',
773f291d 4852 'UY': '167.56.0.0/13',
53896ca5 4853 'UZ': '84.54.64.0/18',
773f291d 4854 'VA': '212.77.0.0/19',
53896ca5 4855 'VC': '207.191.240.0/21',
773f291d 4856 'VE': '186.88.0.0/13',
53896ca5 4857 'VG': '66.81.192.0/20',
773f291d
S
4858 'VI': '146.226.0.0/16',
4859 'VN': '14.160.0.0/11',
4860 'VU': '202.80.32.0/20',
4861 'WF': '117.20.32.0/21',
4862 'WS': '202.4.32.0/19',
4863 'YE': '134.35.0.0/16',
4864 'YT': '41.242.116.0/22',
4865 'ZA': '41.0.0.0/11',
53896ca5
S
4866 'ZM': '102.144.0.0/13',
4867 'ZW': '102.177.192.0/18',
773f291d
S
4868 }
4869
4870 @classmethod
5f95927a
S
4871 def random_ipv4(cls, code_or_block):
4872 if len(code_or_block) == 2:
4873 block = cls._country_ip_map.get(code_or_block.upper())
4874 if not block:
4875 return None
4876 else:
4877 block = code_or_block
773f291d 4878 addr, preflen = block.split('/')
ac668111 4879 addr_min = struct.unpack('!L', socket.inet_aton(addr))[0]
773f291d 4880 addr_max = addr_min | (0xffffffff >> int(preflen))
14f25df2 4881 return str(socket.inet_ntoa(
ac668111 4882 struct.pack('!L', random.randint(addr_min, addr_max))))
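# Illustrative example (the concrete address is random within the block):
#   >>> GeoUtils.random_ipv4('DE').startswith('53.')   # 'DE' maps to 53.0.0.0/8
#   True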
773f291d
S
4883
4884
ac668111 4885class PerRequestProxyHandler(urllib.request.ProxyHandler):
2461f79d
PH
4886 def __init__(self, proxies=None):
4887 # Set default handlers
4888 for type in ('http', 'https'):
4889 setattr(self, '%s_open' % type,
4890 lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
4891 meth(r, proxy, type))
ac668111 4892 urllib.request.ProxyHandler.__init__(self, proxies)
2461f79d 4893
91410c9b 4894 def proxy_open(self, req, proxy, type):
2461f79d 4895 req_proxy = req.headers.get('Ytdl-request-proxy')
91410c9b
PH
4896 if req_proxy is not None:
4897 proxy = req_proxy
2461f79d
PH
4898 del req.headers['Ytdl-request-proxy']
4899
4900 if proxy == '__noproxy__':
4901 return None # No Proxy
14f25df2 4902 if urllib.parse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
71aff188 4903 req.add_header('Ytdl-socks-proxy', proxy)
7a5c1cfe 4904 # yt-dlp's http/https handlers do wrapping the socket with socks
71aff188 4905 return None
ac668111 4906 return urllib.request.ProxyHandler.proxy_open(
91410c9b 4907 self, req, proxy, type)
5bc880b9
YCH
4908
4909
0a5445dd
YCH
4910# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
4911# released into Public Domain
4912# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
4913
4914def long_to_bytes(n, blocksize=0):
4915 """long_to_bytes(n:long, blocksize:int) : string
4916 Convert a long integer to a byte string.
4917
4918 If optional blocksize is given and greater than zero, pad the front of the
4919 byte string with binary zeros so that the length is a multiple of
4920 blocksize.
4921 """
4922 # after much testing, this algorithm was deemed to be the fastest
4923 s = b''
4924 n = int(n)
4925 while n > 0:
ac668111 4926 s = struct.pack('>I', n & 0xffffffff) + s
0a5445dd
YCH
4927 n = n >> 32
4928 # strip off leading zeros
4929 for i in range(len(s)):
4930 if s[i] != b'\000'[0]:
4931 break
4932 else:
4933 # only happens when n == 0
4934 s = b'\000'
4935 i = 0
4936 s = s[i:]
4937 # add back some pad bytes. this could be done more efficiently w.r.t. the
4938 # de-padding being done above, but sigh...
4939 if blocksize > 0 and len(s) % blocksize:
4940 s = (blocksize - len(s) % blocksize) * b'\000' + s
4941 return s
4942
4943
4944def bytes_to_long(s):
4945 """bytes_to_long(string) : long
4946 Convert a byte string to a long integer.
4947
4948 This is (essentially) the inverse of long_to_bytes().
4949 """
4950 acc = 0
4951 length = len(s)
4952 if length % 4:
4953 extra = (4 - length % 4)
4954 s = b'\000' * extra + s
4955 length = length + extra
4956 for i in range(0, length, 4):
ac668111 4957 acc = (acc << 32) + struct.unpack('>I', s[i:i + 4])[0]
0a5445dd
YCH
4958 return acc
4959
4960
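# Quick round-trip sketch for the two helpers above (illustration only):
#   >>> long_to_bytes(65537)
#   b'\x01\x00\x01'
#   >>> long_to_bytes(65537, blocksize=4)       # front-padded to a multiple of 4 bytes
#   b'\x00\x01\x00\x01'
#   >>> bytes_to_long(b'\x01\x00\x01')
#   65537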
5bc880b9
YCH
4961def ohdave_rsa_encrypt(data, exponent, modulus):
4962 '''
4963 Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
4964
4965 Input:
4966 data: data to encrypt, bytes-like object
4967 exponent, modulus: parameter e and N of RSA algorithm, both integer
4968 Output: hex string of encrypted data
4969
4970 Limitation: supports one block encryption only
4971 '''
4972
4973 payload = int(binascii.hexlify(data[::-1]), 16)
4974 encrypted = pow(payload, exponent, modulus)
4975 return '%x' % encrypted
81bdc8fd
YCH
4976
4977
f48409c7
YCH
4978def pkcs1pad(data, length):
4979 """
4980 Padding input data with PKCS#1 scheme
4981
4982 @param {int[]} data input data
4983 @param {int} length target length
4984 @returns {int[]} padded data
4985 """
4986 if len(data) > length - 11:
4987 raise ValueError('Input data too long for PKCS#1 padding')
4988
4989 pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
4990 return [0, 2] + pseudo_random + [0] + data
4991
4992
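# Sketch of the PKCS#1 v1.5 layout produced by pkcs1pad() (illustration only):
#   >>> padded = pkcs1pad([1, 2, 3], 16)
#   >>> len(padded), padded[:2], padded[-4:]
#   (16, [0, 2], [0, 1, 2, 3])                  # [0, 2, <random filler>, 0, <data>]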
7b2c3f47 4993def _base_n_table(n, table):
4994 if not table and not n:
4995 raise ValueError('Either table or n must be specified')
612f2be5 4996 table = (table or '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')[:n]
4997
44f14eb4 4998 if n and n != len(table):
612f2be5 4999 raise ValueError(f'base {n} exceeds table length {len(table)}')
5000 return table
59f898b7 5001
5eb6bdce 5002
7b2c3f47 5003def encode_base_n(num, n=None, table=None):
5004 """Convert given int to a base-n string"""
612f2be5 5005 table = _base_n_table(n, table)
7b2c3f47 5006 if not num:
5eb6bdce
YCH
5007 return table[0]
5008
7b2c3f47 5009 result, base = '', len(table)
81bdc8fd 5010 while num:
7b2c3f47 5011 result = table[num % base] + result
612f2be5 5012 num = num // base
7b2c3f47 5013 return result
5014
5015
5016def decode_base_n(string, n=None, table=None):
5017 """Convert given base-n string to int"""
5018 table = {char: index for index, char in enumerate(_base_n_table(n, table))}
5019 result, base = 0, len(table)
5020 for char in string:
5021 result = result * base + table[char]
5022 return result
5023
5024
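# Round-trip sketch for the base-n helpers above (illustration only):
#   >>> encode_base_n(52, 16)
#   '34'
#   >>> decode_base_n('34', 16)
#   52
#   >>> encode_base_n(5, table='ab')            # custom alphabet, here base 2 with a=0, b=1
#   'bab'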
5025def decode_base(value, digits):
da4db748 5026 deprecation_warning(f'{__name__}.decode_base is deprecated and may be removed '
5027 f'in a future version. Use {__name__}.decode_base_n instead')
7b2c3f47 5028 return decode_base_n(value, table=digits)
f52354a8
YCH
5029
5030
5031def decode_packed_codes(code):
06b3fe29 5032 mobj = re.search(PACKED_CODES_RE, code)
a0566bbf 5033 obfuscated_code, base, count, symbols = mobj.groups()
f52354a8
YCH
5034 base = int(base)
5035 count = int(count)
5036 symbols = symbols.split('|')
5037 symbol_table = {}
5038
5039 while count:
5040 count -= 1
5eb6bdce 5041 base_n_count = encode_base_n(count, base)
f52354a8
YCH
5042 symbol_table[base_n_count] = symbols[count] or base_n_count
5043
5044 return re.sub(
5045 r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
a0566bbf 5046 obfuscated_code)
e154c651 5047
5048
1ced2221
S
5049def caesar(s, alphabet, shift):
5050 if shift == 0:
5051 return s
5052 l = len(alphabet)
5053 return ''.join(
5054 alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
5055 for c in s)
5056
5057
5058def rot47(s):
5059 return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)
5060
5061
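# Illustration of the two helpers above; ROT47 is its own inverse, since 47 * 2 == 94:
#   >>> caesar('abc', 'abcdefghijklmnopqrstuvwxyz', 1)
#   'bcd'
#   >>> rot47('Hello')
#   'w6==@'
#   >>> rot47(rot47('Hello'))
#   'Hello'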
e154c651 5062def parse_m3u8_attributes(attrib):
5063 info = {}
5064 for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
5065 if val.startswith('"'):
5066 val = val[1:-1]
5067 info[key] = val
5068 return info
1143535d
YCH
5069
5070
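# Sketch of how an EXT-X-STREAM-INF attribute list is parsed (illustration only):
#   >>> parse_m3u8_attributes('BANDWIDTH=630000,CODECS="mp4a.40.2,avc1.4D401E",RESOLUTION=640x360')
#   {'BANDWIDTH': '630000', 'CODECS': 'mp4a.40.2,avc1.4D401E', 'RESOLUTION': '640x360'}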
5071def urshift(val, n):
5072 return val >> n if val >= 0 else (val + 0x100000000) >> n
d3f8e038
YCH
5073
5074
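# urshift() emulates JavaScript's unsigned right shift (>>>) for 32-bit values (illustration only):
#   >>> urshift(16, 2)
#   4
#   >>> urshift(-1, 28)                         # -1 is treated as 0xFFFFFFFF
#   15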
5075# Based on png2str() written by @gdkchan and improved by @yokrysty
067aa17e 5076# Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
d3f8e038
YCH
5077def decode_png(png_data):
5078 # Reference: https://www.w3.org/TR/PNG/
5079 header = png_data[8:]
5080
5081 if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
86e5f3ed 5082 raise OSError('Not a valid PNG file.')
d3f8e038
YCH
5083
5084 int_map = {1: '>B', 2: '>H', 4: '>I'}
ac668111 5085 unpack_integer = lambda x: struct.unpack(int_map[len(x)], x)[0]
d3f8e038
YCH
5086
5087 chunks = []
5088
5089 while header:
5090 length = unpack_integer(header[:4])
5091 header = header[4:]
5092
5093 chunk_type = header[:4]
5094 header = header[4:]
5095
5096 chunk_data = header[:length]
5097 header = header[length:]
5098
5099 header = header[4:] # Skip CRC
5100
5101 chunks.append({
5102 'type': chunk_type,
5103 'length': length,
5104 'data': chunk_data
5105 })
5106
5107 ihdr = chunks[0]['data']
5108
5109 width = unpack_integer(ihdr[:4])
5110 height = unpack_integer(ihdr[4:8])
5111
5112 idat = b''
5113
5114 for chunk in chunks:
5115 if chunk['type'] == b'IDAT':
5116 idat += chunk['data']
5117
5118 if not idat:
86e5f3ed 5119 raise OSError('Unable to read PNG data.')
d3f8e038
YCH
5120
5121 decompressed_data = bytearray(zlib.decompress(idat))
5122
5123 stride = width * 3
5124 pixels = []
5125
5126 def _get_pixel(idx):
5127 x = idx % stride
5128 y = idx // stride
5129 return pixels[y][x]
5130
5131 for y in range(height):
5132 basePos = y * (1 + stride)
5133 filter_type = decompressed_data[basePos]
5134
5135 current_row = []
5136
5137 pixels.append(current_row)
5138
5139 for x in range(stride):
5140 color = decompressed_data[1 + basePos + x]
5141 basex = y * stride + x
5142 left = 0
5143 up = 0
5144
5145 if x > 2:
5146 left = _get_pixel(basex - 3)
5147 if y > 0:
5148 up = _get_pixel(basex - stride)
5149
5150 if filter_type == 1: # Sub
5151 color = (color + left) & 0xff
5152 elif filter_type == 2: # Up
5153 color = (color + up) & 0xff
5154 elif filter_type == 3: # Average
5155 color = (color + ((left + up) >> 1)) & 0xff
5156 elif filter_type == 4: # Paeth
5157 a = left
5158 b = up
5159 c = 0
5160
5161 if x > 2 and y > 0:
5162 c = _get_pixel(basex - stride - 3)
5163
5164 p = a + b - c
5165
5166 pa = abs(p - a)
5167 pb = abs(p - b)
5168 pc = abs(p - c)
5169
5170 if pa <= pb and pa <= pc:
5171 color = (color + a) & 0xff
5172 elif pb <= pc:
5173 color = (color + b) & 0xff
5174 else:
5175 color = (color + c) & 0xff
5176
5177 current_row.append(color)
5178
5179 return width, height, pixels
efa97bdc
YCH
5180
5181
5182def write_xattr(path, key, value):
6f7563be 5183 # Windows: Write xattrs to NTFS Alternate Data Streams:
5184 # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
5185 if compat_os_name == 'nt':
5186 assert ':' not in key
5187 assert os.path.exists(path)
efa97bdc
YCH
5188
5189 try:
6f7563be 5190 with open(f'{path}:{key}', 'wb') as f:
5191 f.write(value)
86e5f3ed 5192 except OSError as e:
efa97bdc 5193 raise XAttrMetadataError(e.errno, e.strerror)
6f7563be 5194 return
efa97bdc 5195
6f7563be 5196 # UNIX Method 1. Use xattrs/pyxattrs modules
efa97bdc 5197
6f7563be 5198 setxattr = None
5199 if getattr(xattr, '_yt_dlp__identifier', None) == 'pyxattr':
5200 # Unicode arguments are not supported in pyxattr until version 0.5.0
5201 # See https://github.com/ytdl-org/youtube-dl/issues/5498
5202 if version_tuple(xattr.__version__) >= (0, 5, 0):
5203 setxattr = xattr.set
5204 elif xattr:
5205 setxattr = xattr.setxattr
efa97bdc 5206
6f7563be 5207 if setxattr:
5208 try:
5209 setxattr(path, key, value)
5210 except OSError as e:
5211 raise XAttrMetadataError(e.errno, e.strerror)
5212 return
efa97bdc 5213
6f7563be 5214 # UNIX Method 2. Use setfattr/xattr executables
5215 exe = ('setfattr' if check_executable('setfattr', ['--version'])
5216 else 'xattr' if check_executable('xattr', ['-h']) else None)
5217 if not exe:
5218 raise XAttrUnavailableError(
5219 'Couldn\'t find a tool to set the xattrs. Install either the python "xattr" or "pyxattr" modules or the '
5220 + ('"xattr" binary' if sys.platform != 'linux' else 'GNU "attr" package (which contains the "setfattr" tool)'))
efa97bdc 5221
0f06bcd7 5222 value = value.decode()
6f7563be 5223 try:
f0c9fb96 5224 _, stderr, returncode = Popen.run(
6f7563be 5225 [exe, '-w', key, value, path] if exe == 'xattr' else [exe, '-n', key, '-v', value, path],
e121e3ce 5226 text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
6f7563be 5227 except OSError as e:
5228 raise XAttrMetadataError(e.errno, e.strerror)
f0c9fb96 5229 if returncode:
5230 raise XAttrMetadataError(returncode, stderr)
0c265486
YCH
5231
5232
5233def random_birthday(year_field, month_field, day_field):
aa374bc7
AS
5234 start_date = datetime.date(1950, 1, 1)
5235 end_date = datetime.date(1995, 12, 31)
5236 offset = random.randint(0, (end_date - start_date).days)
5237 random_date = start_date + datetime.timedelta(offset)
0c265486 5238 return {
aa374bc7
AS
5239 year_field: str(random_date.year),
5240 month_field: str(random_date.month),
5241 day_field: str(random_date.day),
0c265486 5242 }
732044af 5243
c76eb41b 5244
8c53322c
L
5245def find_available_port(interface=''):
5246 try:
5247 with socket.socket() as sock:
5248 sock.bind((interface, 0))
5249 return sock.getsockname()[1]
5250 except OSError:
5251 return None
5252
5253
732044af 5254# Templates for internet shortcut files, which are plain text files.
e5a998f3 5255DOT_URL_LINK_TEMPLATE = '''\
732044af 5256[InternetShortcut]
5257URL=%(url)s
e5a998f3 5258'''
732044af 5259
e5a998f3 5260DOT_WEBLOC_LINK_TEMPLATE = '''\
732044af 5261<?xml version="1.0" encoding="UTF-8"?>
5262<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
5263<plist version="1.0">
5264<dict>
5265\t<key>URL</key>
5266\t<string>%(url)s</string>
5267</dict>
5268</plist>
e5a998f3 5269'''
732044af 5270
e5a998f3 5271DOT_DESKTOP_LINK_TEMPLATE = '''\
732044af 5272[Desktop Entry]
5273Encoding=UTF-8
5274Name=%(filename)s
5275Type=Link
5276URL=%(url)s
5277Icon=text-html
e5a998f3 5278'''
732044af 5279
08438d2c 5280LINK_TEMPLATES = {
5281 'url': DOT_URL_LINK_TEMPLATE,
5282 'desktop': DOT_DESKTOP_LINK_TEMPLATE,
5283 'webloc': DOT_WEBLOC_LINK_TEMPLATE,
5284}
5285
732044af 5286
5287def iri_to_uri(iri):
5288 """
5289 Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).
5290
5291 The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact.
5292 """
5293
14f25df2 5294 iri_parts = urllib.parse.urlparse(iri)
732044af 5295
5296 if '[' in iri_parts.netloc:
5297 raise ValueError('IPv6 URIs are not yet supported.')
5298 # Querying `.netloc`, when there's only one bracket, also raises a ValueError.
5299
5300 # The `safe` argument values, that the following code uses, contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.
5301
5302 net_location = ''
5303 if iri_parts.username:
f9934b96 5304 net_location += urllib.parse.quote(iri_parts.username, safe=r"!$%&'()*+,~")
732044af 5305 if iri_parts.password is not None:
f9934b96 5306 net_location += ':' + urllib.parse.quote(iri_parts.password, safe=r"!$%&'()*+,~")
732044af 5307 net_location += '@'
5308
0f06bcd7 5309 net_location += iri_parts.hostname.encode('idna').decode() # Punycode for Unicode hostnames.
732044af 5310 # The 'idna' encoding produces ASCII text.
5311 if iri_parts.port is not None and iri_parts.port != 80:
5312 net_location += ':' + str(iri_parts.port)
5313
f9934b96 5314 return urllib.parse.urlunparse(
732044af 5315 (iri_parts.scheme,
5316 net_location,
5317
f9934b96 5318 urllib.parse.quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),
732044af 5319
5320 # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
f9934b96 5321 urllib.parse.quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),
732044af 5322
5323 # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
f9934b96 5324 urllib.parse.quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),
732044af 5325
f9934b96 5326 urllib.parse.quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))
732044af 5327
5328 # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
5329
5330
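# Sketch of iri_to_uri() percent-encoding the path and query of an IRI (illustration only):
#   >>> iri_to_uri('https://example.com/föö?q=bär')
#   'https://example.com/f%C3%B6%C3%B6?q=b%C3%A4r'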
5331def to_high_limit_path(path):
5332 if sys.platform in ['win32', 'cygwin']:
5333 # Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited.
e5a998f3 5334 return '\\\\?\\' + os.path.abspath(path)
732044af 5335
5336 return path
76d321f6 5337
c76eb41b 5338
7b2c3f47 5339def format_field(obj, field=None, template='%s', ignore=NO_DEFAULT, default='', func=IDENTITY):
e0ddbd02 5340 val = traverse_obj(obj, *variadic(field))
7b2c3f47 5341 if (not val and val != 0) if ignore is NO_DEFAULT else val in variadic(ignore):
e0ddbd02 5342 return default
7b2c3f47 5343 return template % func(val)
00dd0cd5 5344
5345
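# Sketch of format_field() behaviour (illustration only):
#   >>> format_field({'width': 1280}, 'width', '%dpx')
#   '1280px'
#   >>> format_field({'fps': 0}, 'fps', '%d fps')       # 0 is a real value, not "missing"
#   '0 fps'
#   >>> format_field({}, 'width', '%dpx', default='unknown')
#   'unknown'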
5346def clean_podcast_url(url):
5347 return re.sub(r'''(?x)
5348 (?:
5349 (?:
5350 chtbl\.com/track|
5351 media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
5352 play\.podtrac\.com
5353 )/[^/]+|
5354 (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
5355 flex\.acast\.com|
5356 pd(?:
5357 cn\.co| # https://podcorn.com/analytics-prefix/
5358 st\.fm # https://podsights.com/docs/
5359 )/e
5360 )/''', '', url)
ffcb8191
THD
5361
5362
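# Sketch of clean_podcast_url() stripping a tracking prefix (the hosts and path below are
# hypothetical examples, only the chtbl.com prefix is taken from the regex above):
#   >>> clean_podcast_url('https://chtbl.com/track/ABC123/traffic.example.com/episode.mp3')
#   'https://traffic.example.com/episode.mp3'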
5363_HEX_TABLE = '0123456789abcdef'
5364
5365
5366def random_uuidv4():
5367 return re.sub(r'[xy]', lambda x: _HEX_TABLE[random.randint(0, 15)], 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')
0202b52a 5368
5369
5370def make_dir(path, to_screen=None):
5371 try:
5372 dn = os.path.dirname(path)
5373 if dn and not os.path.exists(dn):
5374 os.makedirs(dn)
5375 return True
86e5f3ed 5376 except OSError as err:
0202b52a 5377 if callable(to_screen):
5378 to_screen('unable to create directory ' + error_to_compat_str(err))
5379 return False
f74980cb 5380
5381
5382def get_executable_path():
b5899f4f 5383 from .update import _get_variant_and_executable_path
c487cf00 5384
b5899f4f 5385 return os.path.dirname(os.path.abspath(_get_variant_and_executable_path()[1]))
f74980cb 5386
5387
8e40b9d1 5388def get_user_config_dirs(package_name):
8e40b9d1
M
5389 # .config (e.g. ~/.config/package_name)
5390 xdg_config_home = os.getenv('XDG_CONFIG_HOME') or compat_expanduser('~/.config')
773c272d 5391 yield os.path.join(xdg_config_home, package_name)
8e40b9d1
M
5392
5393 # appdata (%APPDATA%/package_name)
5394 appdata_dir = os.getenv('appdata')
5395 if appdata_dir:
773c272d 5396 yield os.path.join(appdata_dir, package_name)
8e40b9d1
M
5397
5398 # home (~/.package_name)
773c272d 5399 yield os.path.join(compat_expanduser('~'), f'.{package_name}')
8e40b9d1
M
5400
5401
5402def get_system_config_dirs(package_name):
8e40b9d1 5403 # /etc/package_name
773c272d 5404 yield os.path.join('/etc', package_name)
06167fbb 5405
5406
325ebc17 5407def traverse_obj(
f99bbfc9 5408 obj, *paths, default=NO_DEFAULT, expected_type=None, get_all=True,
325ebc17 5409 casesense=True, is_user_input=False, traverse_string=False):
ab029d7e
SS
5410 """
5411 Safely traverse nested `dict`s and `Sequence`s
5412
5413 >>> obj = [{}, {"key": "value"}]
5414 >>> traverse_obj(obj, (1, "key"))
5415 "value"
5416
5417 Each of the provided `paths` is tested and the first producing a valid result will be returned.
f99bbfc9 5418 The next path will also be tested if the path branched but no results could be found.
7b0127e1 5419 Supported values for traversal are `Mapping`, `Sequence` and `re.Match`.
6839ae1f 5420 Unhelpful values (`{}`, `None`) are treated as the absence of a value and discarded.
ab029d7e
SS
5421
5422 The paths will be wrapped in `variadic`, so that `'key'` is conveniently the same as `('key', )`.
5423
5424 The keys in the path can be one of:
5425 - `None`: Return the current object.
776995bc
SS
5426 - `set`: Requires the only item in the set to be a type or function,
5427 like `{type}`/`{func}`. If a `type`, returns only values
5428 of this type. If a function, returns `func(obj)`.
8e174ba7 5429 - `str`/`int`: Return `obj[key]`. For `re.Match`, return `obj.group(key)`.
ab029d7e
SS
5430 - `slice`: Branch out and return all values in `obj[key]`.
5431 - `Ellipsis`: Branch out and return a list of all values.
5432 - `tuple`/`list`: Branch out and return a list of all matching values.
5433 Read as: `[traverse_obj(obj, branch) for branch in branches]`.
5434 - `function`: Branch out and return values filtered by the function.
5435 Read as: `[value for key, value in obj if function(key, value)]`.
5436 For `Sequence`s, `key` is the index of the value.
776995bc
SS
5437 For `re.Match`es, `key` is the group number (0 = full match)
5438 as well as any group names, if given.
ab029d7e
SS
5439 - `dict` Transform the current object and return a matching dict.
5440 Read as: `{key: traverse_obj(obj, path) for key, path in dct.items()}`.
5441
7b0127e1 5442 `tuple`, `list`, and `dict` all support nested paths and branches.
ab029d7e
SS
5443
5444 @params paths Paths which to traverse by.
5445 @param default Value to return if the paths do not match.
b1bde57b
SS
5446 If the last key in the path is a `dict`, it will apply to each value inside
5447 the dict instead, depth first. Try to avoid if using nested `dict` keys.
ab029d7e
SS
5448 @param expected_type If a `type`, only accept final values of this type.
5449 If any other callable, try to call the function on each result.
776995bc
SS
5450 If the last key in the path is a `dict`, it will apply to each value inside
5451 the dict instead, recursively. This does respect branching paths.
ab029d7e
SS
5452 @param get_all If `False`, return the first matching result, otherwise all matching ones.
5453 @param casesense If `False`, consider string dictionary keys as case insensitive.
5454
5455 The following are only meant to be used by YoutubeDL.prepare_outtmpl and are not part of the API
5456
5457 @param is_user_input Whether the keys are generated from user input.
5458 If `True` strings get converted to `int`/`slice` if needed.
5459 @param traverse_string Whether to traverse into objects as strings.
5460 If `True`, any non-compatible object will first be
5461 converted into a string and then traversed into.
b1bde57b
SS
5462 The return value of that path will be a string instead,
5463 not respecting any further branching.
ab029d7e
SS
5464
5465
5466 @returns The result of the object traversal.
5467 If successful, `get_all=True`, and the path branches at least once,
5468 then a list of results is returned instead.
b1bde57b
SS
5469 If no `default` is given and the last path branches, a `list` of results
5470 is always returned. If a path ends on a `dict`, that result will always be a `dict`.
ab029d7e
SS
5471 """
5472 is_sequence = lambda x: isinstance(x, collections.abc.Sequence) and not isinstance(x, (str, bytes))
5473 casefold = lambda k: k.casefold() if isinstance(k, str) else k
325ebc17 5474
352d63fd 5475 if isinstance(expected_type, type):
5476 type_test = lambda val: val if isinstance(val, expected_type) else None
352d63fd 5477 else:
ab029d7e
SS
5478 type_test = lambda val: try_call(expected_type or IDENTITY, args=(val,))
5479
b1bde57b
SS
5480 def apply_key(key, obj, is_last):
5481 branching = False
5482 result = None
5483
6839ae1f 5484 if obj is None and traverse_string:
b1bde57b 5485 pass
ab029d7e
SS
5486
5487 elif key is None:
b1bde57b 5488 result = obj
ab029d7e 5489
776995bc
SS
5490 elif isinstance(key, set):
5491 assert len(key) == 1, 'Set should only be used to wrap a single item'
5492 item = next(iter(key))
5493 if isinstance(item, type):
5494 if isinstance(obj, item):
b1bde57b 5495 result = obj
776995bc 5496 else:
b1bde57b 5497 result = try_call(item, args=(obj,))
776995bc 5498
ab029d7e 5499 elif isinstance(key, (list, tuple)):
b1bde57b
SS
5500 branching = True
5501 result = itertools.chain.from_iterable(
5502 apply_path(obj, branch, is_last)[0] for branch in key)
ab029d7e
SS
5503
5504 elif key is ...:
b1bde57b 5505 branching = True
ab029d7e 5506 if isinstance(obj, collections.abc.Mapping):
b1bde57b 5507 result = obj.values()
ab029d7e 5508 elif is_sequence(obj):
b1bde57b 5509 result = obj
7b0127e1 5510 elif isinstance(obj, re.Match):
b1bde57b 5511 result = obj.groups()
ab029d7e 5512 elif traverse_string:
b1bde57b
SS
5513 branching = False
5514 result = str(obj)
5515 else:
5516 result = ()
ab029d7e
SS
5517
5518 elif callable(key):
b1bde57b
SS
5519 branching = True
5520 if isinstance(obj, collections.abc.Mapping):
ab029d7e 5521 iter_obj = obj.items()
b1bde57b
SS
5522 elif is_sequence(obj):
5523 iter_obj = enumerate(obj)
7b0127e1 5524 elif isinstance(obj, re.Match):
776995bc
SS
5525 iter_obj = itertools.chain(
5526 enumerate((obj.group(), *obj.groups())),
5527 obj.groupdict().items())
ab029d7e 5528 elif traverse_string:
b1bde57b 5529 branching = False
ab029d7e 5530 iter_obj = enumerate(str(obj))
352d63fd 5531 else:
b1bde57b
SS
5532 iter_obj = ()
5533
5534 result = (v for k, v in iter_obj if try_call(key, args=(k, v)))
5535 if not branching: # string traversal
5536 result = ''.join(result)
ab029d7e
SS
5537
5538 elif isinstance(key, dict):
b1bde57b
SS
5539 iter_obj = ((k, _traverse_obj(obj, v, False, is_last)) for k, v in key.items())
5540 result = {
5541 k: v if v is not None else default for k, v in iter_obj
5542 if v is not None or default is not NO_DEFAULT
5543 } or None
ab029d7e 5544
7b0127e1 5545 elif isinstance(obj, collections.abc.Mapping):
b1bde57b
SS
5546 result = (obj.get(key) if casesense or (key in obj) else
5547 next((v for k, v in obj.items() if casefold(k) == key), None))
ab029d7e 5548
7b0127e1
SS
5549 elif isinstance(obj, re.Match):
5550 if isinstance(key, int) or casesense:
5551 with contextlib.suppress(IndexError):
b1bde57b 5552 result = obj.group(key)
7b0127e1 5553
b1bde57b
SS
5554 elif isinstance(key, str):
5555 result = next((v for k, v in obj.groupdict().items() if casefold(k) == key), None)
ab029d7e 5556
b1bde57b 5557 elif isinstance(key, (int, slice)):
6839ae1f 5558 if is_sequence(obj):
b1bde57b
SS
5559 branching = isinstance(key, slice)
5560 with contextlib.suppress(IndexError):
5561 result = obj[key]
6839ae1f
SS
5562 elif traverse_string:
5563 with contextlib.suppress(IndexError):
5564 result = str(obj)[key]
ab029d7e 5565
b1bde57b 5566 return branching, result if branching else (result,)
ab029d7e 5567
776995bc
SS
5568 def lazy_last(iterable):
5569 iterator = iter(iterable)
5570 prev = next(iterator, NO_DEFAULT)
5571 if prev is NO_DEFAULT:
5572 return
5573
5574 for item in iterator:
5575 yield False, prev
5576 prev = item
5577
5578 yield True, prev
5579
b1bde57b 5580 def apply_path(start_obj, path, test_type):
ab029d7e
SS
5581 objs = (start_obj,)
5582 has_branched = False
5583
776995bc
SS
5584 key = None
5585 for last, key in lazy_last(variadic(path, (str, bytes, dict, set))):
b1bde57b
SS
5586 if is_user_input and isinstance(key, str):
5587 if key == ':':
5588 key = ...
5589 elif ':' in key:
5590 key = slice(*map(int_or_none, key.split(':')))
5591 elif int_or_none(key) is not None:
5592 key = int(key)
ab029d7e
SS
5593
5594 if not casesense and isinstance(key, str):
5595 key = key.casefold()
5596
776995bc
SS
5597 if __debug__ and callable(key):
5598 # Verify function signature
5599 inspect.signature(key).bind(None, None)
5600
b1bde57b
SS
5601 new_objs = []
5602 for obj in objs:
5603 branching, results = apply_key(key, obj, last)
5604 has_branched |= branching
5605 new_objs.append(results)
5606
5607 objs = itertools.chain.from_iterable(new_objs)
ab029d7e 5608
776995bc
SS
5609 if test_type and not isinstance(key, (dict, list, tuple)):
5610 objs = map(type_test, objs)
5611
b1bde57b 5612 return objs, has_branched, isinstance(key, dict)
ab029d7e 5613
b1bde57b
SS
5614 def _traverse_obj(obj, path, allow_empty, test_type):
5615 results, has_branched, is_dict = apply_path(obj, path, test_type)
6839ae1f 5616 results = LazyList(item for item in results if item not in (None, {}))
f99bbfc9 5617 if get_all and has_branched:
b1bde57b
SS
5618 if results:
5619 return results.exhaust()
5620 if allow_empty:
5621 return [] if default is NO_DEFAULT else default
5622 return None
f99bbfc9 5623
b1bde57b 5624 return results[0] if results else {} if allow_empty and is_dict else None
f99bbfc9
SS
5625
5626 for index, path in enumerate(paths, 1):
b1bde57b 5627 result = _traverse_obj(obj, path, index == len(paths), True)
ab029d7e
SS
5628 if result is not None:
5629 return result
5630
f99bbfc9 5631 return None if default is NO_DEFAULT else default
324ad820 5632
5633
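# A couple of traverse_obj() sketches beyond the docstring example (illustration only):
#   >>> traverse_obj({'a': [{'b': 1}, {'b': 2}]}, ('a', ..., 'b'))       # branching path
#   [1, 2]
#   >>> traverse_obj({'a': {'b': 'c'}}, ('a', 'x'), ('a', 'b'))          # first matching path wins
#   'c'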
5634def traverse_dict(dictn, keys, casesense=True):
da4db748 5635 deprecation_warning(f'"{__name__}.traverse_dict" is deprecated and may be removed '
5636 f'in a future version. Use "{__name__}.traverse_obj" instead')
ee8dd27a 5637 return traverse_obj(dictn, keys, casesense=casesense, is_user_input=True, traverse_string=True)
6606817a 5638
5639
ff91cf74 5640def get_first(obj, keys, **kwargs):
5641 return traverse_obj(obj, (..., *variadic(keys)), **kwargs, get_all=False)
5642
5643
3e9b66d7 5644def time_seconds(**kwargs):
83c4970e
L
5645 """
5646 Returns TZ-aware time in seconds since the epoch (1970-01-01T00:00:00Z)
5647 """
5648 return time.time() + datetime.timedelta(**kwargs).total_seconds()
3e9b66d7
LNO
5649
5650
49fa4d9a
N
5651# create a JSON Web Signature (jws) with HS256 algorithm
5652# the resulting format is in JWS Compact Serialization
5653# implemented following JWT https://www.rfc-editor.org/rfc/rfc7519.html
5654# implemented following JWS https://www.rfc-editor.org/rfc/rfc7515.html
5655def jwt_encode_hs256(payload_data, key, headers={}):
5656 header_data = {
5657 'alg': 'HS256',
5658 'typ': 'JWT',
5659 }
5660 if headers:
5661 header_data.update(headers)
0f06bcd7 5662 header_b64 = base64.b64encode(json.dumps(header_data).encode())
5663 payload_b64 = base64.b64encode(json.dumps(payload_data).encode())
5664 h = hmac.new(key.encode(), header_b64 + b'.' + payload_b64, hashlib.sha256)
49fa4d9a
N
5665 signature_b64 = base64.b64encode(h.digest())
5666 token = header_b64 + b'.' + payload_b64 + b'.' + signature_b64
5667 return token
819e0531 5668
5669
16b0d7e6 5670# can be extended in the future to verify the signature, parse the header and return the algorithm used if it's not HS256
5671def jwt_decode_hs256(jwt):
5672 header_b64, payload_b64, signature_b64 = jwt.split('.')
2c98d998 5673 # add trailing ='s that may have been stripped; superfluous ='s are ignored
5674 payload_data = json.loads(base64.urlsafe_b64decode(f'{payload_b64}==='))
16b0d7e6 5675 return payload_data
5676
5677
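# Round-trip sketch for the two JWT helpers above (illustration only; the key is a dummy value):
#   >>> token = jwt_encode_hs256({'uid': 123}, 'secret')        # returns bytes
#   >>> jwt_decode_hs256(token.decode())
#   {'uid': 123}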
53973b4d 5678WINDOWS_VT_MODE = False if compat_os_name == 'nt' else None
5679
5680
7a32c70d 5681@functools.cache
819e0531 5682def supports_terminal_sequences(stream):
5683 if compat_os_name == 'nt':
8a82af35 5684 if not WINDOWS_VT_MODE:
819e0531 5685 return False
5686 elif not os.getenv('TERM'):
5687 return False
5688 try:
5689 return stream.isatty()
5690 except BaseException:
5691 return False
5692
5693
c53a18f0 5694def windows_enable_vt_mode():
5695 """Ref: https://bugs.python.org/issue30075 """
8a82af35 5696 if get_windows_version() < (10, 0, 10586):
53973b4d 5697 return
53973b4d 5698
c53a18f0 5699 import ctypes
5700 import ctypes.wintypes
5701 import msvcrt
5702
5703 ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
5704
5705 dll = ctypes.WinDLL('kernel32', use_last_error=False)
5706 handle = os.open('CONOUT$', os.O_RDWR)
c53a18f0 5707 try:
5708 h_out = ctypes.wintypes.HANDLE(msvcrt.get_osfhandle(handle))
5709 dw_original_mode = ctypes.wintypes.DWORD()
5710 success = dll.GetConsoleMode(h_out, ctypes.byref(dw_original_mode))
5711 if not success:
5712 raise Exception('GetConsoleMode failed')
5713
5714 success = dll.SetConsoleMode(h_out, ctypes.wintypes.DWORD(
5715 dw_original_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING))
5716 if not success:
5717 raise Exception('SetConsoleMode failed')
c53a18f0 5718 finally:
5719 os.close(handle)
53973b4d 5720
f0795149 5721 global WINDOWS_VT_MODE
5722 WINDOWS_VT_MODE = True
5723 supports_terminal_sequences.cache_clear()
5724
53973b4d 5725
ec11a9f4 5726_terminal_sequences_re = re.compile('\033\\[[^m]+m')
5727
5728
5729def remove_terminal_sequences(string):
5730 return _terminal_sequences_re.sub('', string)
5731
5732
5733def number_of_digits(number):
5734 return len('%d' % number)
34921b43 5735
5736
5737def join_nonempty(*values, delim='-', from_dict=None):
5738 if from_dict is not None:
7b2c3f47 5739 values = (traverse_obj(from_dict, variadic(v)) for v in values)
34921b43 5740 return delim.join(map(str, filter(None, values)))
06e57990 5741
5742
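# Sketch of join_nonempty(); falsy values such as None, '' and 0 are dropped (illustration only):
#   >>> join_nonempty('1080p', None, 'mp4', '')
#   '1080p-mp4'
#   >>> join_nonempty('format_id', 'ext', from_dict={'format_id': '22', 'ext': 'mp4'})
#   '22-mp4'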
27231526
ZM
5743def scale_thumbnails_to_max_format_width(formats, thumbnails, url_width_re):
5744 """
5745 Find the largest format dimensions in terms of video width and, for each thumbnail:
5746 * Modify the URL: Match the width with the provided regex and replace with the former width
5747 * Update dimensions
5748
5749 This function is useful with video services that scale the provided thumbnails on demand
5750 """
5751 _keys = ('width', 'height')
5752 max_dimensions = max(
86e5f3ed 5753 (tuple(format.get(k) or 0 for k in _keys) for format in formats),
27231526
ZM
5754 default=(0, 0))
5755 if not max_dimensions[0]:
5756 return thumbnails
5757 return [
5758 merge_dicts(
5759 {'url': re.sub(url_width_re, str(max_dimensions[0]), thumbnail['url'])},
5760 dict(zip(_keys, max_dimensions)), thumbnail)
5761 for thumbnail in thumbnails
5762 ]
5763
5764
93c8410d
LNO
5765def parse_http_range(range):
5766 """ Parse value of "Range" or "Content-Range" HTTP header into tuple. """
5767 if not range:
5768 return None, None, None
5769 crg = re.search(r'bytes[ =](\d+)-(\d+)?(?:/(\d+))?', range)
5770 if not crg:
5771 return None, None, None
5772 return int(crg.group(1)), int_or_none(crg.group(2)), int_or_none(crg.group(3))
5773
5774
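# Sketch of parse_http_range() on typical header values (illustration only):
#   >>> parse_http_range('bytes 0-499/1234')        # Content-Range style
#   (0, 499, 1234)
#   >>> parse_http_range('bytes=500-')              # open-ended Range header
#   (500, None, None)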
6b9e832d 5775def read_stdin(what):
5776 eof = 'Ctrl+Z' if compat_os_name == 'nt' else 'Ctrl+D'
5777 write_string(f'Reading {what} from STDIN - EOF ({eof}) to end:\n')
5778 return sys.stdin
5779
5780
a904a7f8
L
5781def determine_file_encoding(data):
5782 """
88f60feb 5783 Detect the text encoding used
a904a7f8
L
5784 @returns (encoding, bytes to skip)
5785 """
5786
88f60feb 5787 # BOM marks are given priority over declarations
a904a7f8 5788 for bom, enc in BOMS:
a904a7f8
L
5789 if data.startswith(bom):
5790 return enc, len(bom)
5791
88f60feb 5792 # Strip off all null bytes to match even when UTF-16 or UTF-32 is used.
5793 # We ignore the endianness to get a good enough match
a904a7f8 5794 data = data.replace(b'\0', b'')
88f60feb 5795 mobj = re.match(rb'(?m)^#\s*coding\s*:\s*(\S+)\s*$', data)
5796 return mobj.group(1).decode() if mobj else None, 0
a904a7f8
L
5797
5798
06e57990 5799class Config:
5800 own_args = None
9e491463 5801 parsed_args = None
06e57990 5802 filename = None
5803 __initialized = False
5804
5805 def __init__(self, parser, label=None):
9e491463 5806 self.parser, self.label = parser, label
06e57990 5807 self._loaded_paths, self.configs = set(), []
5808
5809 def init(self, args=None, filename=None):
5810 assert not self.__initialized
284a60c5 5811 self.own_args, self.filename = args, filename
5812 return self.load_configs()
5813
5814 def load_configs(self):
65662dff 5815 directory = ''
284a60c5 5816 if self.filename:
5817 location = os.path.realpath(self.filename)
65662dff 5818 directory = os.path.dirname(location)
06e57990 5819 if location in self._loaded_paths:
5820 return False
5821 self._loaded_paths.add(location)
5822
284a60c5 5823 self.__initialized = True
5824 opts, _ = self.parser.parse_known_args(self.own_args)
5825 self.parsed_args = self.own_args
9e491463 5826 for location in opts.config_locations or []:
6b9e832d 5827 if location == '-':
1060f82f 5828 if location in self._loaded_paths:
5829 continue
5830 self._loaded_paths.add(location)
6b9e832d 5831 self.append_config(shlex.split(read_stdin('options'), comments=True), label='stdin')
5832 continue
65662dff 5833 location = os.path.join(directory, expand_path(location))
06e57990 5834 if os.path.isdir(location):
5835 location = os.path.join(location, 'yt-dlp.conf')
5836 if not os.path.exists(location):
9e491463 5837 self.parser.error(f'config location {location} does not exist')
06e57990 5838 self.append_config(self.read_file(location), location)
5839 return True
5840
5841 def __str__(self):
5842 label = join_nonempty(
5843 self.label, 'config', f'"{self.filename}"' if self.filename else '',
5844 delim=' ')
5845 return join_nonempty(
5846 self.own_args is not None and f'{label[0].upper()}{label[1:]}: {self.hide_login_info(self.own_args)}',
5847 *(f'\n{c}'.replace('\n', '\n| ')[1:] for c in self.configs),
5848 delim='\n')
5849
7a32c70d 5850 @staticmethod
06e57990 5851 def read_file(filename, default=[]):
5852 try:
a904a7f8 5853 optionf = open(filename, 'rb')
86e5f3ed 5854 except OSError:
06e57990 5855 return default # silently skip if file is not present
a904a7f8
L
5856 try:
5857 enc, skip = determine_file_encoding(optionf.read(512))
5858 optionf.seek(skip, io.SEEK_SET)
5859 except OSError:
5860 enc = None # silently skip read errors
06e57990 5861 try:
5862 # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
a904a7f8 5863 contents = optionf.read().decode(enc or preferredencoding())
f9934b96 5864 res = shlex.split(contents, comments=True)
44a6fcff 5865 except Exception as err:
5866 raise ValueError(f'Unable to parse "{filename}": {err}')
06e57990 5867 finally:
5868 optionf.close()
5869 return res
5870
7a32c70d 5871 @staticmethod
06e57990 5872 def hide_login_info(opts):
86e5f3ed 5873 PRIVATE_OPTS = {'-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'}
06e57990 5874 eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')
5875
5876 def _scrub_eq(o):
5877 m = eqre.match(o)
5878 if m:
5879 return m.group('key') + '=PRIVATE'
5880 else:
5881 return o
5882
5883 opts = list(map(_scrub_eq, opts))
5884 for idx, opt in enumerate(opts):
5885 if opt in PRIVATE_OPTS and idx + 1 < len(opts):
5886 opts[idx + 1] = 'PRIVATE'
5887 return opts
5888
5889 def append_config(self, *args, label=None):
9e491463 5890 config = type(self)(self.parser, label)
06e57990 5891 config._loaded_paths = self._loaded_paths
5892 if config.init(*args):
5893 self.configs.append(config)
5894
7a32c70d 5895 @property
06e57990 5896 def all_args(self):
5897 for config in reversed(self.configs):
5898 yield from config.all_args
9e491463 5899 yield from self.parsed_args or []
5900
5901 def parse_known_args(self, **kwargs):
5902 return self.parser.parse_known_args(self.all_args, **kwargs)
06e57990 5903
5904 def parse_args(self):
9e491463 5905 return self.parser.parse_args(self.all_args)
da42679b
LNO
5906
5907
d5d1df8a 5908class WebSocketsWrapper:
da42679b 5909 """Wraps websockets module to use in non-async scopes"""
abfecb7b 5910 pool = None
da42679b 5911
3cea3edd 5912 def __init__(self, url, headers=None, connect=True):
059bc4db 5913 self.loop = asyncio.new_event_loop()
9cd08050 5914 # XXX: "loop" is deprecated
5915 self.conn = websockets.connect(
5916 url, extra_headers=headers, ping_interval=None,
5917 close_timeout=float('inf'), loop=self.loop, ping_timeout=float('inf'))
3cea3edd
LNO
5918 if connect:
5919 self.__enter__()
15dfb392 5920 atexit.register(self.__exit__, None, None, None)
da42679b
LNO
5921
5922 def __enter__(self):
3cea3edd 5923 if not self.pool:
9cd08050 5924 self.pool = self.run_with_loop(self.conn.__aenter__(), self.loop)
da42679b
LNO
5925 return self
5926
5927 def send(self, *args):
5928 self.run_with_loop(self.pool.send(*args), self.loop)
5929
5930 def recv(self, *args):
5931 return self.run_with_loop(self.pool.recv(*args), self.loop)
5932
5933 def __exit__(self, type, value, traceback):
5934 try:
5935 return self.run_with_loop(self.conn.__aexit__(type, value, traceback), self.loop)
5936 finally:
5937 self.loop.close()
15dfb392 5938 self._cancel_all_tasks(self.loop)
da42679b
LNO
5939
5940 # taken from https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py with modifications
5941 # for contributors: if any new library that uses asyncio needs to be run in a non-async context, move these functions out of this class
7a32c70d 5942 @staticmethod
da42679b 5943 def run_with_loop(main, loop):
059bc4db 5944 if not asyncio.iscoroutine(main):
da42679b
LNO
5945 raise ValueError(f'a coroutine was expected, got {main!r}')
5946
5947 try:
5948 return loop.run_until_complete(main)
5949 finally:
5950 loop.run_until_complete(loop.shutdown_asyncgens())
5951 if hasattr(loop, 'shutdown_default_executor'):
5952 loop.run_until_complete(loop.shutdown_default_executor())
5953
7a32c70d 5954 @staticmethod
da42679b 5955 def _cancel_all_tasks(loop):
059bc4db 5956 to_cancel = asyncio.all_tasks(loop)
da42679b
LNO
5957
5958 if not to_cancel:
5959 return
5960
5961 for task in to_cancel:
5962 task.cancel()
5963
9cd08050 5964 # XXX: "loop" is removed in python 3.10+
da42679b 5965 loop.run_until_complete(
059bc4db 5966 asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))
da42679b
LNO
5967
5968 for task in to_cancel:
5969 if task.cancelled():
5970 continue
5971 if task.exception() is not None:
5972 loop.call_exception_handler({
5973 'message': 'unhandled exception during asyncio.run() shutdown',
5974 'exception': task.exception(),
5975 'task': task,
5976 })
5977
5978
8b7539d2 5979def merge_headers(*dicts):
08d30158 5980 """Merge dicts of http headers case insensitively, prioritizing the latter ones"""
76aa9913 5981 return {k.title(): v for k, v in itertools.chain.from_iterable(map(dict.items, dicts))}
28787f16 5982
5983
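# Sketch of the case-insensitive header merge above (illustration only):
#   >>> merge_headers({'user-agent': 'A', 'Accept': '*/*'}, {'User-Agent': 'B'})
#   {'User-Agent': 'B', 'Accept': '*/*'}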
b1f94422 5984def cached_method(f):
5985 """Cache a method"""
5986 signature = inspect.signature(f)
5987
7a32c70d 5988 @functools.wraps(f)
b1f94422 5989 def wrapper(self, *args, **kwargs):
5990 bound_args = signature.bind(self, *args, **kwargs)
5991 bound_args.apply_defaults()
d5d1df8a 5992 key = tuple(bound_args.arguments.values())[1:]
b1f94422 5993
6368e2e6 5994 cache = vars(self).setdefault('_cached_method__cache', {}).setdefault(f.__name__, {})
b1f94422 5995 if key not in cache:
5996 cache[key] = f(self, *args, **kwargs)
5997 return cache[key]
5998 return wrapper
5999
6000
28787f16 6001class classproperty:
83cc7b8a 6002 """property access for class methods with optional caching"""
6003 def __new__(cls, func=None, *args, **kwargs):
6004 if not func:
6005 return functools.partial(cls, *args, **kwargs)
6006 return super().__new__(cls)
c487cf00 6007
83cc7b8a 6008 def __init__(self, func, *, cache=False):
c487cf00 6009 functools.update_wrapper(self, func)
6010 self.func = func
83cc7b8a 6011 self._cache = {} if cache else None
28787f16 6012
6013 def __get__(self, _, cls):
83cc7b8a 6014 if self._cache is None:
6015 return self.func(cls)
6016 elif cls not in self._cache:
6017 self._cache[cls] = self.func(cls)
6018 return self._cache[cls]
19a03940 6019
6020
64fa820c 6021class Namespace(types.SimpleNamespace):
591bb9d3 6022 """Immutable namespace"""
591bb9d3 6023
7896214c 6024 def __iter__(self):
64fa820c 6025 return iter(self.__dict__.values())
7896214c 6026
7a32c70d 6027 @property
64fa820c 6028 def items_(self):
6029 return self.__dict__.items()
9b8ee23b 6030
6031
8dc59305 6032MEDIA_EXTENSIONS = Namespace(
6033 common_video=('avi', 'flv', 'mkv', 'mov', 'mp4', 'webm'),
6034 video=('3g2', '3gp', 'f4v', 'mk3d', 'divx', 'mpg', 'ogv', 'm4v', 'wmv'),
6035 common_audio=('aiff', 'alac', 'flac', 'm4a', 'mka', 'mp3', 'ogg', 'opus', 'wav'),
fbb73833 6036 audio=('aac', 'ape', 'asf', 'f4a', 'f4b', 'm4b', 'm4p', 'm4r', 'oga', 'ogx', 'spx', 'vorbis', 'wma', 'weba'),
8dc59305 6037 thumbnails=('jpg', 'png', 'webp'),
6038 storyboards=('mhtml', ),
6039 subtitles=('srt', 'vtt', 'ass', 'lrc'),
6040 manifests=('f4f', 'f4m', 'm3u8', 'smil', 'mpd'),
6041)
6042MEDIA_EXTENSIONS.video += MEDIA_EXTENSIONS.common_video
6043MEDIA_EXTENSIONS.audio += MEDIA_EXTENSIONS.common_audio
6044
6045KNOWN_EXTENSIONS = (*MEDIA_EXTENSIONS.video, *MEDIA_EXTENSIONS.audio, *MEDIA_EXTENSIONS.manifests)
6046
6047
be5c1ae8 6048class RetryManager:
6049 """Usage:
6050 for retry in RetryManager(...):
6051 try:
6052 ...
6053 except SomeException as err:
6054 retry.error = err
6055 continue
6056 """
6057 attempt, _error = 0, None
6058
6059 def __init__(self, _retries, _error_callback, **kwargs):
6060 self.retries = _retries or 0
6061 self.error_callback = functools.partial(_error_callback, **kwargs)
6062
6063 def _should_retry(self):
6064 return self._error is not NO_DEFAULT and self.attempt <= self.retries
6065
7a32c70d 6066 @property
be5c1ae8 6067 def error(self):
6068 if self._error is NO_DEFAULT:
6069 return None
6070 return self._error
6071
7a32c70d 6072 @error.setter
be5c1ae8 6073 def error(self, value):
6074 self._error = value
6075
6076 def __iter__(self):
6077 while self._should_retry():
6078 self.error = NO_DEFAULT
6079 self.attempt += 1
6080 yield self
6081 if self.error:
6082 self.error_callback(self.error, self.attempt, self.retries)
6083
7a32c70d 6084 @staticmethod
be5c1ae8 6085 def report_retry(e, count, retries, *, sleep_func, info, warn, error=None, suffix=None):
6086 """Utility function for reporting retries"""
6087 if count > retries:
6088 if error:
6089 return error(f'{e}. Giving up after {count - 1} retries') if count > 1 else error(str(e))
6090 raise e
6091
6092 if not count:
6093 return warn(e)
6094 elif isinstance(e, ExtractorError):
3ce29336 6095 e = remove_end(str_or_none(e.cause) or e.orig_msg, '.')
be5c1ae8 6096 warn(f'{e}. Retrying{format_field(suffix, None, " %s")} ({count}/{retries})...')
6097
6098 delay = float_or_none(sleep_func(n=count - 1)) if callable(sleep_func) else sleep_func
6099 if delay:
6100 info(f'Sleeping {delay:.2f} seconds ...')
6101 time.sleep(delay)
6102
6103
0647d925 6104def make_archive_id(ie, video_id):
6105 ie_key = ie if isinstance(ie, str) else ie.ie_key()
6106 return f'{ie_key.lower()} {video_id}'
6107
6108
a1c5bd82 6109def truncate_string(s, left, right=0):
6110 assert left > 3 and right >= 0
6111 if s is None or len(s) <= left + right:
6112 return s
71df9b7f 6113 return f'{s[:left-3]}...{s[-right:] if right else ""}'
a1c5bd82 6114
6115
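# Sketch of truncate_string(); the result is at most `left` + `right` characters long,
# ellipsis included (illustration only):
#   >>> truncate_string('abcdefghij', 6, 2)
#   'abc...ij'
#   >>> truncate_string('short', 6, 2)              # short enough strings are returned unchanged
#   'short'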
5314b521 6116def orderedSet_from_options(options, alias_dict, *, use_regex=False, start=None):
6117 assert 'all' in alias_dict, '"all" alias is required'
6118 requested = list(start or [])
6119 for val in options:
6120 discard = val.startswith('-')
6121 if discard:
6122 val = val[1:]
6123
6124 if val in alias_dict:
6125 val = alias_dict[val] if not discard else [
6126 i[1:] if i.startswith('-') else f'-{i}' for i in alias_dict[val]]
6127 # NB: Do not allow regex in aliases for performance
6128 requested = orderedSet_from_options(val, alias_dict, start=requested)
6129 continue
6130
6131 current = (filter(re.compile(val, re.I).fullmatch, alias_dict['all']) if use_regex
6132 else [val] if val in alias_dict['all'] else None)
6133 if current is None:
6134 raise ValueError(val)
6135
6136 if discard:
6137 for item in current:
6138 while item in requested:
6139 requested.remove(item)
6140 else:
6141 requested.extend(current)
6142
6143 return orderedSet(requested)
6144
6145
d0d74b71 6146class FormatSorter:
6147 regex = r' *((?P<reverse>\+)?(?P<field>[a-zA-Z0-9_]+)((?P<separator>[~:])(?P<limit>.*?))?)? *$'
6148
6149 default = ('hidden', 'aud_or_vid', 'hasvid', 'ie_pref', 'lang', 'quality',
6150 'res', 'fps', 'hdr:12', 'vcodec:vp9.2', 'channels', 'acodec',
6151 'size', 'br', 'asr', 'proto', 'ext', 'hasaud', 'source', 'id') # These must not be aliases
6152 ytdl_default = ('hasaud', 'lang', 'quality', 'tbr', 'filesize', 'vbr',
6153 'height', 'width', 'proto', 'vext', 'abr', 'aext',
6154 'fps', 'fs_approx', 'source', 'id')
6155
6156 settings = {
6157 'vcodec': {'type': 'ordered', 'regex': True,
6158 'order': ['av0?1', 'vp0?9.2', 'vp0?9', '[hx]265|he?vc?', '[hx]264|avc', 'vp0?8', 'mp4v|h263', 'theora', '', None, 'none']},
6159 'acodec': {'type': 'ordered', 'regex': True,
71082216 6160 'order': ['[af]lac', 'wav|aiff', 'opus', 'vorbis|ogg', 'aac', 'mp?4a?', 'mp3', 'ac-?4', 'e-?a?c-?3', 'ac-?3', 'dts', '', None, 'none']},
d0d74b71 6161 'hdr': {'type': 'ordered', 'regex': True, 'field': 'dynamic_range',
6162 'order': ['dv', '(hdr)?12', r'(hdr)?10\+', '(hdr)?10', 'hlg', '', 'sdr', None]},
6163 'proto': {'type': 'ordered', 'regex': True, 'field': 'protocol',
6164 'order': ['(ht|f)tps', '(ht|f)tp$', 'm3u8.*', '.*dash', 'websocket_frag', 'rtmpe?', '', 'mms|rtsp', 'ws|websocket', 'f4']},
6165 'vext': {'type': 'ordered', 'field': 'video_ext',
29ca4082 6166 'order': ('mp4', 'mov', 'webm', 'flv', '', 'none'),
6167 'order_free': ('webm', 'mp4', 'mov', 'flv', '', 'none')},
fbb73833 6168 'aext': {'type': 'ordered', 'regex': True, 'field': 'audio_ext',
6169 'order': ('m4a', 'aac', 'mp3', 'ogg', 'opus', 'web[am]', '', 'none'),
6170 'order_free': ('ogg', 'opus', 'web[am]', 'mp3', 'm4a', 'aac', '', 'none')},
d0d74b71 6171 'hidden': {'visible': False, 'forced': True, 'type': 'extractor', 'max': -1000},
6172 'aud_or_vid': {'visible': False, 'forced': True, 'type': 'multiple',
6173 'field': ('vcodec', 'acodec'),
6174 'function': lambda it: int(any(v != 'none' for v in it))},
6175 'ie_pref': {'priority': True, 'type': 'extractor'},
6176 'hasvid': {'priority': True, 'field': 'vcodec', 'type': 'boolean', 'not_in_list': ('none',)},
6177 'hasaud': {'field': 'acodec', 'type': 'boolean', 'not_in_list': ('none',)},
6178 'lang': {'convert': 'float', 'field': 'language_preference', 'default': -1},
6179 'quality': {'convert': 'float', 'default': -1},
6180 'filesize': {'convert': 'bytes'},
6181 'fs_approx': {'convert': 'bytes', 'field': 'filesize_approx'},
6182 'id': {'convert': 'string', 'field': 'format_id'},
6183 'height': {'convert': 'float_none'},
6184 'width': {'convert': 'float_none'},
6185 'fps': {'convert': 'float_none'},
6186 'channels': {'convert': 'float_none', 'field': 'audio_channels'},
6187 'tbr': {'convert': 'float_none'},
6188 'vbr': {'convert': 'float_none'},
6189 'abr': {'convert': 'float_none'},
6190 'asr': {'convert': 'float_none'},
6191 'source': {'convert': 'float', 'field': 'source_preference', 'default': -1},
6192
6193 'codec': {'type': 'combined', 'field': ('vcodec', 'acodec')},
6194 'br': {'type': 'combined', 'field': ('tbr', 'vbr', 'abr'), 'same_limit': True},
6195 'size': {'type': 'combined', 'same_limit': True, 'field': ('filesize', 'fs_approx')},
6196 'ext': {'type': 'combined', 'field': ('vext', 'aext')},
6197 'res': {'type': 'multiple', 'field': ('height', 'width'),
6198 'function': lambda it: (lambda l: min(l) if l else 0)(tuple(filter(None, it)))},
6199
6200 # Actual field names
6201 'format_id': {'type': 'alias', 'field': 'id'},
6202 'preference': {'type': 'alias', 'field': 'ie_pref'},
6203 'language_preference': {'type': 'alias', 'field': 'lang'},
6204 'source_preference': {'type': 'alias', 'field': 'source'},
6205 'protocol': {'type': 'alias', 'field': 'proto'},
6206 'filesize_approx': {'type': 'alias', 'field': 'fs_approx'},
6207 'audio_channels': {'type': 'alias', 'field': 'channels'},
6208
6209 # Deprecated
6210 'dimension': {'type': 'alias', 'field': 'res', 'deprecated': True},
6211 'resolution': {'type': 'alias', 'field': 'res', 'deprecated': True},
6212 'extension': {'type': 'alias', 'field': 'ext', 'deprecated': True},
6213 'bitrate': {'type': 'alias', 'field': 'br', 'deprecated': True},
6214 'total_bitrate': {'type': 'alias', 'field': 'tbr', 'deprecated': True},
6215 'video_bitrate': {'type': 'alias', 'field': 'vbr', 'deprecated': True},
6216 'audio_bitrate': {'type': 'alias', 'field': 'abr', 'deprecated': True},
6217 'framerate': {'type': 'alias', 'field': 'fps', 'deprecated': True},
6218 'filesize_estimate': {'type': 'alias', 'field': 'size', 'deprecated': True},
6219 'samplerate': {'type': 'alias', 'field': 'asr', 'deprecated': True},
6220 'video_ext': {'type': 'alias', 'field': 'vext', 'deprecated': True},
6221 'audio_ext': {'type': 'alias', 'field': 'aext', 'deprecated': True},
6222 'video_codec': {'type': 'alias', 'field': 'vcodec', 'deprecated': True},
6223 'audio_codec': {'type': 'alias', 'field': 'acodec', 'deprecated': True},
6224 'video': {'type': 'alias', 'field': 'hasvid', 'deprecated': True},
6225 'has_video': {'type': 'alias', 'field': 'hasvid', 'deprecated': True},
6226 'audio': {'type': 'alias', 'field': 'hasaud', 'deprecated': True},
6227 'has_audio': {'type': 'alias', 'field': 'hasaud', 'deprecated': True},
6228 'extractor': {'type': 'alias', 'field': 'ie_pref', 'deprecated': True},
6229 'extractor_preference': {'type': 'alias', 'field': 'ie_pref', 'deprecated': True},
6230 }
6231
6232 def __init__(self, ydl, field_preference):
6233 self.ydl = ydl
6234 self._order = []
6235 self.evaluate_params(self.ydl.params, field_preference)
6236 if ydl.params.get('verbose'):
6237 self.print_verbose_info(self.ydl.write_debug)
6238
6239 def _get_field_setting(self, field, key):
6240 if field not in self.settings:
6241 if key in ('forced', 'priority'):
6242 return False
6243 self.ydl.deprecated_feature(f'Using arbitrary fields ({field}) for format sorting is '
6244 'deprecated and may be removed in a future version')
6245 self.settings[field] = {}
6246 propObj = self.settings[field]
6247 if key not in propObj:
6248 type = propObj.get('type')
6249 if key == 'field':
6250 default = 'preference' if type == 'extractor' else (field,) if type in ('combined', 'multiple') else field
6251 elif key == 'convert':
6252 default = 'order' if type == 'ordered' else 'float_string' if field else 'ignore'
6253 else:
6254 default = {'type': 'field', 'visible': True, 'order': [], 'not_in_list': (None,)}.get(key, None)
6255 propObj[key] = default
6256 return propObj[key]
6257
6258 def _resolve_field_value(self, field, value, convertNone=False):
6259 if value is None:
6260 if not convertNone:
6261 return None
6262 else:
6263 value = value.lower()
6264 conversion = self._get_field_setting(field, 'convert')
6265 if conversion == 'ignore':
6266 return None
6267 if conversion == 'string':
6268 return value
6269 elif conversion == 'float_none':
6270 return float_or_none(value)
6271 elif conversion == 'bytes':
6272 return parse_bytes(value)
6273 elif conversion == 'order':
6274 order_list = (self._use_free_order and self._get_field_setting(field, 'order_free')) or self._get_field_setting(field, 'order')
6275 use_regex = self._get_field_setting(field, 'regex')
6276 list_length = len(order_list)
6277 empty_pos = order_list.index('') if '' in order_list else list_length + 1
6278 if use_regex and value is not None:
6279 for i, regex in enumerate(order_list):
6280 if regex and re.match(regex, value):
6281 return list_length - i
6282 return list_length - empty_pos # not in list
6283 else: # not regex or value = None
6284 return list_length - (order_list.index(value) if value in order_list else empty_pos)
6285 else:
6286 if value.isnumeric():
6287 return float(value)
6288 else:
6289 self.settings[field]['convert'] = 'string'
6290 return value
6291
6292 def evaluate_params(self, params, sort_extractor):
6293 self._use_free_order = params.get('prefer_free_formats', False)
6294 self._sort_user = params.get('format_sort', [])
6295 self._sort_extractor = sort_extractor
6296
6297 def add_item(field, reverse, closest, limit_text):
6298 field = field.lower()
6299 if field in self._order:
6300 return
6301 self._order.append(field)
6302 limit = self._resolve_field_value(field, limit_text)
6303 data = {
6304 'reverse': reverse,
6305 'closest': False if limit is None else closest,
6306 'limit_text': limit_text,
6307 'limit': limit}
6308 if field in self.settings:
6309 self.settings[field].update(data)
6310 else:
6311 self.settings[field] = data
6312
6313 sort_list = (
6314 tuple(field for field in self.default if self._get_field_setting(field, 'forced'))
6315 + (tuple() if params.get('format_sort_force', False)
6316 else tuple(field for field in self.default if self._get_field_setting(field, 'priority')))
6317 + tuple(self._sort_user) + tuple(sort_extractor) + self.default)
6318
6319 for item in sort_list:
6320 match = re.match(self.regex, item)
6321 if match is None:
6322 raise ExtractorError('Invalid format sort string "%s" given by extractor' % item)
6323 field = match.group('field')
6324 if field is None:
6325 continue
6326 if self._get_field_setting(field, 'type') == 'alias':
6327 alias, field = field, self._get_field_setting(field, 'field')
6328 if self._get_field_setting(alias, 'deprecated'):
6329 self.ydl.deprecated_feature(f'Format sorting alias {alias} is deprecated and may '
6330 f'be removed in a future version. Please use {field} instead')
6331 reverse = match.group('reverse') is not None
6332 closest = match.group('separator') == '~'
6333 limit_text = match.group('limit')
6334
6335 has_limit = limit_text is not None
6336 has_multiple_fields = self._get_field_setting(field, 'type') == 'combined'
6337 has_multiple_limits = has_limit and has_multiple_fields and not self._get_field_setting(field, 'same_limit')
6338
6339 fields = self._get_field_setting(field, 'field') if has_multiple_fields else (field,)
6340 limits = limit_text.split(':') if has_multiple_limits else (limit_text,) if has_limit else tuple()
6341 limit_count = len(limits)
6342 for (i, f) in enumerate(fields):
6343 add_item(f, reverse, closest,
6344 limits[i] if i < limit_count
6345 else limits[0] if has_limit and not has_multiple_limits
6346 else None)
6347
6348 def print_verbose_info(self, write_debug):
6349 if self._sort_user:
6350 write_debug('Sort order given by user: %s' % ', '.join(self._sort_user))
6351 if self._sort_extractor:
6352 write_debug('Sort order given by extractor: %s' % ', '.join(self._sort_extractor))
6353 write_debug('Formats sorted by: %s' % ', '.join(['%s%s%s' % (
6354 '+' if self._get_field_setting(field, 'reverse') else '', field,
6355 '%s%s(%s)' % ('~' if self._get_field_setting(field, 'closest') else ':',
6356 self._get_field_setting(field, 'limit_text'),
6357 self._get_field_setting(field, 'limit'))
6358 if self._get_field_setting(field, 'limit_text') is not None else '')
6359 for field in self._order if self._get_field_setting(field, 'visible')]))
6360
6361 def _calculate_field_preference_from_value(self, format, field, type, value):
6362 reverse = self._get_field_setting(field, 'reverse')
6363 closest = self._get_field_setting(field, 'closest')
6364 limit = self._get_field_setting(field, 'limit')
6365
6366 if type == 'extractor':
6367 maximum = self._get_field_setting(field, 'max')
6368 if value is None or (maximum is not None and value >= maximum):
6369 value = -1
6370 elif type == 'boolean':
6371 in_list = self._get_field_setting(field, 'in_list')
6372 not_in_list = self._get_field_setting(field, 'not_in_list')
6373 value = 0 if ((in_list is None or value in in_list) and (not_in_list is None or value not in not_in_list)) else -1
6374 elif type == 'ordered':
6375 value = self._resolve_field_value(field, value, True)
6376
6377 # try to convert to number
6378 val_num = float_or_none(value, default=self._get_field_setting(field, 'default'))
6379 is_num = self._get_field_setting(field, 'convert') != 'string' and val_num is not None
6380 if is_num:
6381 value = val_num
6382
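        # The tuple built below is compared element-wise against the same field's
        # tuple from other formats; a larger tuple ranks the format higher. In rough
        # terms (a sketch of the branches, not a restatement of every edge case):
        # missing values sink to the bottom, non-numeric strings sort above numbers,
        # '~' (closest) prefers the smallest distance to the limit, and the remaining
        # branches prefer larger values up to the limit while penalising values on
        # the wrong side of it.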
6383 return ((-10, 0) if value is None
6384 else (1, value, 0) if not is_num # if a field has mixed strings and numbers, strings are sorted higher
6385 else (0, -abs(value - limit), value - limit if reverse else limit - value) if closest
6386 else (0, value, 0) if not reverse and (limit is None or value <= limit)
6387 else (0, -value, 0) if limit is None or (reverse and value == limit) or value > limit
6388 else (-1, value, 0))
6389
6390 def _calculate_field_preference(self, format, field):
6391 type = self._get_field_setting(field, 'type') # extractor, boolean, ordered, field, multiple
6392 get_value = lambda f: format.get(self._get_field_setting(f, 'field'))
6393 if type == 'multiple':
6394 type = 'field' # Only 'field' is allowed in multiple for now
6395 actual_fields = self._get_field_setting(field, 'field')
6396
6397 value = self._get_field_setting(field, 'function')(get_value(f) for f in actual_fields)
6398 else:
6399 value = get_value(field)
6400 return self._calculate_field_preference_from_value(format, field, type, value)
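        # Illustrative only: a 'multiple' entry aggregates several raw format fields
        # through the callable stored in its 'function' setting -- e.g. something like
        # min() applied over a pair of dimension fields for a resolution-style sort key.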
6401
6402 def calculate_preference(self, format):
6403 # Determine missing protocol
6404 if not format.get('protocol'):
6405 format['protocol'] = determine_protocol(format)
6406
6407 # Determine missing ext
6408 if not format.get('ext') and 'url' in format:
6409 format['ext'] = determine_ext(format['url'])
6410 if format.get('vcodec') == 'none':
6411 format['audio_ext'] = format['ext'] if format.get('acodec') != 'none' else 'none'
6412 format['video_ext'] = 'none'
6413 else:
6414 format['video_ext'] = format['ext']
6415 format['audio_ext'] = 'none'
6416 # if format.get('preference') is None and format.get('ext') in ('f4f', 'f4m'): # Not supported?
6417 # format['preference'] = -1000
6418
5424dbaf
L
6419 if format.get('preference') is None and format.get('ext') == 'flv' and re.match('[hx]265|he?vc?', format.get('vcodec') or ''):
6420             # HEVC-over-FLV is out of spec per the original FLV specification
6421 # ref. https://trac.ffmpeg.org/ticket/6389
6422 # ref. https://github.com/yt-dlp/yt-dlp/pull/5821
6423 format['preference'] = -100
6424
d0d74b71 6425 # Determine missing bitrates
6426 if format.get('tbr') is None:
6427 if format.get('vbr') is not None and format.get('abr') is not None:
6428 format['tbr'] = format.get('vbr', 0) + format.get('abr', 0)
6429 else:
6430 if format.get('vcodec') != 'none' and format.get('vbr') is None:
6431 format['vbr'] = format.get('tbr') - format.get('abr', 0)
6432 if format.get('acodec') != 'none' and format.get('abr') is None:
6433 format['abr'] = format.get('tbr') - format.get('vbr', 0)
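        # e.g. vbr=1000 and abr=128 give tbr=1128; conversely, a known tbr=1128 with
        # abr=128 lets the missing vbr be filled in as 1000 (and likewise for abr)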
6434
6435 return tuple(self._calculate_field_preference(format, field) for field in self._order)
6436
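# A minimal usage sketch (illustrative; 'sorter' stands in for an instance created by
# the surrounding machinery): calculate_preference maps each format dict to a tuple of
# per-field preferences, so sorting with it as the key orders the list from least to
# most preferred:
#   formats.sort(key=sorter.calculate_preference)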
6437
9b8ee23b 6438# Deprecated
6439has_certifi = bool(certifi)
6440has_websockets = bool(websockets)
8e40b9d1
M
6441
6442
6443def load_plugins(name, suffix, namespace):
6444 from .plugins import load_plugins
6445 ret = load_plugins(name, suffix)
6446 namespace.update(ret)
6447 return ret
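# Illustrative only: a package that wants third-party plugin classes merged into its
# own namespace could call, e.g.,
#   load_plugins('extractor', 'IE', globals())
# which updates globals() in place and also returns the mapping of loaded classes.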