#!/usr/bin/env python3
# coding: utf-8

from __future__ import unicode_literals

import asyncio
import atexit
import base64
import binascii
import calendar
import codecs
import collections
import contextlib
import ctypes
import datetime
import email.utils
import email.header
import errno
import functools
import gzip
import hashlib
import hmac
import importlib.util
import io
import itertools
import json
import locale
import math
import operator
import os
import platform
import random
import re
import socket
import ssl
import subprocess
import sys
import tempfile
import time
import traceback
import xml.etree.ElementTree
import zlib
import mimetypes

from .compat import (
    compat_HTMLParseError,
    compat_HTMLParser,
    compat_HTTPError,
    compat_basestring,
    compat_chr,
    compat_cookiejar,
    compat_ctypes_WINFUNCTYPE,
    compat_etree_fromstring,
    compat_expanduser,
    compat_html_entities,
    compat_html_entities_html5,
    compat_http_client,
    compat_integer_types,
    compat_numeric_types,
    compat_kwargs,
    compat_os_name,
    compat_parse_qs,
    compat_shlex_split,
    compat_shlex_quote,
    compat_str,
    compat_struct_pack,
    compat_struct_unpack,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_parse_urlencode,
    compat_urllib_parse_urlparse,
    compat_urllib_parse_urlunparse,
    compat_urllib_parse_quote,
    compat_urllib_parse_quote_plus,
    compat_urllib_parse_unquote_plus,
    compat_urllib_request,
    compat_urlparse,
    compat_websockets,
    compat_xpath,
)

from .socks import (
    ProxyType,
    sockssocket,
)


def register_socks_protocols():
    # "Register" SOCKS protocols
    # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
    # URLs with protocols not in urlparse.uses_netloc are not handled correctly
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in compat_urlparse.uses_netloc:
            compat_urlparse.uses_netloc.append(scheme)


# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))


def random_user_agent():
    _USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
    _CHROME_VERSIONS = (
        '90.0.4430.212',
        '90.0.4430.24',
        '90.0.4430.70',
        '90.0.4430.72',
        '90.0.4430.85',
        '90.0.4430.93',
        '91.0.4472.101',
        '91.0.4472.106',
        '91.0.4472.114',
        '91.0.4472.124',
        '91.0.4472.164',
        '91.0.4472.19',
        '91.0.4472.77',
        '92.0.4515.107',
        '92.0.4515.115',
        '92.0.4515.131',
        '92.0.4515.159',
        '92.0.4515.43',
        '93.0.4556.0',
        '93.0.4577.15',
        '93.0.4577.63',
        '93.0.4577.82',
        '94.0.4606.41',
        '94.0.4606.54',
        '94.0.4606.61',
        '94.0.4606.71',
        '94.0.4606.81',
        '94.0.4606.85',
        '95.0.4638.17',
        '95.0.4638.50',
        '95.0.4638.54',
        '95.0.4638.69',
        '95.0.4638.74',
        '96.0.4664.18',
        '96.0.4664.45',
        '96.0.4664.55',
        '96.0.4664.93',
        '97.0.4692.20',
    )
    return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)

std_headers = {
    'User-Agent': random_user_agent(),
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
    'Sec-Fetch-Mode': 'navigate',
}


USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}


NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}

KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'avi', 'divx',
    'mov',
    'asf', 'wmv', 'wma',
    '3gp', '3g2',
    'mp3',
    'flac',
    'ape',
    'wav',
    'f4f', 'f4m', 'm3u8', 'smil')

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))

DATE_FORMATS = (
    '%d %B %Y',
    '%d %b %Y',
    '%B %d %Y',
    '%B %dst %Y',
    '%B %dnd %Y',
    '%B %drd %Y',
    '%B %dth %Y',
    '%b %d %Y',
    '%b %dst %Y',
    '%b %dnd %Y',
    '%b %drd %Y',
    '%b %dth %Y',
    '%b %dst %Y %I:%M',
    '%b %dnd %Y %I:%M',
    '%b %drd %Y %I:%M',
    '%b %dth %Y %I:%M',
    '%Y %m %d',
    '%Y-%m-%d',
    '%Y.%m.%d.',
    '%Y/%m/%d',
    '%Y/%m/%d %H:%M',
    '%Y/%m/%d %H:%M:%S',
    '%Y%m%d%H%M',
    '%Y%m%d%H%M%S',
    '%Y%m%d',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S.%f',
    '%Y-%m-%d %H:%M:%S:%f',
    '%d.%m.%Y %H:%M',
    '%d.%m.%Y %H.%M',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%Y-%m-%dT%H:%M',
    '%b %d %Y at %H:%M',
    '%b %d %Y at %H:%M:%S',
    '%B %d %Y at %H:%M',
    '%B %d %Y at %H:%M:%S',
    '%H:%M %d-%b-%Y',
)

DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
])

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])

PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'

def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref


def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non ascii characters unless we
        # use a unicode object
        path_basename = lambda f: os.path.basename(fn).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    args = {
        'suffix': '.tmp',
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args.update({
            'mode': 'w',
            'encoding': 'utf-8',
        })

    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))

    try:
        with tf:
            json.dump(obj, tf, ensure_ascii=False)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        try:
            mask = os.umask(0)
            os.umask(mask)
            os.chmod(tf.name, 0o666 & ~mask)
        except OSError:
            pass
        os.rename(tf.name, fn)
    except Exception:
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise

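# Illustrative usage of write_json_file (example only, not part of the original module;
# the filename and dict are hypothetical):
#     write_json_file({'id': 'abc123', 'title': 'Example'}, 'info.json')
# serializes the object to JSON and replaces 'info.json' by writing to a temporary
# file in the same directory first and renaming it into place.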
if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] """
        assert re.match(r'^[a-zA-Z_-]+$', key)
        expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
        return node.find(expr)
else:
    def find_xpath_attr(node, xpath, key, val=None):
        for f in node.findall(compat_xpath(xpath)):
            if key not in f.attrib:
                continue
            if val is None or f.attrib.get(key) == val:
                return f
        return None

# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter


def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)

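# Illustrative usage of xpath_with_ns (example only; the prefix mapping is hypothetical):
#     xpath_with_ns('./media:title', {'media': 'http://search.yahoo.com/mrss/'})
# should expand to './{http://search.yahoo.com/mrss/}title', the form ElementTree expects.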
def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(compat_xpath(xpath))

    if isinstance(xpath, (str, compat_str)):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n


def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text


def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = '%s[@%s]' % (xpath, key) if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]

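# Illustrative usage of the xpath_* helpers (example only, not part of the original module):
#     doc = compat_etree_fromstring('<root><title>Foo</title></root>')
#     xpath_text(doc, './title')                    # -> 'Foo'
#     xpath_text(doc, './missing', default=None)    # -> None (no exception)
#     xpath_text(doc, './missing', fatal=True)      # would raise ExtractorError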
def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html)


def get_element_html_by_id(id, html):
    """Return the html of the tag with the specified ID in the passed HTML document"""
    return get_element_html_by_attribute('id', id, html)


def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_html_by_class(class_name, html):
    """Return the html of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_html_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_by_attribute(attribute, value, html, escape_value=True):
    retval = get_elements_by_attribute(attribute, value, html, escape_value)
    return retval[0] if retval else None


def get_element_html_by_attribute(attribute, value, html, escape_value=True):
    retval = get_elements_html_by_attribute(attribute, value, html, escape_value)
    return retval[0] if retval else None


def get_elements_by_class(class_name, html):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_html_by_class(class_name, html):
    """Return the html of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_html_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_by_attribute(*args, **kwargs):
    """Return the content of all tags with the specified attribute in the passed HTML document as a list"""
    return [content for content, _ in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_html_by_attribute(*args, **kwargs):
    """Return the html of all tags with the specified attribute in the passed HTML document as a list"""
    return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]


def get_elements_text_and_html_by_attribute(attribute, value, html, escape_value=True):
    """
    Return the text (content) and the html (whole) of each tag with the specified
    attribute in the passed HTML document
    """

    value_quote_optional = '' if re.match(r'''[\s"'`=<>]''', value) else '?'

    value = re.escape(value) if escape_value else value

    partial_element_re = r'''(?x)
        <(?P<tag>[a-zA-Z0-9:._-]+)
         (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
         \s%(attribute)s\s*=\s*(?P<_q>['"]%(vqo)s)(?-x:%(value)s)(?P=_q)
        ''' % {'attribute': re.escape(attribute), 'value': value, 'vqo': value_quote_optional}

    for m in re.finditer(partial_element_re, html):
        content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():])

        yield (
            unescapeHTML(re.sub(r'^(?P<q>["\'])(?P<content>.*)(?P=q)$', r'\g<content>', content, flags=re.DOTALL)),
            whole
        )

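# Illustrative usage of the get_element_* helpers (example only; the markup is made up):
#     get_element_by_class('title', '<div class="video title">Foo</div>')
# is expected to return 'Foo', while the *_html_* variants return the whole
# matching element including its tags.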
class HTMLBreakOnClosingTagParser(compat_HTMLParser):
    """
    HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
    closing tag for the first opening tag it has encountered, and can be used
    as a context manager
    """

    class HTMLBreakOnClosingTagException(Exception):
        pass

    def __init__(self):
        self.tagstack = collections.deque()
        compat_HTMLParser.__init__(self)

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def close(self):
        # handle_endtag does not return upon raising HTMLBreakOnClosingTagException,
        # so data remains buffered; we no longer have any interest in it, thus
        # override this method to discard it
        pass

    def handle_starttag(self, tag, _):
        self.tagstack.append(tag)

    def handle_endtag(self, tag):
        if not self.tagstack:
            raise compat_HTMLParseError('no tags in the stack')
        while self.tagstack:
            inner_tag = self.tagstack.pop()
            if inner_tag == tag:
                break
        else:
            raise compat_HTMLParseError(f'matching opening tag for closing {tag} tag not found')
        if not self.tagstack:
            raise self.HTMLBreakOnClosingTagException()


def get_element_text_and_html_by_tag(tag, html):
    """
    For the first element with the specified tag in the passed HTML document
    return its content (text) and the whole element (html)
    """
    def find_or_raise(haystack, needle, exc):
        try:
            return haystack.index(needle)
        except ValueError:
            raise exc
    closing_tag = f'</{tag}>'
    whole_start = find_or_raise(
        html, f'<{tag}', compat_HTMLParseError(f'opening {tag} tag not found'))
    content_start = find_or_raise(
        html[whole_start:], '>', compat_HTMLParseError(f'malformed opening {tag} tag'))
    content_start += whole_start + 1
    with HTMLBreakOnClosingTagParser() as parser:
        parser.feed(html[whole_start:content_start])
        if not parser.tagstack or parser.tagstack[0] != tag:
            raise compat_HTMLParseError(f'parser did not match opening {tag} tag')
        offset = content_start
        while offset < len(html):
            next_closing_tag_start = find_or_raise(
                html[offset:], closing_tag,
                compat_HTMLParseError(f'closing {tag} tag not found'))
            next_closing_tag_end = next_closing_tag_start + len(closing_tag)
            try:
                parser.feed(html[offset:offset + next_closing_tag_end])
                offset += next_closing_tag_end
            except HTMLBreakOnClosingTagParser.HTMLBreakOnClosingTagException:
                return html[content_start:offset + next_closing_tag_start], \
                    html[whole_start:offset + next_closing_tag_end]
        raise compat_HTMLParseError('unexpected end of html')

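# Illustrative usage (example only, not part of the original module):
#     get_element_text_and_html_by_tag('span', '<div><span>hi</span></div>')
# should return the pair ('hi', '<span>hi</span>').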
class HTMLAttributeParser(compat_HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""

    def __init__(self):
        self.attrs = {}
        compat_HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)


class HTMLListAttrsParser(compat_HTMLParser):
    """HTML parser to gather the attributes for the elements of a list"""

    def __init__(self):
        compat_HTMLParser.__init__(self)
        self.items = []
        self._level = 0

    def handle_starttag(self, tag, attrs):
        if tag == 'li' and self._level == 0:
            self.items.append(dict(attrs))
        self._level += 1

    def handle_endtag(self, tag):
        self._level -= 1


def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
    but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
    """
    parser = HTMLAttributeParser()
    try:
        parser.feed(html_element)
        parser.close()
    # Older Python may throw HTMLParseError in case of malformed HTML
    except compat_HTMLParseError:
        pass
    return parser.attrs

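# Illustrative usage of extract_attributes (example only; the markup is made up):
#     extract_attributes('<a href="/watch?v=x" class=small data-id="42">')
# is expected to yield {'href': '/watch?v=x', 'class': 'small', 'data-id': '42'},
# with entity references decoded and attribute names lower-cased by HTMLParser.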
def parse_list(webpage):
    """Given a string for a series of HTML <li> elements,
    return a list of dictionaries of their attributes"""
    parser = HTMLListAttrsParser()
    parser.feed(webpage)
    parser.close()
    return parser.items


def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    html = re.sub(r'\s+', ' ', html)
    html = re.sub(r'(?u)\s?<\s?br\s?/?\s?>\s?', '\n', html)
    html = re.sub(r'(?u)<\s?/\s?p\s?>\s?<\s?p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()

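# Illustrative behaviour of clean_html (example only, not part of the original module):
#     clean_html('<p>First line<br/>Second line &amp; more</p>')
# should give 'First line\nSecond line & more': <br> becomes a newline,
# remaining tags are stripped and entities are unescaped.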
def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = locked_file(filename, open_mode, block=False).open()
        return (stream, filename)
    except (IOError, OSError) as err:
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars
        alt_filename = sanitize_path(filename)
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller
            stream = locked_file(filename, open_mode, block=False).open()
            return (stream, alt_filename)


def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp


def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept
    if possible.
    """
    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        elif not restricted and char == '\n':
            return ' '
        elif char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '_-' if restricted else ' -'
        elif char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and ord(char) > 127:
            return '_'
        return char

    if s == '':
        return ''
    # Handle timestamps
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
    result = ''.join(map(replace_insane, s))
    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result

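# Illustrative behaviour of sanitize_filename (example only; exact output depends on the flags):
#     sanitize_filename('A: B/C?')                    # roughly 'A - B_C'
#     sanitize_filename('A: B/C?', restricted=True)   # roughly 'A_-_B_C'
# ':' is rewritten, path separators become '_' and '?' is dropped.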
def sanitize_path(s, force=False):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform == 'win32':
        force = False
        drive_or_unc, _ = os.path.splitdrive(s)
        if sys.version_info < (2, 7) and not drive_or_unc:
            drive_or_unc, _ = os.path.splitunc(s)
    elif force:
        drive_or_unc = ''
    else:
        return s

    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    elif force and s[0] == os.path.sep:
        sanitized_path.insert(0, os.path.sep)
    return os.path.join(*sanitized_path)


def sanitize_url(url):
    # Prepend protocol-less URLs with `http:` scheme in order to mitigate
    # the number of unwanted failures due to missing protocol
    if url.startswith('//'):
        return 'http:%s' % url
    # Fix some common typos seen so far
    COMMON_TYPOS = (
        # https://github.com/ytdl-org/youtube-dl/issues/15649
        (r'^httpss://', r'https://'),
        # https://bx1.be/lives/direct-tv/
        (r'^rmtp([es]?)://', r'rtmp\1://'),
    )
    for mistake, fixup in COMMON_TYPOS:
        if re.match(mistake, url):
            return re.sub(mistake, fixup, url)
    return url

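# Illustrative behaviour of sanitize_url (example only; hostnames are placeholders):
#     sanitize_url('//cdn.example.com/video.mp4')   # -> 'http://cdn.example.com/video.mp4'
#     sanitize_url('httpss://example.com/')         # -> 'https://example.com/'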
def extract_basic_auth(url):
    parts = compat_urlparse.urlsplit(url)
    if parts.username is None:
        return url, None
    url = compat_urlparse.urlunsplit(parts._replace(netloc=(
        parts.hostname if parts.port is None
        else '%s:%d' % (parts.hostname, parts.port))))
    auth_payload = base64.b64encode(
        ('%s:%s' % (parts.username, parts.password or '')).encode('utf-8'))
    return url, 'Basic ' + auth_payload.decode('utf-8')

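# Illustrative behaviour of extract_basic_auth (example only; credentials are made up):
#     extract_basic_auth('http://user:pass@example.com/feed')
# should return ('http://example.com/feed', 'Basic dXNlcjpwYXNz'), i.e. the
# credentials are stripped from the URL and returned as an Authorization value.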
def sanitized_Request(url, *args, **kwargs):
    url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
    if auth_header is not None:
        headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
        headers['Authorization'] = auth_header
    return compat_urllib_request.Request(url, *args, **kwargs)


def expand_path(s):
    """Expand shell variables and ~"""
    return os.path.expandvars(compat_expanduser(s))


def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    res = []
    for el in iterable:
        if el not in res:
            res.append(el)
    return res


def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in compat_html_entities_html5:
        return compat_html_entities_html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/ytdl-org/youtube-dl/issues/7518
        try:
            return compat_chr(int(numstr, base))
        except ValueError:
            pass

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert type(s) == compat_str

    return re.sub(
        r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)

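# Illustrative behaviour of unescapeHTML (example only, not part of the original module):
#     unescapeHTML('Caf&eacute; &#38; bar')   # -> 'Café & bar'
#     unescapeHTML('&bogus;')                 # -> '&bogus;' (unknown entities are kept)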
def escapeHTML(text):
    return (
        text
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;')
        .replace('"', '&quot;')
        .replace("'", '&#39;')
    )


def process_communicate_or_kill(p, *args, **kwargs):
    try:
        return p.communicate(*args, **kwargs)
    except BaseException:  # Including KeyboardInterrupt
        p.kill()
        p.wait()
        raise


class Popen(subprocess.Popen):
    if sys.platform == 'win32':
        _startupinfo = subprocess.STARTUPINFO()
        _startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    else:
        _startupinfo = None

    def __init__(self, *args, **kwargs):
        super(Popen, self).__init__(*args, **kwargs, startupinfo=self._startupinfo)

    def communicate_or_kill(self, *args, **kwargs):
        return process_communicate_or_kill(self, *args, **kwargs)


def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding


def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """

    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    # Pass '' directly to use Unicode APIs on Windows 2000 and up
    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        return s

    # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
    if sys.platform.startswith('java'):
        return s

    return s.encode(get_subprocess_encoding(), 'ignore')


def decodeFilename(b, for_subprocess=False):

    if sys.version_info >= (3, 0):
        return b

    if not isinstance(b, bytes):
        return b

    return b.decode(get_subprocess_encoding(), 'ignore')


def encodeArgument(s):
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)


def decodeArgument(b):
    return decodeFilename(b, True)


def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval


_timetuple = collections.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))


def timetuple_from_msec(msec):
    secs, msec = divmod(msec, 1000)
    mins, secs = divmod(secs, 60)
    hrs, mins = divmod(mins, 60)
    return _timetuple(hrs, mins, secs, msec)


def formatSeconds(secs, delim=':', msec=False):
    time = timetuple_from_msec(secs * 1000)
    if time.hours:
        ret = '%d%s%02d%s%02d' % (time.hours, delim, time.minutes, delim, time.seconds)
    elif time.minutes:
        ret = '%d%s%02d' % (time.minutes, delim, time.seconds)
    else:
        ret = '%d' % time.seconds
    return '%s.%03d' % (ret, time.milliseconds) if msec else ret

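# Illustrative behaviour (example only, not part of the original module):
#     timetuple_from_msec(123456)       # -> Time(hours=0, minutes=2, seconds=3, milliseconds=456)
#     formatSeconds(3723)               # -> '1:02:03'
#     formatSeconds(3723, msec=True)    # -> '1:02:03.000'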
def _ssl_load_windows_store_certs(ssl_context, storename):
    # Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
    try:
        certs = [cert for cert, encoding, trust in ssl.enum_certificates(storename)
                 if encoding == 'x509_asn' and (
                     trust is True or ssl.Purpose.SERVER_AUTH.oid in trust)]
    except PermissionError:
        return
    for cert in certs:
        try:
            ssl_context.load_verify_locations(cadata=cert)
        except ssl.SSLError:
            pass


def make_HTTPS_handler(params, **kwargs):
    opts_check_certificate = not params.get('nocheckcertificate')
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = opts_check_certificate
    if params.get('legacyserverconnect'):
        context.options |= 4  # SSL_OP_LEGACY_SERVER_CONNECT
    context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
    if opts_check_certificate:
        try:
            context.load_default_certs()
            # Work around the issue in load_default_certs when there are bad certificates. See:
            # https://github.com/yt-dlp/yt-dlp/issues/1060,
            # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
        except ssl.SSLError:
            # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
            if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
                # Create a new context to discard any certificates that were already loaded
                context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
                context.check_hostname, context.verify_mode = True, ssl.CERT_REQUIRED
                for storename in ('CA', 'ROOT'):
                    _ssl_load_windows_store_certs(context, storename)
            context.set_default_verify_paths()
    return YoutubeDLHTTPSHandler(params, context=context, **kwargs)


def bug_reports_message(before=';'):
    msg = ('please report this issue on https://github.com/yt-dlp/yt-dlp , '
           'filling out the "Broken site" issue template properly. '
           'Confirm you are on the latest version using -U')

    before = before.rstrip()
    if not before or before.endswith(('.', '!', '?')):
        msg = msg[0].title() + msg[1:]

    return (before + ' ' if before else '') + msg

class YoutubeDLError(Exception):
    """Base exception for YoutubeDL errors."""
    msg = None

    def __init__(self, msg=None):
        if msg is not None:
            self.msg = msg
        elif self.msg is None:
            self.msg = type(self).__name__
        super().__init__(self.msg)


network_exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
    network_exceptions.append(ssl.CertificateError)
network_exceptions = tuple(network_exceptions)


class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
        """
        if sys.exc_info()[0] in network_exceptions:
            expected = True

        self.msg = str(msg)
        self.traceback = tb
        self.expected = expected
        self.cause = cause
        self.video_id = video_id
        self.ie = ie
        self.exc_info = sys.exc_info()  # preserve original exception

        super(ExtractorError, self).__init__(''.join((
            format_field(ie, template='[%s] '),
            format_field(video_id, template='%s: '),
            self.msg,
            format_field(cause, template=' (caused by %r)'),
            '' if expected else bug_reports_message())))

    def format_traceback(self):
        if self.traceback is None:
            return None
        return ''.join(traceback.format_tb(self.traceback))


class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super(UnsupportedError, self).__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class GeoRestrictedError(ExtractorError):
    """Geographic restriction Error exception.

    This exception may be thrown when a video is not available from your
    geographic location due to geographic restrictions imposed by a website.
    """

    def __init__(self, msg, countries=None, **kwargs):
        kwargs['expected'] = True
        super(GeoRestrictedError, self).__init__(msg, **kwargs)
        self.countries = countries


class DownloadError(YoutubeDLError):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info


class EntryNotInPlaylist(YoutubeDLError):
    """Entry not in playlist exception.

    This exception will be thrown by YoutubeDL when a requested entry
    is not found in the playlist info_dict
    """
    msg = 'Entry not found in info'


class SameFileError(YoutubeDLError):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    msg = 'Fixed output name but more than one file to download'

    def __init__(self, filename=None):
        if filename is not None:
            self.msg += f': {filename}'
        super().__init__(self.msg)


class PostProcessingError(YoutubeDLError):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """


class DownloadCancelled(YoutubeDLError):
    """ Exception raised when the download queue should be interrupted """
    msg = 'The download was cancelled'


class ExistingVideoReached(DownloadCancelled):
    """ --break-on-existing triggered """
    msg = 'Encountered a video that is already in the archive, stopping due to --break-on-existing'


class RejectedVideoReached(DownloadCancelled):
    """ --break-on-reject triggered """
    msg = 'Encountered a video that did not match filter, stopping due to --break-on-reject'


class MaxDownloadsReached(DownloadCancelled):
    """ --max-downloads limit has been reached. """
    msg = 'Maximum number of downloads reached, stopping due to --max-downloads'


class ReExtractInfo(YoutubeDLError):
    """ Video info needs to be re-extracted. """

    def __init__(self, msg, expected=False):
        super().__init__(msg)
        self.expected = expected


class ThrottledDownload(ReExtractInfo):
    """ Download speed below --throttled-rate. """
    msg = 'The download speed is below throttle limit'

    def __init__(self):
        super().__init__(self.msg, expected=False)


class UnavailableVideoError(YoutubeDLError):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    msg = 'Unable to download video'

    def __init__(self, err=None):
        if err is not None:
            self.msg += f': {err}'
        super().__init__(self.msg)


class ContentTooShortError(YoutubeDLError):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        super(ContentTooShortError, self).__init__(
            'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
        )
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected


class XAttrMetadataError(YoutubeDLError):
    def __init__(self, code=None, msg='Unknown error'):
        super(XAttrMetadataError, self).__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        if (self.code in (errno.ENOSPC, errno.EDQUOT)
                or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'


class XAttrUnavailableError(YoutubeDLError):
    pass

def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
    # expected HTTP responses to meet HTTP/1.0 or later (see also
    # https://github.com/ytdl-org/youtube-dl/issues/6727)
    if sys.version_info < (3, 0):
        kwargs['strict'] = True
    hc = http_class(*args, **compat_kwargs(kwargs))
    source_address = ydl_handler._params.get('source_address')

    if source_address is not None:
        # This is to workaround _create_connection() from socket where it will try all
        # address data from getaddrinfo() including IPv6. This filters the result from
        # getaddrinfo() based on the source_address value.
        # This is based on the cpython socket.create_connection() function.
        # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
        def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
            host, port = address
            err = None
            addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
            af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
            ip_addrs = [addr for addr in addrs if addr[0] == af]
            if addrs and not ip_addrs:
                ip_version = 'v4' if af == socket.AF_INET else 'v6'
                raise socket.error(
                    "No remote IP%s addresses available for connect, can't use '%s' as source address"
                    % (ip_version, source_address[0]))
            for res in ip_addrs:
                af, socktype, proto, canonname, sa = res
                sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                        sock.settimeout(timeout)
                    sock.bind(source_address)
                    sock.connect(sa)
                    err = None  # Explicitly break reference cycle
                    return sock
                except socket.error as _:
                    err = _
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            else:
                raise socket.error('getaddrinfo returns an empty list')
        if hasattr(hc, '_create_connection'):
            hc._create_connection = _create_connection
        sa = (source_address, 0)
        if hasattr(hc, 'source_address'):  # Python 2.7+
            hc.source_address = sa
        else:  # Python 2.6
            def _hc_connect(self, *args, **kwargs):
                sock = _create_connection(
                    (self.host, self.port), self.timeout, sa)
                if is_https:
                    self.sock = ssl.wrap_socket(
                        sock, self.key_file, self.cert_file,
                        ssl_version=ssl.PROTOCOL_TLSv1)
                else:
                    self.sock = sock
            hc.connect = functools.partial(_hc_connect, hc)

    return hc


def handle_youtubedl_headers(headers):
    filtered_headers = headers

    if 'Youtubedl-no-compression' in filtered_headers:
        filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
        del filtered_headers['Youtubedl-no-compression']

    return filtered_headers

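# Illustrative behaviour of handle_youtubedl_headers (example only; header values are made up):
#     handle_youtubedl_headers({'User-Agent': 'UA', 'Accept-Encoding': 'gzip',
#                               'Youtubedl-no-compression': '1'})
# should return {'User-Agent': 'UA'}: the internal marker header and any
# Accept-Encoding header are dropped so the request is made uncompressed.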
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        conn_class = compat_http_client.HTTPConnection

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        if not data:
            return data
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in std_headers.items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/ytdl-org/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                if sys.version_info >= (3, 0):
                    location = location.encode('iso-8859-1').decode('utf-8')
                else:
                    location = location.decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    if sys.version_info < (3, 0):
                        location_escaped = location_escaped.encode('utf-8')
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response

def make_socks_conn_class(base_class, socks_proxy):
    assert issubclass(base_class, (
        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))

    url_components = compat_urlparse.urlparse(socks_proxy)
    if url_components.scheme.lower() == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif url_components.scheme.lower() in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif url_components.scheme.lower() == 'socks4a':
        socks_type = ProxyType.SOCKS4A

    def unquote_if_non_empty(s):
        if not s:
            return s
        return compat_urllib_parse_unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if type(self.timeout) in (int, float):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            if isinstance(self, compat_http_client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection

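# Illustrative usage of make_socks_conn_class (example only; the proxy URL is hypothetical):
#     conn_class = make_socks_conn_class(
#         compat_http_client.HTTPSConnection, 'socks5://127.0.0.1:9050')
# returns a connection class that tunnels through the given SOCKS proxy; it is
# normally selected via the 'Ytdl-socks-proxy' request header (see http_open
# above and https_open below).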
class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        conn_class = self._https_conn_class

        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, True),
            req, **kwargs)

1bab3437 1509class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
f1a8511f
S
1510 """
1511 See [1] for cookie file format.
1512
1513 1. https://curl.haxx.se/docs/http-cookies.html
1514 """
e7e62441 1515 _HTTPONLY_PREFIX = '#HttpOnly_'
c380cc28
S
1516 _ENTRY_LEN = 7
1517 _HEADER = '''# Netscape HTTP Cookie File
7a5c1cfe 1518# This file is generated by yt-dlp. Do not edit.
c380cc28
S
1519
1520'''
1521 _CookieFileEntry = collections.namedtuple(
1522 'CookieFileEntry',
1523 ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))
e7e62441 1524
1bab3437 1525 def save(self, filename=None, ignore_discard=False, ignore_expires=False):
c380cc28
S
1526 """
1527 Save cookies to a file.
1528
1529 Most of the code is taken from CPython 3.8 and slightly adapted
1530 to support cookie files with UTF-8 in both python 2 and 3.
1531 """
1532 if filename is None:
1533 if self.filename is not None:
1534 filename = self.filename
1535 else:
1536 raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
1537
1bab3437
S
1538 # Store session cookies with `expires` set to 0 instead of an empty
1539 # string
1540 for cookie in self:
1541 if cookie.expires is None:
1542 cookie.expires = 0
c380cc28
S
1543
1544 with io.open(filename, 'w', encoding='utf-8') as f:
1545 f.write(self._HEADER)
1546 now = time.time()
1547 for cookie in self:
1548 if not ignore_discard and cookie.discard:
1549 continue
1550 if not ignore_expires and cookie.is_expired(now):
1551 continue
1552 if cookie.secure:
1553 secure = 'TRUE'
1554 else:
1555 secure = 'FALSE'
1556 if cookie.domain.startswith('.'):
1557 initial_dot = 'TRUE'
1558 else:
1559 initial_dot = 'FALSE'
1560 if cookie.expires is not None:
1561 expires = compat_str(cookie.expires)
1562 else:
1563 expires = ''
1564 if cookie.value is None:
1565 # cookies.txt regards 'Set-Cookie: foo' as a cookie
1566 # with no name, whereas http.cookiejar regards it as a
1567 # cookie with no value.
1568 name = ''
1569 value = cookie.name
1570 else:
1571 name = cookie.name
1572 value = cookie.value
1573 f.write(
1574 '\t'.join([cookie.domain, initial_dot, cookie.path,
1575 secure, expires, name, value]) + '\n')
1bab3437
S
1576
1577 def load(self, filename=None, ignore_discard=False, ignore_expires=False):
e7e62441 1578 """Load cookies from a file."""
1579 if filename is None:
1580 if self.filename is not None:
1581 filename = self.filename
1582 else:
1583 raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
1584
c380cc28
S
1585 def prepare_line(line):
1586 if line.startswith(self._HTTPONLY_PREFIX):
1587 line = line[len(self._HTTPONLY_PREFIX):]
1588 # comments and empty lines are fine
1589 if line.startswith('#') or not line.strip():
1590 return line
1591 cookie_list = line.split('\t')
1592 if len(cookie_list) != self._ENTRY_LEN:
1593 raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list))
1594 cookie = self._CookieFileEntry(*cookie_list)
1595 if cookie.expires_at and not cookie.expires_at.isdigit():
1596 raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
1597 return line
1598
e7e62441 1599 cf = io.StringIO()
c380cc28 1600 with io.open(filename, encoding='utf-8') as f:
e7e62441 1601 for line in f:
c380cc28
S
1602 try:
1603 cf.write(prepare_line(line))
1604 except compat_cookiejar.LoadError as e:
1605 write_string(
1606 'WARNING: skipping cookie file entry due to %s: %r\n'
1607 % (e, line), sys.stderr)
1608 continue
e7e62441 1609 cf.seek(0)
1610 self._really_load(cf, filename, ignore_discard, ignore_expires)
1bab3437
S
1611 # Session cookies are denoted by either `expires` field set to
1612 # an empty string or 0. MozillaCookieJar only recognizes the former
1613 # (see [1]). So we need to force the latter to be recognized as session
1614 # cookies on our own.
1615 # Session cookies may be important for cookies-based authentication,
1616 # e.g. usually, when the user does not check the 'Remember me' check box while
1617 # logging in on a site, some important cookies are stored as session
1618 # cookies, so failing to recognize them will result in a failed login.
1619 # 1. https://bugs.python.org/issue17164
1620 for cookie in self:
1621 # Treat `expires=0` cookies as session cookies
1622 if cookie.expires == 0:
1623 cookie.expires = None
1624 cookie.discard = True
1625
1626
a6420bf5
S
1627class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
1628 def __init__(self, cookiejar=None):
1629 compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)
1630
1631 def http_response(self, request, response):
1632 # Python 2 will choke on the next HTTP request in a row if there are non-ASCII
1633 # characters in the Set-Cookie HTTP header of the last response (see
067aa17e 1634 # https://github.com/ytdl-org/youtube-dl/issues/6769).
a6420bf5
S
1635 # In order to at least prevent crashing, we will percent-encode the Set-Cookie
1636 # header before HTTPCookieProcessor starts processing it.
e28034c5
S
1637 # if sys.version_info < (3, 0) and response.headers:
1638 # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
1639 # set_cookie = response.headers.get(set_cookie_header)
1640 # if set_cookie:
1641 # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
1642 # if set_cookie != set_cookie_escaped:
1643 # del response.headers[set_cookie_header]
1644 # response.headers[set_cookie_header] = set_cookie_escaped
a6420bf5
S
1645 return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)
1646
f5fa042c 1647 https_request = compat_urllib_request.HTTPCookieProcessor.http_request
a6420bf5
S
1648 https_response = http_response
1649
1650
fca6dba8 1651class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
201c1459 1652 """YoutubeDL redirect handler
1653
1654 The code is based on HTTPRedirectHandler implementation from CPython [1].
1655
1656 This redirect handler solves two issues:
1657 - ensures redirect URL is always unicode under python 2
1658 - introduces support for experimental HTTP response status code
1659 308 Permanent Redirect [2] used by some sites [3]
1660
1661 1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
1662 2. https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308
1663 3. https://github.com/ytdl-org/youtube-dl/issues/28768
1664 """
1665
1666 http_error_301 = http_error_303 = http_error_307 = http_error_308 = compat_urllib_request.HTTPRedirectHandler.http_error_302
1667
1668 def redirect_request(self, req, fp, code, msg, headers, newurl):
1669 """Return a Request or None in response to a redirect.
1670
1671 This is called by the http_error_30x methods when a
1672 redirection response is received. If a redirection should
1673 take place, return a new Request to allow http_error_30x to
1674 perform the redirect. Otherwise, raise HTTPError if no-one
1675 else should try to handle this url. Return None if you can't
1676 but another Handler might.
1677 """
1678 m = req.get_method()
1679 if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
1680 or code in (301, 302, 303) and m == "POST")):
1681 raise compat_HTTPError(req.full_url, code, msg, headers, fp)
1682 # Strictly (according to RFC 2616), 301 or 302 in response to
1683 # a POST MUST NOT cause a redirection without confirmation
1684 # from the user (of urllib.request, in this case). In practice,
1685 # essentially all clients do redirect in this case, so we do
1686 # the same.
1687
1688 # On python 2, urlh.geturl() may sometimes return the redirect URL
1689 # as a byte string instead of unicode. This workaround forces it
1690 # to always return unicode.
1691 if sys.version_info[0] < 3:
1692 newurl = compat_str(newurl)
1693
1694 # Be conciliant with URIs containing a space. This is mainly
1695 # redundant with the more complete encoding done in http_error_302(),
1696 # but it is kept for compatibility with other callers.
1697 newurl = newurl.replace(' ', '%20')
1698
1699 CONTENT_HEADERS = ("content-length", "content-type")
1700 # NB: don't use dict comprehension for python 2.6 compatibility
1701 newheaders = dict((k, v) for k, v in req.headers.items()
1702 if k.lower() not in CONTENT_HEADERS)
1703 return compat_urllib_request.Request(
1704 newurl, headers=newheaders, origin_req_host=req.origin_req_host,
1705 unverifiable=True)
fca6dba8
S
1706
1707
46f59e89
S
1708def extract_timezone(date_str):
1709 m = re.search(
f137e4c2 1710 r'''(?x)
1711 ^.{8,}? # >=8 char non-TZ prefix, if present
1712 (?P<tz>Z| # just the UTC Z, or
1713 (?:(?<=.\b\d{4}|\b\d{2}:\d\d)| # preceded by 4 digits or hh:mm or
1714 (?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d)) # not preceded by 3 alpha word or >= 4 alpha or 2 digits
1715 [ ]? # optional space
1716 (?P<sign>\+|-) # +/-
1717 (?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2}) # hh[:]mm
1718 $)
1719 ''', date_str)
46f59e89
S
1720 if not m:
1721 timezone = datetime.timedelta()
1722 else:
1723 date_str = date_str[:-len(m.group('tz'))]
1724 if not m.group('sign'):
1725 timezone = datetime.timedelta()
1726 else:
1727 sign = 1 if m.group('sign') == '+' else -1
1728 timezone = datetime.timedelta(
1729 hours=sign * int(m.group('hours')),
1730 minutes=sign * int(m.group('minutes')))
1731 return timezone, date_str
1732
1733
08b38d54 1734def parse_iso8601(date_str, delimiter='T', timezone=None):
912b38b4
PH
1735 """ Return a UNIX timestamp from the given date """
1736
1737 if date_str is None:
1738 return None
1739
52c3a6e4
S
1740 date_str = re.sub(r'\.[0-9]+', '', date_str)
1741
08b38d54 1742 if timezone is None:
46f59e89
S
1743 timezone, date_str = extract_timezone(date_str)
1744
52c3a6e4
S
1745 try:
1746 date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
1747 dt = datetime.datetime.strptime(date_str, date_format) - timezone
1748 return calendar.timegm(dt.timetuple())
1749 except ValueError:
1750 pass
912b38b4
PH
1751
1752
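# Illustrative usage sketch (the example timestamp below is hypothetical, not from the codebase):
#   parse_iso8601('2021-03-02T12:00:00+0530')            # -> 1614666600 (12:00 at UTC+05:30 is 06:30 UTC)
#   parse_iso8601('2021-03-02 12:00:00', delimiter=' ')   # custom delimiter between date and time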
46f59e89
S
1753def date_formats(day_first=True):
1754 return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST
1755
1756
42bdd9d0 1757def unified_strdate(date_str, day_first=True):
bf50b038 1758 """Return a string with the date in the format YYYYMMDD"""
64e7ad60
PH
1759
1760 if date_str is None:
1761 return None
bf50b038 1762 upload_date = None
5f6a1245 1763 # Replace commas
026fcc04 1764 date_str = date_str.replace(',', ' ')
42bdd9d0 1765 # Remove AM/PM + timezone
9bb8e0a3 1766 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
46f59e89 1767 _, date_str = extract_timezone(date_str)
42bdd9d0 1768
46f59e89 1769 for expression in date_formats(day_first):
bf50b038
JMF
1770 try:
1771 upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
5de90176 1772 except ValueError:
bf50b038 1773 pass
42393ce2
PH
1774 if upload_date is None:
1775 timetuple = email.utils.parsedate_tz(date_str)
1776 if timetuple:
c6b9cf05
S
1777 try:
1778 upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
1779 except ValueError:
1780 pass
6a750402
JMF
1781 if upload_date is not None:
1782 return compat_str(upload_date)
bf50b038 1783
5f6a1245 1784
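# Illustrative usage sketch (hypothetical input; assumes DATE_FORMATS is defined earlier in this module):
#   unified_strdate('Fri, 21 Dec 2018 17:30:00 +0100')   # -> '20181221'
# Strings matching one of the DATE_FORMATS are parsed by the strptime loop above;
# RFC 2822-style dates fall back to email.utils.parsedate_tz.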
46f59e89
S
1785def unified_timestamp(date_str, day_first=True):
1786 if date_str is None:
1787 return None
1788
2ae2ffda 1789 date_str = re.sub(r'[,|]', '', date_str)
46f59e89 1790
7dc2a74e 1791 pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
46f59e89
S
1792 timezone, date_str = extract_timezone(date_str)
1793
1794 # Remove AM/PM + timezone
1795 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
1796
deef3195
S
1797 # Remove unrecognized timezones from ISO 8601 alike timestamps
1798 m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
1799 if m:
1800 date_str = date_str[:-len(m.group('tz'))]
1801
f226880c
PH
1802 # Python only supports microseconds, so remove nanoseconds
1803 m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
1804 if m:
1805 date_str = m.group(1)
1806
46f59e89
S
1807 for expression in date_formats(day_first):
1808 try:
7dc2a74e 1809 dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
46f59e89
S
1810 return calendar.timegm(dt.timetuple())
1811 except ValueError:
1812 pass
1813 timetuple = email.utils.parsedate_tz(date_str)
1814 if timetuple:
7dc2a74e 1815 return calendar.timegm(timetuple) + pm_delta * 3600
46f59e89
S
1816
1817
28e614de 1818def determine_ext(url, default_ext='unknown_video'):
85750f89 1819 if url is None or '.' not in url:
f4776371 1820 return default_ext
9cb9a5df 1821 guess = url.partition('?')[0].rpartition('.')[2]
73e79f2a
PH
1822 if re.match(r'^[A-Za-z0-9]+$', guess):
1823 return guess
a7aaa398
S
1824 # Try to extract ext from URLs like http://example.com/foo/bar.mp4/?download
1825 elif guess.rstrip('/') in KNOWN_EXTENSIONS:
9cb9a5df 1826 return guess.rstrip('/')
73e79f2a 1827 else:
cbdbb766 1828 return default_ext
73e79f2a 1829
5f6a1245 1830
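# Illustrative usage sketch (the URLs below are hypothetical):
#   determine_ext('https://example.com/video.mp4?dl=1')   # -> 'mp4'
#   determine_ext('https://example.com/stream')           # -> 'unknown_video'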
824fa511
S
1831def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
1832 return replace_extension(filename, sub_lang + '.' + sub_format, expected_real_ext)
d4051a8e 1833
5f6a1245 1834
9e62f283 1835def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
37254abc
JMF
1836 """
1837 Return a datetime object from a string in the format YYYYMMDD or
d49f8db3 1838 (now|today|yesterday|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?
9e62f283 1839
1840 format: string date format used to parse date_str into a datetime object
1841 precision: round the time portion of a datetime object.
1842 auto|microsecond|second|minute|hour|day.
1843 auto: round to the unit provided in date_str (if applicable).
1844 """
1845 auto_precision = False
1846 if precision == 'auto':
1847 auto_precision = True
1848 precision = 'microsecond'
396a76f7 1849 today = datetime_round(datetime.datetime.utcnow(), precision)
f8795e10 1850 if date_str in ('now', 'today'):
37254abc 1851 return today
f8795e10
PH
1852 if date_str == 'yesterday':
1853 return today - datetime.timedelta(days=1)
9e62f283 1854 match = re.match(
1855 r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)(s)?',
1856 date_str)
37254abc 1857 if match is not None:
9e62f283 1858 start_time = datetime_from_str(match.group('start'), precision, format)
1859 time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1)
37254abc 1860 unit = match.group('unit')
9e62f283 1861 if unit == 'month' or unit == 'year':
1862 new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time)
37254abc 1863 unit = 'day'
9e62f283 1864 else:
1865 if unit == 'week':
1866 unit = 'day'
1867 time *= 7
1868 delta = datetime.timedelta(**{unit + 's': time})
1869 new_date = start_time + delta
1870 if auto_precision:
1871 return datetime_round(new_date, unit)
1872 return new_date
1873
1874 return datetime_round(datetime.datetime.strptime(date_str, format), precision)
1875
1876
d49f8db3 1877def date_from_str(date_str, format='%Y%m%d', strict=False):
9e62f283 1878 """
1879 Return a datetime object from a string in the format YYYYMMDD or
d49f8db3 1880 (now|today|yesterday|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?
1881
1882 If "strict", only (now|today)[+-][0-9](day|week|month|year)(s)? is allowed
9e62f283 1883
1884 format: string date format used to parse date_str into a datetime object
1885 """
d49f8db3 1886 if strict and not re.fullmatch(r'\d{8}|(now|today)[+-]\d+(day|week|month|year)(s)?', date_str):
1887 raise ValueError(f'Invalid date format {date_str}')
9e62f283 1888 return datetime_from_str(date_str, precision='microsecond', format=format).date()
1889
1890
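# Illustrative usage sketch (hypothetical inputs):
#   date_from_str('20210315')      # -> datetime.date(2021, 3, 15)
#   date_from_str('today-1week')   # -> the UTC date one week ago
#   date_from_str('now+12hours', strict=True)   # raises ValueError: hours are not allowed in strict mode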
1891def datetime_add_months(dt, months):
1892 """Increment/Decrement a datetime object by months."""
1893 month = dt.month + months - 1
1894 year = dt.year + month // 12
1895 month = month % 12 + 1
1896 day = min(dt.day, calendar.monthrange(year, month)[1])
1897 return dt.replace(year, month, day)
1898
1899
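# Illustrative usage sketch (hypothetical input):
#   datetime_add_months(datetime.datetime(2020, 1, 31), 1)   # -> datetime.datetime(2020, 2, 29, 0, 0)
# The day is clamped to the length of the target month (here, leap-year February).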
1900def datetime_round(dt, precision='day'):
1901 """
1902 Round a datetime object's time to a specific precision
1903 """
1904 if precision == 'microsecond':
1905 return dt
1906
1907 unit_seconds = {
1908 'day': 86400,
1909 'hour': 3600,
1910 'minute': 60,
1911 'second': 1,
1912 }
1913 roundto = lambda x, n: ((x + n / 2) // n) * n
1914 timestamp = calendar.timegm(dt.timetuple())
1915 return datetime.datetime.utcfromtimestamp(roundto(timestamp, unit_seconds[precision]))
5f6a1245
JW
1916
1917
e63fc1be 1918def hyphenate_date(date_str):
1919 """
1920 Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
1921 match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
1922 if match is not None:
1923 return '-'.join(match.groups())
1924 else:
1925 return date_str
1926
5f6a1245 1927
bd558525
JMF
1928class DateRange(object):
1929 """Represents a time interval between two dates"""
5f6a1245 1930
bd558525
JMF
1931 def __init__(self, start=None, end=None):
1932 """start and end must be strings in the format accepted by date"""
1933 if start is not None:
d49f8db3 1934 self.start = date_from_str(start, strict=True)
bd558525
JMF
1935 else:
1936 self.start = datetime.datetime.min.date()
1937 if end is not None:
d49f8db3 1938 self.end = date_from_str(end, strict=True)
bd558525
JMF
1939 else:
1940 self.end = datetime.datetime.max.date()
37254abc 1941 if self.start > self.end:
bd558525 1942 raise ValueError('Date range: "%s" , the start date must be before the end date' % self)
5f6a1245 1943
bd558525
JMF
1944 @classmethod
1945 def day(cls, day):
1946 """Returns a range that only contains the given day"""
5f6a1245
JW
1947 return cls(day, day)
1948
bd558525
JMF
1949 def __contains__(self, date):
1950 """Check if the date is in the range"""
37254abc
JMF
1951 if not isinstance(date, datetime.date):
1952 date = date_from_str(date)
1953 return self.start <= date <= self.end
5f6a1245 1954
bd558525 1955 def __str__(self):
5f6a1245 1956 return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
c496ca96
PH
1957
1958
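# Illustrative usage sketch (hypothetical dates):
#   DateRange('20210101', '20210131')                  # the whole of January 2021
#   '20210115' in DateRange('20210101', '20210131')    # -> True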
1959def platform_name():
1960 """ Returns the platform name as a compat_str """
1961 res = platform.platform()
1962 if isinstance(res, bytes):
1963 res = res.decode(preferredencoding())
1964
1965 assert isinstance(res, compat_str)
1966 return res
c257baff
PH
1967
1968
49fa4d9a
N
1969def get_windows_version():
1970 ''' Get Windows version. None if it's not running on Windows '''
1971 if compat_os_name == 'nt':
1972 return version_tuple(platform.win32_ver()[1])
1973 else:
1974 return None
1975
1976
b58ddb32
PH
1977def _windows_write_string(s, out):
1978 """ Returns True if the string was written using special methods,
1979 False if it has yet to be written out."""
1980 # Adapted from http://stackoverflow.com/a/3259271/35070
1981
b58ddb32
PH
1982 import ctypes.wintypes
1983
1984 WIN_OUTPUT_IDS = {
1985 1: -11,
1986 2: -12,
1987 }
1988
a383a98a
PH
1989 try:
1990 fileno = out.fileno()
1991 except AttributeError:
1992 # If the output stream doesn't have a fileno, it's virtual
1993 return False
aa42e873
PH
1994 except io.UnsupportedOperation:
1995 # Some strange Windows pseudo files?
1996 return False
b58ddb32
PH
1997 if fileno not in WIN_OUTPUT_IDS:
1998 return False
1999
d7cd9a9e 2000 GetStdHandle = compat_ctypes_WINFUNCTYPE(
b58ddb32 2001 ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
d7cd9a9e 2002 ('GetStdHandle', ctypes.windll.kernel32))
b58ddb32
PH
2003 h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
2004
d7cd9a9e 2005 WriteConsoleW = compat_ctypes_WINFUNCTYPE(
b58ddb32
PH
2006 ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
2007 ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
d7cd9a9e 2008 ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
b58ddb32
PH
2009 written = ctypes.wintypes.DWORD(0)
2010
d7cd9a9e 2011 GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32))
b58ddb32
PH
2012 FILE_TYPE_CHAR = 0x0002
2013 FILE_TYPE_REMOTE = 0x8000
d7cd9a9e 2014 GetConsoleMode = compat_ctypes_WINFUNCTYPE(
b58ddb32
PH
2015 ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
2016 ctypes.POINTER(ctypes.wintypes.DWORD))(
d7cd9a9e 2017 ('GetConsoleMode', ctypes.windll.kernel32))
b58ddb32
PH
2018 INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
2019
2020 def not_a_console(handle):
2021 if handle == INVALID_HANDLE_VALUE or handle is None:
2022 return True
3089bc74
S
2023 return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
2024 or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
b58ddb32
PH
2025
2026 if not_a_console(h):
2027 return False
2028
d1b9c912
PH
2029 def next_nonbmp_pos(s):
2030 try:
2031 return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
2032 except StopIteration:
2033 return len(s)
2034
2035 while s:
2036 count = min(next_nonbmp_pos(s), 1024)
2037
b58ddb32 2038 ret = WriteConsoleW(
d1b9c912 2039 h, s, count if count else 2, ctypes.byref(written), None)
b58ddb32
PH
2040 if ret == 0:
2041 raise OSError('Failed to write string')
d1b9c912
PH
2042 if not count: # We just wrote a non-BMP character
2043 assert written.value == 2
2044 s = s[1:]
2045 else:
2046 assert written.value > 0
2047 s = s[written.value:]
b58ddb32
PH
2048 return True
2049
2050
734f90bb 2051def write_string(s, out=None, encoding=None):
7459e3a2
PH
2052 if out is None:
2053 out = sys.stderr
8bf48f23 2054 assert type(s) == compat_str
7459e3a2 2055
b58ddb32
PH
2056 if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
2057 if _windows_write_string(s, out):
2058 return
2059
3089bc74
S
2060 if ('b' in getattr(out, 'mode', '')
2061 or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr
104aa738
PH
2062 byt = s.encode(encoding or preferredencoding(), 'ignore')
2063 out.write(byt)
2064 elif hasattr(out, 'buffer'):
2065 enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
2066 byt = s.encode(enc, 'ignore')
2067 out.buffer.write(byt)
2068 else:
8bf48f23 2069 out.write(s)
7459e3a2
PH
2070 out.flush()
2071
2072
48ea9cea
PH
2073def bytes_to_intlist(bs):
2074 if not bs:
2075 return []
2076 if isinstance(bs[0], int): # Python 3
2077 return list(bs)
2078 else:
2079 return [ord(c) for c in bs]
2080
c257baff 2081
cba892fa 2082def intlist_to_bytes(xs):
2083 if not xs:
2084 return b''
edaa23f8 2085 return compat_struct_pack('%dB' % len(xs), *xs)
c38b1e77
PH
2086
2087
c1c9a79c
PH
2088# Cross-platform file locking
2089if sys.platform == 'win32':
2090 import ctypes.wintypes
2091 import msvcrt
2092
2093 class OVERLAPPED(ctypes.Structure):
2094 _fields_ = [
2095 ('Internal', ctypes.wintypes.LPVOID),
2096 ('InternalHigh', ctypes.wintypes.LPVOID),
2097 ('Offset', ctypes.wintypes.DWORD),
2098 ('OffsetHigh', ctypes.wintypes.DWORD),
2099 ('hEvent', ctypes.wintypes.HANDLE),
2100 ]
2101
2102 kernel32 = ctypes.windll.kernel32
2103 LockFileEx = kernel32.LockFileEx
2104 LockFileEx.argtypes = [
2105 ctypes.wintypes.HANDLE, # hFile
2106 ctypes.wintypes.DWORD, # dwFlags
2107 ctypes.wintypes.DWORD, # dwReserved
2108 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
2109 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
2110 ctypes.POINTER(OVERLAPPED) # Overlapped
2111 ]
2112 LockFileEx.restype = ctypes.wintypes.BOOL
2113 UnlockFileEx = kernel32.UnlockFileEx
2114 UnlockFileEx.argtypes = [
2115 ctypes.wintypes.HANDLE, # hFile
2116 ctypes.wintypes.DWORD, # dwReserved
2117 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
2118 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
2119 ctypes.POINTER(OVERLAPPED) # Overlapped
2120 ]
2121 UnlockFileEx.restype = ctypes.wintypes.BOOL
2122 whole_low = 0xffffffff
2123 whole_high = 0x7fffffff
2124
a3125791 2125 def _lock_file(f, exclusive, block): # todo: block unused on win32
c1c9a79c
PH
2126 overlapped = OVERLAPPED()
2127 overlapped.Offset = 0
2128 overlapped.OffsetHigh = 0
2129 overlapped.hEvent = 0
2130 f._lock_file_overlapped_p = ctypes.pointer(overlapped)
2131 handle = msvcrt.get_osfhandle(f.fileno())
2132 if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
2133 whole_low, whole_high, f._lock_file_overlapped_p):
2134 raise OSError('Locking file failed: %r' % ctypes.FormatError())
2135
2136 def _unlock_file(f):
2137 assert f._lock_file_overlapped_p
2138 handle = msvcrt.get_osfhandle(f.fileno())
2139 if not UnlockFileEx(handle, 0,
2140 whole_low, whole_high, f._lock_file_overlapped_p):
2141 raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
2142
2143else:
399a76e6
YCH
2144 # Some platforms, such as Jython, are missing fcntl
2145 try:
2146 import fcntl
c1c9a79c 2147
a3125791
JK
2148 def _lock_file(f, exclusive, block):
2149 fcntl.flock(f,
2150 fcntl.LOCK_SH if not exclusive
2151 else fcntl.LOCK_EX if block
2152 else fcntl.LOCK_EX | fcntl.LOCK_NB)
c1c9a79c 2153
399a76e6
YCH
2154 def _unlock_file(f):
2155 fcntl.flock(f, fcntl.LOCK_UN)
a3125791 2156
399a76e6
YCH
2157 except ImportError:
2158 UNSUPPORTED_MSG = 'file locking is not supported on this platform'
2159
a3125791 2160 def _lock_file(f, exclusive, block):
399a76e6
YCH
2161 raise IOError(UNSUPPORTED_MSG)
2162
2163 def _unlock_file(f):
2164 raise IOError(UNSUPPORTED_MSG)
c1c9a79c
PH
2165
2166
2167class locked_file(object):
a3125791
JK
2168 def __init__(self, filename, mode, block=True, encoding=None):
2169 assert mode in ['r', 'rb', 'a', 'ab', 'w', 'wb']
c1c9a79c
PH
2170 self.f = io.open(filename, mode, encoding=encoding)
2171 self.mode = mode
a3125791 2172 self.block = block
c1c9a79c
PH
2173
2174 def __enter__(self):
a3125791 2175 exclusive = 'r' not in self.mode
c1c9a79c 2176 try:
a3125791 2177 _lock_file(self.f, exclusive, self.block)
c1c9a79c
PH
2178 except IOError:
2179 self.f.close()
2180 raise
2181 return self
2182
2183 def __exit__(self, etype, value, traceback):
2184 try:
2185 _unlock_file(self.f)
2186 finally:
2187 self.f.close()
2188
2189 def __iter__(self):
2190 return iter(self.f)
2191
2192 def write(self, *args):
2193 return self.f.write(*args)
2194
2195 def read(self, *args):
2196 return self.f.read(*args)
4eb7f1d1 2197
a3125791
JK
2198 def flush(self):
2199 self.f.flush()
2200
2201 def open(self):
2202 return self.__enter__()
2203
2204 def close(self, *args):
2205 self.__exit__(self, *args, value=False, traceback=False)
2206
4eb7f1d1 2207
4644ac55
S
2208def get_filesystem_encoding():
2209 encoding = sys.getfilesystemencoding()
2210 return encoding if encoding is not None else 'utf-8'
2211
2212
4eb7f1d1 2213def shell_quote(args):
a6a173c2 2214 quoted_args = []
4644ac55 2215 encoding = get_filesystem_encoding()
a6a173c2
JMF
2216 for a in args:
2217 if isinstance(a, bytes):
2218 # We may get a filename encoded with 'encodeFilename'
2219 a = a.decode(encoding)
aefce8e6 2220 quoted_args.append(compat_shlex_quote(a))
28e614de 2221 return ' '.join(quoted_args)
9d4660ca
PH
2222
2223
2224def smuggle_url(url, data):
2225 """ Pass additional data in a URL for internal use. """
2226
81953d1a
RA
2227 url, idata = unsmuggle_url(url, {})
2228 data.update(idata)
15707c7e 2229 sdata = compat_urllib_parse_urlencode(
28e614de
PH
2230 {'__youtubedl_smuggle': json.dumps(data)})
2231 return url + '#' + sdata
9d4660ca
PH
2232
2233
79f82953 2234def unsmuggle_url(smug_url, default=None):
83e865a3 2235 if '#__youtubedl_smuggle' not in smug_url:
79f82953 2236 return smug_url, default
28e614de
PH
2237 url, _, sdata = smug_url.rpartition('#')
2238 jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
9d4660ca
PH
2239 data = json.loads(jsond)
2240 return url, data
02dbf93f
PH
2241
2242
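# Illustrative round-trip sketch (the URL and data below are hypothetical):
#   url = smuggle_url('https://example.com/video', {'referer': 'https://example.com/'})
#   unsmuggle_url(url)   # -> ('https://example.com/video', {'referer': 'https://example.com/'})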
e0fd9573 2243def format_decimal_suffix(num, fmt='%d%s', *, factor=1000):
2244 """ Formats numbers with decimal sufixes like K, M, etc """
2245 num, factor = float_or_none(num), float(factor)
2246 if num is None:
2247 return None
2248 exponent = 0 if num == 0 else int(math.log(num, factor))
abbeeebc 2249 suffix = ['', *'kMGTPEZY'][exponent]
2250 if factor == 1024:
2251 suffix = {'k': 'Ki', '': ''}.get(suffix, f'{suffix}i')
e0fd9573 2252 converted = num / (factor ** exponent)
abbeeebc 2253 return fmt % (converted, suffix)
e0fd9573 2254
2255
02dbf93f 2256def format_bytes(bytes):
f02d24d8 2257 return format_decimal_suffix(bytes, '%.2f%sB', factor=1024) or 'N/A'
f53c966a 2258
1c088fa8 2259
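# Illustrative usage sketch (hypothetical values):
#   format_bytes(1536)                         # -> '1.50KiB' (binary factor 1024)
#   format_decimal_suffix(1200000, '%.1f%s')   # -> '1.2M' (decimal factor 1000)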
fb47597b
S
2260def lookup_unit_table(unit_table, s):
2261 units_re = '|'.join(re.escape(u) for u in unit_table)
2262 m = re.match(
782b1b5b 2263 r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
fb47597b
S
2264 if not m:
2265 return None
2266 num_str = m.group('num').replace(',', '.')
2267 mult = unit_table[m.group('unit')]
2268 return int(float(num_str) * mult)
2269
2270
be64b5b0
PH
2271def parse_filesize(s):
2272 if s is None:
2273 return None
2274
dfb1b146 2275 # The lower-case forms are of course incorrect and unofficial,
be64b5b0
PH
2276 # but we support those too
2277 _UNIT_TABLE = {
2278 'B': 1,
2279 'b': 1,
70852b47 2280 'bytes': 1,
be64b5b0
PH
2281 'KiB': 1024,
2282 'KB': 1000,
2283 'kB': 1024,
2284 'Kb': 1000,
13585d76 2285 'kb': 1000,
70852b47
YCH
2286 'kilobytes': 1000,
2287 'kibibytes': 1024,
be64b5b0
PH
2288 'MiB': 1024 ** 2,
2289 'MB': 1000 ** 2,
2290 'mB': 1024 ** 2,
2291 'Mb': 1000 ** 2,
13585d76 2292 'mb': 1000 ** 2,
70852b47
YCH
2293 'megabytes': 1000 ** 2,
2294 'mebibytes': 1024 ** 2,
be64b5b0
PH
2295 'GiB': 1024 ** 3,
2296 'GB': 1000 ** 3,
2297 'gB': 1024 ** 3,
2298 'Gb': 1000 ** 3,
13585d76 2299 'gb': 1000 ** 3,
70852b47
YCH
2300 'gigabytes': 1000 ** 3,
2301 'gibibytes': 1024 ** 3,
be64b5b0
PH
2302 'TiB': 1024 ** 4,
2303 'TB': 1000 ** 4,
2304 'tB': 1024 ** 4,
2305 'Tb': 1000 ** 4,
13585d76 2306 'tb': 1000 ** 4,
70852b47
YCH
2307 'terabytes': 1000 ** 4,
2308 'tebibytes': 1024 ** 4,
be64b5b0
PH
2309 'PiB': 1024 ** 5,
2310 'PB': 1000 ** 5,
2311 'pB': 1024 ** 5,
2312 'Pb': 1000 ** 5,
13585d76 2313 'pb': 1000 ** 5,
70852b47
YCH
2314 'petabytes': 1000 ** 5,
2315 'pebibytes': 1024 ** 5,
be64b5b0
PH
2316 'EiB': 1024 ** 6,
2317 'EB': 1000 ** 6,
2318 'eB': 1024 ** 6,
2319 'Eb': 1000 ** 6,
13585d76 2320 'eb': 1000 ** 6,
70852b47
YCH
2321 'exabytes': 1000 ** 6,
2322 'exbibytes': 1024 ** 6,
be64b5b0
PH
2323 'ZiB': 1024 ** 7,
2324 'ZB': 1000 ** 7,
2325 'zB': 1024 ** 7,
2326 'Zb': 1000 ** 7,
13585d76 2327 'zb': 1000 ** 7,
70852b47
YCH
2328 'zettabytes': 1000 ** 7,
2329 'zebibytes': 1024 ** 7,
be64b5b0
PH
2330 'YiB': 1024 ** 8,
2331 'YB': 1000 ** 8,
2332 'yB': 1024 ** 8,
2333 'Yb': 1000 ** 8,
13585d76 2334 'yb': 1000 ** 8,
70852b47
YCH
2335 'yottabytes': 1000 ** 8,
2336 'yobibytes': 1024 ** 8,
be64b5b0
PH
2337 }
2338
fb47597b
S
2339 return lookup_unit_table(_UNIT_TABLE, s)
2340
2341
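# Illustrative usage sketch (hypothetical inputs):
#   parse_filesize('1.5 GiB')   # -> 1610612736 (1.5 * 1024 ** 3)
#   parse_filesize('500 MB')    # -> 500000000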
2342def parse_count(s):
2343 if s is None:
be64b5b0
PH
2344 return None
2345
352d5da8 2346 s = re.sub(r'^[^\d]+\s', '', s).strip()
fb47597b
S
2347
2348 if re.match(r'^[\d,.]+$', s):
2349 return str_to_int(s)
2350
2351 _UNIT_TABLE = {
2352 'k': 1000,
2353 'K': 1000,
2354 'm': 1000 ** 2,
2355 'M': 1000 ** 2,
2356 'kk': 1000 ** 2,
2357 'KK': 1000 ** 2,
352d5da8 2358 'b': 1000 ** 3,
2359 'B': 1000 ** 3,
fb47597b 2360 }
be64b5b0 2361
352d5da8 2362 ret = lookup_unit_table(_UNIT_TABLE, s)
2363 if ret is not None:
2364 return ret
2365
2366 mobj = re.match(r'([\d,.]+)(?:$|\s)', s)
2367 if mobj:
2368 return str_to_int(mobj.group(1))
be64b5b0 2369
2f7ae819 2370
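# Illustrative usage sketch (hypothetical inputs):
#   parse_count('1.2M')    # -> 1200000
#   parse_count('1,234')   # -> 1234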
b871d7e9
S
2371def parse_resolution(s):
2372 if s is None:
2373 return {}
2374
17ec8bcf 2375 mobj = re.search(r'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s)
b871d7e9
S
2376 if mobj:
2377 return {
2378 'width': int(mobj.group('w')),
2379 'height': int(mobj.group('h')),
2380 }
2381
17ec8bcf 2382 mobj = re.search(r'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s)
b871d7e9
S
2383 if mobj:
2384 return {'height': int(mobj.group(1))}
2385
2386 mobj = re.search(r'\b([48])[kK]\b', s)
2387 if mobj:
2388 return {'height': int(mobj.group(1)) * 540}
2389
2390 return {}
2391
2392
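# Illustrative usage sketch (hypothetical inputs):
#   parse_resolution('1920x1080')   # -> {'width': 1920, 'height': 1080}
#   parse_resolution('720p')        # -> {'height': 720}
#   parse_resolution('4K')          # -> {'height': 2160}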
0dc41787
S
2393def parse_bitrate(s):
2394 if not isinstance(s, compat_str):
2395 return
2396 mobj = re.search(r'\b(\d+)\s*kbps', s)
2397 if mobj:
2398 return int(mobj.group(1))
2399
2400
a942d6cb 2401def month_by_name(name, lang='en'):
caefb1de
PH
2402 """ Return the number of a month by (locale-independently) English name """
2403
f6717dec 2404 month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
a942d6cb 2405
caefb1de 2406 try:
f6717dec 2407 return month_names.index(name) + 1
7105440c
YCH
2408 except ValueError:
2409 return None
2410
2411
2412def month_by_abbreviation(abbrev):
2413 """ Return the number of a month by (locale-independently) English
2414 abbreviations """
2415
2416 try:
2417 return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
caefb1de
PH
2418 except ValueError:
2419 return None
18258362
JMF
2420
2421
5aafe895 2422def fix_xml_ampersands(xml_str):
18258362 2423 """Replace all the '&' by '&amp;' in XML"""
5aafe895
PH
2424 return re.sub(
2425 r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
28e614de 2426 '&amp;',
5aafe895 2427 xml_str)
e3946f98
PH
2428
2429
2430def setproctitle(title):
8bf48f23 2431 assert isinstance(title, compat_str)
c1c05c67
YCH
2432
2433 # ctypes in Jython is not complete
2434 # http://bugs.jython.org/issue2148
2435 if sys.platform.startswith('java'):
2436 return
2437
e3946f98 2438 try:
611c1dd9 2439 libc = ctypes.cdll.LoadLibrary('libc.so.6')
e3946f98
PH
2440 except OSError:
2441 return
2f49bcd6
RC
2442 except TypeError:
2443 # LoadLibrary in Windows Python 2.7.13 only expects
2444 # a bytestring, but since unicode_literals turns
2445 # every string into a unicode string, it fails.
2446 return
6eefe533
PH
2447 title_bytes = title.encode('utf-8')
2448 buf = ctypes.create_string_buffer(len(title_bytes))
2449 buf.value = title_bytes
e3946f98 2450 try:
6eefe533 2451 libc.prctl(15, buf, 0, 0, 0)
e3946f98
PH
2452 except AttributeError:
2453 return # Strange libc, just skip this
d7dda168
PH
2454
2455
2456def remove_start(s, start):
46bc9b7d 2457 return s[len(start):] if s is not None and s.startswith(start) else s
29eb5174
PH
2458
2459
2b9faf55 2460def remove_end(s, end):
46bc9b7d 2461 return s[:-len(end)] if s is not None and s.endswith(end) else s
2b9faf55
PH
2462
2463
31b2051e
S
2464def remove_quotes(s):
2465 if s is None or len(s) < 2:
2466 return s
2467 for quote in ('"', "'", ):
2468 if s[0] == quote and s[-1] == quote:
2469 return s[1:-1]
2470 return s
2471
2472
b6e0c7d2
U
2473def get_domain(url):
2474 domain = re.match(r'(?:https?:\/\/)?(?:www\.)?(?P<domain>[^\n\/]+\.[^\n\/]+)(?:\/(.*))?', url)
2475 return domain.group('domain') if domain else None
2476
2477
29eb5174 2478def url_basename(url):
9b8aaeed 2479 path = compat_urlparse.urlparse(url).path
28e614de 2480 return path.strip('/').split('/')[-1]
aa94a6d3
PH
2481
2482
02dc0a36
S
2483def base_url(url):
2484 return re.match(r'https?://[^?#&]+/', url).group()
2485
2486
e34c3361 2487def urljoin(base, path):
4b5de77b
S
2488 if isinstance(path, bytes):
2489 path = path.decode('utf-8')
e34c3361
S
2490 if not isinstance(path, compat_str) or not path:
2491 return None
fad4ceb5 2492 if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
e34c3361 2493 return path
4b5de77b
S
2494 if isinstance(base, bytes):
2495 base = base.decode('utf-8')
2496 if not isinstance(base, compat_str) or not re.match(
2497 r'^(?:https?:)?//', base):
e34c3361
S
2498 return None
2499 return compat_urlparse.urljoin(base, path)
2500
2501
aa94a6d3
PH
2502class HEADRequest(compat_urllib_request.Request):
2503 def get_method(self):
611c1dd9 2504 return 'HEAD'
7217e148
PH
2505
2506
95cf60e8
S
2507class PUTRequest(compat_urllib_request.Request):
2508 def get_method(self):
2509 return 'PUT'
2510
2511
9732d77e 2512def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
9e907ebd 2513 if get_attr and v is not None:
2514 v = getattr(v, get_attr, None)
1812afb7
S
2515 try:
2516 return int(v) * invscale // scale
31c49255 2517 except (ValueError, TypeError, OverflowError):
af98f8ff 2518 return default
9732d77e 2519
9572013d 2520
40a90862
JMF
2521def str_or_none(v, default=None):
2522 return default if v is None else compat_str(v)
2523
9732d77e
PH
2524
2525def str_to_int(int_str):
48d4681e 2526 """ A more relaxed version of int_or_none """
42db58ec 2527 if isinstance(int_str, compat_integer_types):
348c6bf1 2528 return int_str
42db58ec
S
2529 elif isinstance(int_str, compat_str):
2530 int_str = re.sub(r'[,\.\+]', '', int_str)
2531 return int_or_none(int_str)
608d11f5
PH
2532
2533
9732d77e 2534def float_or_none(v, scale=1, invscale=1, default=None):
caf80631
S
2535 if v is None:
2536 return default
2537 try:
2538 return float(v) * invscale / scale
5e1271c5 2539 except (ValueError, TypeError):
caf80631 2540 return default
43f775e4
PH
2541
2542
c7e327c4
S
2543def bool_or_none(v, default=None):
2544 return v if isinstance(v, bool) else default
2545
2546
53cd37ba
S
2547def strip_or_none(v, default=None):
2548 return v.strip() if isinstance(v, compat_str) else default
b72b4431
S
2549
2550
af03000a
S
2551def url_or_none(url):
2552 if not url or not isinstance(url, compat_str):
2553 return None
2554 url = url.strip()
29f7c58a 2555 return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None
af03000a
S
2556
2557
3e9b66d7
LNO
2558def request_to_url(req):
2559 if isinstance(req, compat_urllib_request.Request):
2560 return req.get_full_url()
2561 else:
2562 return req
2563
2564
e29663c6 2565def strftime_or_none(timestamp, date_format, default=None):
2566 datetime_object = None
2567 try:
2568 if isinstance(timestamp, compat_numeric_types): # unix timestamp
2569 datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
2570 elif isinstance(timestamp, compat_str): # assume YYYYMMDD
2571 datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
2572 return datetime_object.strftime(date_format)
2573 except (ValueError, TypeError, AttributeError):
2574 return default
2575
2576
608d11f5 2577def parse_duration(s):
8f9312c3 2578 if not isinstance(s, compat_basestring):
608d11f5 2579 return None
ca7b3246 2580 s = s.strip()
38d79fd1 2581 if not s:
2582 return None
ca7b3246 2583
acaff495 2584 days, hours, mins, secs, ms = [None] * 5
8bd1c00b 2585 m = re.match(r'''(?x)
2586 (?P<before_secs>
2587 (?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?
2588 (?P<secs>(?(before_secs)[0-9]{1,2}|[0-9]+))
2589 (?P<ms>[.:][0-9]+)?Z?$
2590 ''', s)
acaff495 2591 if m:
8bd1c00b 2592 days, hours, mins, secs, ms = m.group('days', 'hours', 'mins', 'secs', 'ms')
acaff495 2593 else:
2594 m = re.match(
056653bb
S
2595 r'''(?ix)(?:P?
2596 (?:
2597 [0-9]+\s*y(?:ears?)?\s*
2598 )?
2599 (?:
2600 [0-9]+\s*m(?:onths?)?\s*
2601 )?
2602 (?:
2603 [0-9]+\s*w(?:eeks?)?\s*
2604 )?
8f4b58d7 2605 (?:
acaff495 2606 (?P<days>[0-9]+)\s*d(?:ays?)?\s*
8f4b58d7 2607 )?
056653bb 2608 T)?
acaff495 2609 (?:
2610 (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
2611 )?
2612 (?:
2613 (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
2614 )?
2615 (?:
2616 (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
15846398 2617 )?Z?$''', s)
acaff495 2618 if m:
2619 days, hours, mins, secs, ms = m.groups()
2620 else:
15846398 2621 m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
acaff495 2622 if m:
2623 hours, mins = m.groups()
2624 else:
2625 return None
2626
2627 duration = 0
2628 if secs:
2629 duration += float(secs)
2630 if mins:
2631 duration += float(mins) * 60
2632 if hours:
2633 duration += float(hours) * 60 * 60
2634 if days:
2635 duration += float(days) * 24 * 60 * 60
2636 if ms:
8bd1c00b 2637 duration += float(ms.replace(':', '.'))
acaff495 2638 return duration
91d7d0b3
JMF
2639
2640
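# Illustrative usage sketch (hypothetical inputs):
#   parse_duration('1:02:03.5')   # -> 3723.5
#   parse_duration('2h 30min')    # -> 9000.0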
e65e4c88 2641def prepend_extension(filename, ext, expected_real_ext=None):
5f6a1245 2642 name, real_ext = os.path.splitext(filename)
e65e4c88
S
2643 return (
2644 '{0}.{1}{2}'.format(name, ext, real_ext)
2645 if not expected_real_ext or real_ext[1:] == expected_real_ext
2646 else '{0}.{1}'.format(filename, ext))
d70ad093
PH
2647
2648
b3ed15b7
S
2649def replace_extension(filename, ext, expected_real_ext=None):
2650 name, real_ext = os.path.splitext(filename)
2651 return '{0}.{1}'.format(
2652 name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
2653 ext)
2654
2655
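# Illustrative usage sketch (the filenames below are hypothetical):
#   prepend_extension('video.mp4', 'temp')   # -> 'video.temp.mp4'
#   replace_extension('video.mp4', 'mkv')    # -> 'video.mkv'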
d70ad093
PH
2656def check_executable(exe, args=[]):
2657 """ Checks if the given binary is installed somewhere in PATH, and returns its name.
2658 args can be a list of arguments for a short output (like -version) """
2659 try:
d3c93ec2 2660 Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate_or_kill()
d70ad093
PH
2661 except OSError:
2662 return False
2663 return exe
b7ab0590
PH
2664
2665
9af98e17 2666def _get_exe_version_output(exe, args):
95807118 2667 try:
b64d04c1 2668 # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
7a5c1cfe 2669 # SIGTTOU if yt-dlp is run in the background.
067aa17e 2670 # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
d3c93ec2 2671 out, _ = Popen(
2672 [encodeArgument(exe)] + args, stdin=subprocess.PIPE,
2673 stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate_or_kill()
95807118
PH
2674 except OSError:
2675 return False
cae97f65
PH
2676 if isinstance(out, bytes): # Python 2.x
2677 out = out.decode('ascii', 'ignore')
9af98e17 2678 return out
cae97f65
PH
2679
2680
2681def detect_exe_version(output, version_re=None, unrecognized='present'):
2682 assert isinstance(output, compat_str)
2683 if version_re is None:
2684 version_re = r'version\s+([-0-9._a-zA-Z]+)'
2685 m = re.search(version_re, output)
95807118
PH
2686 if m:
2687 return m.group(1)
2688 else:
2689 return unrecognized
2690
2691
9af98e17 2692def get_exe_version(exe, args=['--version'],
2693 version_re=None, unrecognized='present'):
2694 """ Returns the version of the specified executable,
2695 or False if the executable is not present """
2696 out = _get_exe_version_output(exe, args)
2697 return detect_exe_version(out, version_re, unrecognized) if out else False
2698
2699
cb89cfc1 2700class LazyList(collections.abc.Sequence):
483336e7 2701 ''' Lazy immutable list from an iterable
2702 Note that slices of a LazyList are lists and not LazyList'''
2703
8e5fecc8 2704 class IndexError(IndexError):
2705 pass
2706
282f5709 2707 def __init__(self, iterable, *, reverse=False, _cache=None):
483336e7 2708 self.__iterable = iter(iterable)
282f5709 2709 self.__cache = [] if _cache is None else _cache
2710 self.__reversed = reverse
483336e7 2711
2712 def __iter__(self):
28419ca2 2713 if self.__reversed:
2714 # We need to consume the entire iterable to iterate in reverse
981052c9 2715 yield from self.exhaust()
28419ca2 2716 return
2717 yield from self.__cache
483336e7 2718 for item in self.__iterable:
2719 self.__cache.append(item)
2720 yield item
2721
981052c9 2722 def __exhaust(self):
483336e7 2723 self.__cache.extend(self.__iterable)
9f1a1c36 2724 # Discard the emptied iterable to make it pickle-able
2725 self.__iterable = []
28419ca2 2726 return self.__cache
2727
981052c9 2728 def exhaust(self):
2729 ''' Evaluate the entire iterable '''
2730 return self.__exhaust()[::-1 if self.__reversed else 1]
2731
28419ca2 2732 @staticmethod
981052c9 2733 def __reverse_index(x):
e0f2b4b4 2734 return None if x is None else -(x + 1)
483336e7 2735
2736 def __getitem__(self, idx):
2737 if isinstance(idx, slice):
28419ca2 2738 if self.__reversed:
e0f2b4b4 2739 idx = slice(self.__reverse_index(idx.start), self.__reverse_index(idx.stop), -(idx.step or 1))
2740 start, stop, step = idx.start, idx.stop, idx.step or 1
483336e7 2741 elif isinstance(idx, int):
28419ca2 2742 if self.__reversed:
981052c9 2743 idx = self.__reverse_index(idx)
e0f2b4b4 2744 start, stop, step = idx, idx, 0
483336e7 2745 else:
2746 raise TypeError('indices must be integers or slices')
e0f2b4b4 2747 if ((start or 0) < 0 or (stop or 0) < 0
2748 or (start is None and step < 0)
2749 or (stop is None and step > 0)):
483336e7 2750 # We need to consume the entire iterable to be able to slice from the end
2751 # Obviously, never use this with infinite iterables
8e5fecc8 2752 self.__exhaust()
2753 try:
2754 return self.__cache[idx]
2755 except IndexError as e:
2756 raise self.IndexError(e) from e
e0f2b4b4 2757 n = max(start or 0, stop or 0) - len(self.__cache) + 1
28419ca2 2758 if n > 0:
2759 self.__cache.extend(itertools.islice(self.__iterable, n))
8e5fecc8 2760 try:
2761 return self.__cache[idx]
2762 except IndexError as e:
2763 raise self.IndexError(e) from e
483336e7 2764
2765 def __bool__(self):
2766 try:
28419ca2 2767 self[-1] if self.__reversed else self[0]
8e5fecc8 2768 except self.IndexError:
483336e7 2769 return False
2770 return True
2771
2772 def __len__(self):
8e5fecc8 2773 self.__exhaust()
483336e7 2774 return len(self.__cache)
2775
282f5709 2776 def __reversed__(self):
2777 return type(self)(self.__iterable, reverse=not self.__reversed, _cache=self.__cache)
2778
2779 def __copy__(self):
2780 return type(self)(self.__iterable, reverse=self.__reversed, _cache=self.__cache)
2781
28419ca2 2782 def __repr__(self):
2783 # repr and str should mimic a list. So we exhaust the iterable
2784 return repr(self.exhaust())
2785
2786 def __str__(self):
2787 return repr(self.exhaust())
2788
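# Illustrative usage sketch (hypothetical example):
#   lazy = LazyList(itertools.count())   # an infinite iterator is fine as long as it is never exhausted
#   lazy[:5]   # -> [0, 1, 2, 3, 4]; only a short prefix of the iterator is consumed and cached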
483336e7 2789
7be9ccff 2790class PagedList:
c07a39ae 2791
2792 class IndexError(IndexError):
2793 pass
2794
dd26ced1
PH
2795 def __len__(self):
2796 # This is only useful for tests
2797 return len(self.getslice())
2798
7be9ccff 2799 def __init__(self, pagefunc, pagesize, use_cache=True):
2800 self._pagefunc = pagefunc
2801 self._pagesize = pagesize
2802 self._use_cache = use_cache
2803 self._cache = {}
2804
2805 def getpage(self, pagenum):
d8cf8d97 2806 page_results = self._cache.get(pagenum)
2807 if page_results is None:
2808 page_results = list(self._pagefunc(pagenum))
7be9ccff 2809 if self._use_cache:
2810 self._cache[pagenum] = page_results
2811 return page_results
2812
2813 def getslice(self, start=0, end=None):
2814 return list(self._getslice(start, end))
2815
2816 def _getslice(self, start, end):
55575225 2817 raise NotImplementedError('This method must be implemented by subclasses')
2818
2819 def __getitem__(self, idx):
7be9ccff 2820 # NOTE: cache must be enabled if this is used
55575225 2821 if not isinstance(idx, int) or idx < 0:
2822 raise TypeError('indices must be non-negative integers')
2823 entries = self.getslice(idx, idx + 1)
d8cf8d97 2824 if not entries:
c07a39ae 2825 raise self.IndexError()
d8cf8d97 2826 return entries[0]
55575225 2827
9c44d242
PH
2828
2829class OnDemandPagedList(PagedList):
7be9ccff 2830 def _getslice(self, start, end):
b7ab0590
PH
2831 for pagenum in itertools.count(start // self._pagesize):
2832 firstid = pagenum * self._pagesize
2833 nextfirstid = pagenum * self._pagesize + self._pagesize
2834 if start >= nextfirstid:
2835 continue
2836
b7ab0590
PH
2837 startv = (
2838 start % self._pagesize
2839 if firstid <= start < nextfirstid
2840 else 0)
b7ab0590
PH
2841 endv = (
2842 ((end - 1) % self._pagesize) + 1
2843 if (end is not None and firstid <= end <= nextfirstid)
2844 else None)
2845
7be9ccff 2846 page_results = self.getpage(pagenum)
b7ab0590
PH
2847 if startv != 0 or endv is not None:
2848 page_results = page_results[startv:endv]
7be9ccff 2849 yield from page_results
b7ab0590
PH
2850
2851 # A little optimization - if the current page is not "full", i.e. does
2852 # not contain page_size videos, then we can assume that this page
2853 # is the last one - there are no more ids on further pages -
2854 # i.e. no need to query again.
2855 if len(page_results) + startv < self._pagesize:
2856 break
2857
2858 # If we got the whole page, but the next page is not interesting,
2859 # break out early as well
2860 if end == nextfirstid:
2861 break
81c2f20b
PH
2862
2863
9c44d242
PH
2864class InAdvancePagedList(PagedList):
2865 def __init__(self, pagefunc, pagecount, pagesize):
9c44d242 2866 self._pagecount = pagecount
7be9ccff 2867 PagedList.__init__(self, pagefunc, pagesize, True)
9c44d242 2868
7be9ccff 2869 def _getslice(self, start, end):
9c44d242 2870 start_page = start // self._pagesize
d37707bd 2871 end_page = self._pagecount if end is None else min(self._pagecount, end // self._pagesize + 1)
9c44d242
PH
2872 skip_elems = start - start_page * self._pagesize
2873 only_more = None if end is None else end - start
2874 for pagenum in range(start_page, end_page):
7be9ccff 2875 page_results = self.getpage(pagenum)
9c44d242 2876 if skip_elems:
7be9ccff 2877 page_results = page_results[skip_elems:]
9c44d242
PH
2878 skip_elems = None
2879 if only_more is not None:
7be9ccff 2880 if len(page_results) < only_more:
2881 only_more -= len(page_results)
9c44d242 2882 else:
7be9ccff 2883 yield from page_results[:only_more]
9c44d242 2884 break
7be9ccff 2885 yield from page_results
9c44d242
PH
2886
2887
81c2f20b 2888def uppercase_escape(s):
676eb3f2 2889 unicode_escape = codecs.getdecoder('unicode_escape')
81c2f20b 2890 return re.sub(
a612753d 2891 r'\\U[0-9a-fA-F]{8}',
676eb3f2
PH
2892 lambda m: unicode_escape(m.group(0))[0],
2893 s)
0fe2ff78
YCH
2894
2895
2896def lowercase_escape(s):
2897 unicode_escape = codecs.getdecoder('unicode_escape')
2898 return re.sub(
2899 r'\\u[0-9a-fA-F]{4}',
2900 lambda m: unicode_escape(m.group(0))[0],
2901 s)
b53466e1 2902
d05cfe06
S
2903
2904def escape_rfc3986(s):
2905 """Escape non-ASCII characters as suggested by RFC 3986"""
8f9312c3 2906 if sys.version_info < (3, 0) and isinstance(s, compat_str):
d05cfe06 2907 s = s.encode('utf-8')
ecc0c5ee 2908 return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
d05cfe06
S
2909
2910
2911def escape_url(url):
2912 """Escape URL as suggested by RFC 3986"""
2913 url_parsed = compat_urllib_parse_urlparse(url)
2914 return url_parsed._replace(
efbed08d 2915 netloc=url_parsed.netloc.encode('idna').decode('ascii'),
d05cfe06
S
2916 path=escape_rfc3986(url_parsed.path),
2917 params=escape_rfc3986(url_parsed.params),
2918 query=escape_rfc3986(url_parsed.query),
2919 fragment=escape_rfc3986(url_parsed.fragment)
2920 ).geturl()
2921
62e609ab 2922
4dfbf869 2923def parse_qs(url):
2924 return compat_parse_qs(compat_urllib_parse_urlparse(url).query)
2925
2926
62e609ab
PH
2927def read_batch_urls(batch_fd):
2928 def fixup(url):
2929 if not isinstance(url, compat_str):
2930 url = url.decode('utf-8', 'replace')
8c04f0be 2931 BOM_UTF8 = ('\xef\xbb\xbf', '\ufeff')
2932 for bom in BOM_UTF8:
2933 if url.startswith(bom):
2934 url = url[len(bom):]
2935 url = url.lstrip()
2936 if not url or url.startswith(('#', ';', ']')):
62e609ab 2937 return False
8c04f0be 2938 # "#" cannot be stripped out since it is part of the URI
2939 # However, it can be safely stripped out if it follows a whitespace
2940 return re.split(r'\s#', url, 1)[0].rstrip()
62e609ab
PH
2941
2942 with contextlib.closing(batch_fd) as fd:
2943 return [url for url in map(fixup, fd) if url]
b74fa8cd
JMF
2944
2945
2946def urlencode_postdata(*args, **kargs):
15707c7e 2947 return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
bcf89ce6
PH
2948
2949
38f9ef31 2950def update_url_query(url, query):
cacd9966
YCH
2951 if not query:
2952 return url
38f9ef31 2953 parsed_url = compat_urlparse.urlparse(url)
2954 qs = compat_parse_qs(parsed_url.query)
2955 qs.update(query)
2956 return compat_urlparse.urlunparse(parsed_url._replace(
15707c7e 2957 query=compat_urllib_parse_urlencode(qs, True)))
16392824 2958
8e60dc75 2959
ed0291d1
S
2960def update_Request(req, url=None, data=None, headers={}, query={}):
2961 req_headers = req.headers.copy()
2962 req_headers.update(headers)
2963 req_data = data or req.data
2964 req_url = update_url_query(url or req.get_full_url(), query)
95cf60e8
S
2965 req_get_method = req.get_method()
2966 if req_get_method == 'HEAD':
2967 req_type = HEADRequest
2968 elif req_get_method == 'PUT':
2969 req_type = PUTRequest
2970 else:
2971 req_type = compat_urllib_request.Request
ed0291d1
S
2972 new_req = req_type(
2973 req_url, data=req_data, headers=req_headers,
2974 origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
2975 if hasattr(req, 'timeout'):
2976 new_req.timeout = req.timeout
2977 return new_req
2978
2979
10c87c15 2980def _multipart_encode_impl(data, boundary):
0c265486
YCH
2981 content_type = 'multipart/form-data; boundary=%s' % boundary
2982
2983 out = b''
2984 for k, v in data.items():
2985 out += b'--' + boundary.encode('ascii') + b'\r\n'
2986 if isinstance(k, compat_str):
2987 k = k.encode('utf-8')
2988 if isinstance(v, compat_str):
2989 v = v.encode('utf-8')
2990 # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
2991 # suggests sending UTF-8 directly. Firefox sends UTF-8, too
b2ad479d 2992 content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
0c265486
YCH
2993 if boundary.encode('ascii') in content:
2994 raise ValueError('Boundary overlaps with data')
2995 out += content
2996
2997 out += b'--' + boundary.encode('ascii') + b'--\r\n'
2998
2999 return out, content_type
3000
3001
3002def multipart_encode(data, boundary=None):
3003 '''
3004 Encode a dict to RFC 7578-compliant form-data
3005
3006 data:
3007 A dict where keys and values can be either Unicode or bytes-like
3008 objects.
3009 boundary:
3010 If specified as a Unicode object, it is used as the boundary. Otherwise
3011 a random boundary is generated.
3012
3013 Reference: https://tools.ietf.org/html/rfc7578
3014 '''
3015 has_specified_boundary = boundary is not None
3016
3017 while True:
3018 if boundary is None:
3019 boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
3020
3021 try:
10c87c15 3022 out, content_type = _multipart_encode_impl(data, boundary)
0c265486
YCH
3023 break
3024 except ValueError:
3025 if has_specified_boundary:
3026 raise
3027 boundary = None
3028
3029 return out, content_type
3030
3031
86296ad2 3032def dict_get(d, key_or_keys, default=None, skip_false_values=True):
cbecc9b9
S
3033 if isinstance(key_or_keys, (list, tuple)):
3034 for key in key_or_keys:
86296ad2
S
3035 if key not in d or d[key] is None or skip_false_values and not d[key]:
3036 continue
3037 return d[key]
cbecc9b9
S
3038 return default
3039 return d.get(key_or_keys, default)
3040
3041
329ca3be 3042def try_get(src, getter, expected_type=None):
6606817a 3043 for get in variadic(getter):
a32a9a7e
S
3044 try:
3045 v = get(src)
3046 except (AttributeError, KeyError, TypeError, IndexError):
3047 pass
3048 else:
3049 if expected_type is None or isinstance(v, expected_type):
3050 return v
329ca3be
S
3051
3052
6cc62232
S
3053def merge_dicts(*dicts):
3054 merged = {}
3055 for a_dict in dicts:
3056 for k, v in a_dict.items():
3057 if v is None:
3058 continue
3089bc74
S
3059 if (k not in merged
3060 or (isinstance(v, compat_str) and v
3061 and isinstance(merged[k], compat_str)
3062 and not merged[k])):
6cc62232
S
3063 merged[k] = v
3064 return merged
3065
3066
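# Illustrative usage sketch (hypothetical dicts):
#   merge_dicts({'title': None, 'id': 'x1'}, {'title': 'Foo', 'id': 'ignored'})
#   # -> {'id': 'x1', 'title': 'Foo'}; earlier non-None values win, but an empty string
#   # may be replaced by a later non-empty one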
8e60dc75
S
3067def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
3068 return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
3069
16392824 3070
a1a530b0
PH
3071US_RATINGS = {
3072 'G': 0,
3073 'PG': 10,
3074 'PG-13': 13,
3075 'R': 16,
3076 'NC': 18,
3077}
fac55558
PH
3078
3079
a8795327 3080TV_PARENTAL_GUIDELINES = {
5a16c9d9
RA
3081 'TV-Y': 0,
3082 'TV-Y7': 7,
3083 'TV-G': 0,
3084 'TV-PG': 0,
3085 'TV-14': 14,
3086 'TV-MA': 17,
a8795327
S
3087}
3088
3089
146c80e2 3090def parse_age_limit(s):
a8795327
S
3091 if type(s) == int:
3092 return s if 0 <= s <= 21 else None
3093 if not isinstance(s, compat_basestring):
d838b1bd 3094 return None
146c80e2 3095 m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
a8795327
S
3096 if m:
3097 return int(m.group('age'))
5c5fae6d 3098 s = s.upper()
a8795327
S
3099 if s in US_RATINGS:
3100 return US_RATINGS[s]
5a16c9d9 3101 m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
b8361187 3102 if m:
5a16c9d9 3103 return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
b8361187 3104 return None
146c80e2
S
3105
3106
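# Illustrative usage sketch (hypothetical inputs):
#   parse_age_limit('18+')     # -> 18
#   parse_age_limit('PG-13')   # -> 13
#   parse_age_limit('TV-MA')   # -> 17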
fac55558 3107def strip_jsonp(code):
609a61e3 3108 return re.sub(
5552c9eb 3109 r'''(?sx)^
e9c671d5 3110 (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
5552c9eb
YCH
3111 (?:\s*&&\s*(?P=func_name))?
3112 \s*\(\s*(?P<callback_data>.*)\);?
3113 \s*?(?://[^\n]*)*$''',
3114 r'\g<callback_data>', code)
478c2c61
PH
3115
3116
5c610515 3117def js_to_json(code, vars={}):
3118 # vars is a dict of var, val pairs to substitute
c843e685 3119 COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
4195096e
S
3120 SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
3121 INTEGER_TABLE = (
3122 (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
3123 (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
3124 )
3125
e05f6939 3126 def fix_kv(m):
e7b6d122
PH
3127 v = m.group(0)
3128 if v in ('true', 'false', 'null'):
3129 return v
421ddcb8
C
3130 elif v in ('undefined', 'void 0'):
3131 return 'null'
8bdd16b4 3132 elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
bd1e4844 3133 return ""
3134
3135 if v[0] in ("'", '"'):
3136 v = re.sub(r'(?s)\\.|"', lambda m: {
e7b6d122 3137 '"': '\\"',
bd1e4844 3138 "\\'": "'",
3139 '\\\n': '',
3140 '\\x': '\\u00',
3141 }.get(m.group(0), m.group(0)), v[1:-1])
8bdd16b4 3142 else:
3143 for regex, base in INTEGER_TABLE:
3144 im = re.match(regex, v)
3145 if im:
3146 i = int(im.group(1), base)
3147 return '"%d":' % i if v.endswith(':') else '%d' % i
89ac4a19 3148
5c610515 3149 if v in vars:
3150 return vars[v]
3151
e7b6d122 3152 return '"%s"' % v
e05f6939 3153
febff4c1
B
3154 code = re.sub(r'new Date\((".+")\)', r'\g<1>', code)
3155
bd1e4844 3156 return re.sub(r'''(?sx)
3157 "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
3158 '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
4195096e 3159 {comment}|,(?={skip}[\]}}])|
421ddcb8 3160 void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
4195096e 3161 \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
8bdd16b4 3162 [0-9]+(?={skip}:)|
3163 !+
4195096e 3164 '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
e05f6939
PH
3165
3166
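# Illustrative usage sketch (hypothetical input):
#   js_to_json("{foo: 'bar', baz: 0x10, qux: undefined}")
#   # -> '{"foo": "bar", "baz": 16, "qux": null}'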
478c2c61
PH
3167def qualities(quality_ids):
3168 """ Get a numeric quality value out of a list of possible values """
3169 def q(qid):
3170 try:
3171 return quality_ids.index(qid)
3172 except ValueError:
3173 return -1
3174 return q
3175
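# Illustrative usage sketch (hypothetical quality list):
#   q = qualities(['240p', '360p', '720p'])
#   q('720p')    # -> 2
#   q('1080p')   # -> -1 (unknown values sort below all known ones)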
acd69589 3176
09b49e1f 3177POSTPROCESS_WHEN = {'pre_process', 'after_filter', 'before_dl', 'after_move', 'post_process', 'after_video', 'playlist'}
1e43a6f7 3178
3179
de6000d9 3180DEFAULT_OUTTMPL = {
3181 'default': '%(title)s [%(id)s].%(ext)s',
72755351 3182 'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
de6000d9 3183}
3184OUTTMPL_TYPES = {
72755351 3185 'chapter': None,
de6000d9 3186 'subtitle': None,
3187 'thumbnail': None,
3188 'description': 'description',
3189 'annotation': 'annotations.xml',
3190 'infojson': 'info.json',
08438d2c 3191 'link': None,
3b603dbd 3192 'pl_video': None,
5112f26a 3193 'pl_thumbnail': None,
de6000d9 3194 'pl_description': 'description',
3195 'pl_infojson': 'info.json',
3196}
0a871f68 3197
143db31d 3198# As of [1] format syntax is:
3199# %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
3200# 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
901130bb 3201STR_FORMAT_RE_TMPL = r'''(?x)
3202 (?<!%)(?P<prefix>(?:%%)*)
143db31d 3203 %
524e2e4f 3204 (?P<has_key>\((?P<key>{0})\))?
752cda38 3205 (?P<format>
524e2e4f 3206 (?P<conversion>[#0\-+ ]+)?
3207 (?P<min_width>\d+)?
3208 (?P<precision>\.\d+)?
3209 (?P<len_mod>[hlL])? # unused in python
901130bb 3210 {1} # conversion type
752cda38 3211 )
143db31d 3212'''
3213
7d1eb38a 3214
901130bb 3215STR_FORMAT_TYPES = 'diouxXeEfFgGcrs'
a020a0dc 3216
7d1eb38a 3217
a020a0dc
PH
3218def limit_length(s, length):
3219 """ Add ellipses to overly long strings """
3220 if s is None:
3221 return None
3222 ELLIPSES = '...'
3223 if len(s) > length:
3224 return s[:length - len(ELLIPSES)] + ELLIPSES
3225 return s
48844745
PH
3226
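# Illustrative usage sketch (hypothetical input):
#   limit_length('This is a very long title', 15)   # -> 'This is a ve...'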
3227
3228def version_tuple(v):
5f9b8394 3229 return tuple(int(e) for e in re.split(r'[-.]', v))
48844745
PH
3230
3231
3232def is_outdated_version(version, limit, assume_new=True):
3233 if not version:
3234 return not assume_new
3235 try:
3236 return version_tuple(version) < version_tuple(limit)
3237 except ValueError:
3238 return not assume_new
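# Example (illustrative): versions are compared numerically, component by
# component, after splitting on '.' and '-':
#
#   version_tuple('2021.12.01')                      # -> (2021, 12, 1)
#   is_outdated_version('2021.12.01', '2022.02.04')  # -> True
#   is_outdated_version(None, '2022.02.04')          # -> False (assume_new)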
732ea2f0
PH
3239
3240
3241def ytdl_is_updateable():
7a5c1cfe 3242 """ Returns whether yt-dlp can be updated with -U """
735d865e 3243
5d535b4a 3244 from .update import is_non_updateable
732ea2f0 3245
5d535b4a 3246 return not is_non_updateable()
7d4111ed
PH
3247
3248
3249def args_to_str(args):
3250 # Get a short string representation for a subprocess command
702ccf2d 3251 return ' '.join(compat_shlex_quote(a) for a in args)
2ccd1b10
PH
3252
3253
9b9c5355 3254def error_to_compat_str(err):
fdae2358
S
3255 err_str = str(err)
3256 # On Python 2, the error byte string must be decoded with the proper
3257 # encoding rather than ASCII
3258 if sys.version_info[0] < 3:
3259 err_str = err_str.decode(preferredencoding())
3260 return err_str
3261
3262
c460bdd5 3263def mimetype2ext(mt):
eb9ee194
S
3264 if mt is None:
3265 return None
3266
9359f3d4
F
3267 mt, _, params = mt.partition(';')
3268 mt = mt.strip()
3269
3270 FULL_MAP = {
765ac263 3271 'audio/mp4': 'm4a',
6c33d24b
YCH
3272 # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Use .mp3 here as
3273 # it is the most common one
3274 'audio/mpeg': 'mp3',
ba39289d 3275 'audio/x-wav': 'wav',
9359f3d4
F
3276 'audio/wav': 'wav',
3277 'audio/wave': 'wav',
3278 }
3279
3280 ext = FULL_MAP.get(mt)
765ac263
JMF
3281 if ext is not None:
3282 return ext
3283
9359f3d4 3284 SUBTYPE_MAP = {
f6861ec9 3285 '3gpp': '3gp',
cafcf657 3286 'smptett+xml': 'tt',
cafcf657 3287 'ttaf+xml': 'dfxp',
a0d8d704 3288 'ttml+xml': 'ttml',
f6861ec9 3289 'x-flv': 'flv',
a0d8d704 3290 'x-mp4-fragmented': 'mp4',
d4f05d47 3291 'x-ms-sami': 'sami',
a0d8d704 3292 'x-ms-wmv': 'wmv',
b4173f15
RA
3293 'mpegurl': 'm3u8',
3294 'x-mpegurl': 'm3u8',
3295 'vnd.apple.mpegurl': 'm3u8',
3296 'dash+xml': 'mpd',
b4173f15 3297 'f4m+xml': 'f4m',
f164b971 3298 'hds+xml': 'f4m',
e910fe2f 3299 'vnd.ms-sstr+xml': 'ism',
c2b2c7e1 3300 'quicktime': 'mov',
98ce1a3f 3301 'mp2t': 'ts',
39e7107d 3302 'x-wav': 'wav',
9359f3d4
F
3303 'filmstrip+json': 'fs',
3304 'svg+xml': 'svg',
3305 }
3306
3307 _, _, subtype = mt.rpartition('/')
3308 ext = SUBTYPE_MAP.get(subtype.lower())
3309 if ext is not None:
3310 return ext
3311
3312 SUFFIX_MAP = {
3313 'json': 'json',
3314 'xml': 'xml',
3315 'zip': 'zip',
3316 'gzip': 'gz',
3317 }
3318
3319 _, _, suffix = subtype.partition('+')
3320 ext = SUFFIX_MAP.get(suffix)
3321 if ext is not None:
3322 return ext
3323
3324 return subtype.replace('+', '.')
c460bdd5
PH
3325
3326
2814f12b
THD
3327def ext2mimetype(ext_or_url):
3328 if not ext_or_url:
3329 return None
3330 if '.' not in ext_or_url:
3331 ext_or_url = f'file.{ext_or_url}'
3332 return mimetypes.guess_type(ext_or_url)[0]
3333
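# Example (illustrative): mimetype2ext() strips any parameters, then tries the
# full type, the subtype and finally the '+' suffix; ext2mimetype() goes the
# other way via the stdlib mimetypes module:
#
#   mimetype2ext('audio/mp4')                        # -> 'm4a'
#   mimetype2ext('application/x-mpegURL')            # -> 'm3u8'
#   mimetype2ext('video/mp4; codecs="avc1.42E01E"')  # -> 'mp4'
#   ext2mimetype('mp4')                              # -> 'video/mp4' (typically)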
3334
4f3c5e06 3335def parse_codecs(codecs_str):
3336 # http://tools.ietf.org/html/rfc6381
3337 if not codecs_str:
3338 return {}
a0566bbf 3339 split_codecs = list(filter(None, map(
dbf5416a 3340 str.strip, codecs_str.strip().strip(',').split(','))))
4afa3ec4 3341 vcodec, acodec, tcodec, hdr = None, None, None, None
a0566bbf 3342 for full_codec in split_codecs:
9bd979ca 3343 parts = full_codec.split('.')
3344 codec = parts[0].replace('0', '')
3345 if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
3346 'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
4f3c5e06 3347 if not vcodec:
b69fd25c 3348 vcodec = '.'.join(parts[:4]) if codec in ('vp9', 'av1', 'hvc1') else full_codec
176f1866 3349 if codec in ('dvh1', 'dvhe'):
3350 hdr = 'DV'
9bd979ca 3351 elif codec == 'av1' and len(parts) > 3 and parts[3] == '10':
3352 hdr = 'HDR10'
3353 elif full_codec.replace('0', '').startswith('vp9.2'):
176f1866 3354 hdr = 'HDR10'
b69fd25c 3355 elif codec in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
4f3c5e06 3356 if not acodec:
3357 acodec = full_codec
4afa3ec4
F
3358 elif codec in ('stpp', 'wvtt',):
3359 if not tcodec:
3360 tcodec = full_codec
4f3c5e06 3361 else:
60f5c9fb 3362 write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
4afa3ec4 3363 if vcodec or acodec or tcodec:
4f3c5e06 3364 return {
3365 'vcodec': vcodec or 'none',
3366 'acodec': acodec or 'none',
176f1866 3367 'dynamic_range': hdr,
4afa3ec4 3368 **({'tcodec': tcodec} if tcodec is not None else {}),
4f3c5e06 3369 }
b69fd25c 3370 elif len(split_codecs) == 2:
3371 return {
3372 'vcodec': split_codecs[0],
3373 'acodec': split_codecs[1],
3374 }
4f3c5e06 3375 return {}
3376
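# Example (illustrative): a typical DASH/HLS CODECS value is split into video,
# audio and (optionally) text codecs:
#
#   parse_codecs('avc1.64001f, mp4a.40.2')
#   # -> {'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2', 'dynamic_range': None}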
3377
2ccd1b10 3378def urlhandle_detect_ext(url_handle):
79298173 3379 getheader = url_handle.headers.get
2ccd1b10 3380
b55ee18f
PH
3381 cd = getheader('Content-Disposition')
3382 if cd:
3383 m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
3384 if m:
3385 e = determine_ext(m.group('filename'), default_ext=None)
3386 if e:
3387 return e
3388
c460bdd5 3389 return mimetype2ext(getheader('Content-Type'))
05900629
PH
3390
3391
1e399778
YCH
3392def encode_data_uri(data, mime_type):
3393 return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
3394
3395
05900629 3396def age_restricted(content_limit, age_limit):
6ec6cb4e 3397 """ Returns True iff the content should be blocked """
05900629
PH
3398
3399 if age_limit is None: # No limit set
3400 return False
3401 if content_limit is None:
3402 return False # Content available for everyone
3403 return age_limit < content_limit
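# Example (illustrative): content is blocked only when both limits are set and
# the user's age limit is below the content's rating:
#
#   age_restricted(18, 16)    # -> True  (16 < 18: block)
#   age_restricted(18, 18)    # -> False
#   age_restricted(None, 18)  # -> False (content has no rating)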
61ca9a80
PH
3404
3405
3406def is_html(first_bytes):
3407 """ Detect whether a file contains HTML by examining its first bytes. """
3408
3409 BOMS = [
3410 (b'\xef\xbb\xbf', 'utf-8'),
3411 (b'\x00\x00\xfe\xff', 'utf-32-be'),
3412 (b'\xff\xfe\x00\x00', 'utf-32-le'),
3413 (b'\xff\xfe', 'utf-16-le'),
3414 (b'\xfe\xff', 'utf-16-be'),
3415 ]
3416 for bom, enc in BOMS:
3417 if first_bytes.startswith(bom):
3418 s = first_bytes[len(bom):].decode(enc, 'replace')
3419 break
3420 else:
3421 s = first_bytes.decode('utf-8', 'replace')
3422
3423 return re.match(r'^\s*<', s)
a055469f
PH
3424
3425
3426def determine_protocol(info_dict):
3427 protocol = info_dict.get('protocol')
3428 if protocol is not None:
3429 return protocol
3430
7de837a5 3431 url = sanitize_url(info_dict['url'])
a055469f
PH
3432 if url.startswith('rtmp'):
3433 return 'rtmp'
3434 elif url.startswith('mms'):
3435 return 'mms'
3436 elif url.startswith('rtsp'):
3437 return 'rtsp'
3438
3439 ext = determine_ext(url)
3440 if ext == 'm3u8':
3441 return 'm3u8'
3442 elif ext == 'f4m':
3443 return 'f4m'
3444
3445 return compat_urllib_parse_urlparse(url).scheme
cfb56d1a
PH
3446
3447
c5e3f849 3448def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False):
3449 """ Render a list of rows, each as a list of values.
3450 Text after a \t will be right aligned """
ec11a9f4 3451 def width(string):
c5e3f849 3452 return len(remove_terminal_sequences(string).replace('\t', ''))
76d321f6 3453
3454 def get_max_lens(table):
ec11a9f4 3455 return [max(width(str(v)) for v in col) for col in zip(*table)]
76d321f6 3456
3457 def filter_using_list(row, filterArray):
d16df59d 3458 return [col for take, col in itertools.zip_longest(filterArray, row, fillvalue=True) if take]
76d321f6 3459
d16df59d 3460 max_lens = get_max_lens(data) if hide_empty else []
3461 header_row = filter_using_list(header_row, max_lens)
3462 data = [filter_using_list(row, max_lens) for row in data]
76d321f6 3463
cfb56d1a 3464 table = [header_row] + data
76d321f6 3465 max_lens = get_max_lens(table)
c5e3f849 3466 extra_gap += 1
76d321f6 3467 if delim:
c5e3f849 3468 table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data
3469 table[1][-1] = table[1][-1][:-extra_gap] # Remove extra_gap from end of delimiter
ec11a9f4 3470 for row in table:
3471 for pos, text in enumerate(map(str, row)):
c5e3f849 3472 if '\t' in text:
3473 row[pos] = text.replace('\t', ' ' * (max_lens[pos] - width(text))) + ' ' * extra_gap
3474 else:
3475 row[pos] = text + ' ' * (max_lens[pos] - width(text) + extra_gap)
3476 ret = '\n'.join(''.join(row).rstrip() for row in table)
ec11a9f4 3477 return ret
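# Example (illustrative; exact spacing depends on the column widths):
#
#   render_table(['id', 'ext'], [['1', 'mp4'], ['2', 'webm']])
#   # ->
#   # id ext
#   # 1  mp4
#   # 2  webm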
347de493
PH
3478
3479
8f18aca8 3480def _match_one(filter_part, dct, incomplete):
77b87f05 3481 # TODO: Generalize code with YoutubeDL._build_format_filter
a047eeb6 3482 STRING_OPERATORS = {
3483 '*=': operator.contains,
3484 '^=': lambda attr, value: attr.startswith(value),
3485 '$=': lambda attr, value: attr.endswith(value),
3486 '~=': lambda attr, value: re.search(value, attr),
3487 }
347de493 3488 COMPARISON_OPERATORS = {
a047eeb6 3489 **STRING_OPERATORS,
3490 '<=': operator.le, # "<=" must be defined above "<"
347de493 3491 '<': operator.lt,
347de493 3492 '>=': operator.ge,
a047eeb6 3493 '>': operator.gt,
347de493 3494 '=': operator.eq,
347de493 3495 }
a047eeb6 3496
347de493
PH
3497 operator_rex = re.compile(r'''(?x)\s*
3498 (?P<key>[a-z_]+)
77b87f05 3499 \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
347de493 3500 (?:
a047eeb6 3501 (?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
3502 (?P<strval>.+?)
347de493
PH
3503 )
3504 \s*$
3505 ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
3506 m = operator_rex.search(filter_part)
3507 if m:
18f96d12 3508 m = m.groupdict()
3509 unnegated_op = COMPARISON_OPERATORS[m['op']]
3510 if m['negation']:
77b87f05
MT
3511 op = lambda attr, value: not unnegated_op(attr, value)
3512 else:
3513 op = unnegated_op
18f96d12 3514 comparison_value = m['quotedstrval'] or m['strval'] or m['intval']
3515 if m['quote']:
3516 comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote'])
3517 actual_value = dct.get(m['key'])
3518 numeric_comparison = None
3519 if isinstance(actual_value, compat_numeric_types):
e5a088dc
S
3520 # If the original field is a string and the matching comparison value is
3521 # a number, we should respect the origin of the original field
3522 # and process comparison value as a string (see
18f96d12 3523 # https://github.com/ytdl-org/youtube-dl/issues/11082)
347de493 3524 try:
18f96d12 3525 numeric_comparison = int(comparison_value)
347de493 3526 except ValueError:
18f96d12 3527 numeric_comparison = parse_filesize(comparison_value)
3528 if numeric_comparison is None:
3529 numeric_comparison = parse_filesize(f'{comparison_value}B')
3530 if numeric_comparison is None:
3531 numeric_comparison = parse_duration(comparison_value)
3532 if numeric_comparison is not None and m['op'] in STRING_OPERATORS:
3533 raise ValueError('Operator %s only supports string values!' % m['op'])
347de493 3534 if actual_value is None:
18f96d12 3535 return incomplete or m['none_inclusive']
3536 return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)
347de493
PH
3537
3538 UNARY_OPERATORS = {
1cc47c66
S
3539 '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
3540 '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
347de493
PH
3541 }
3542 operator_rex = re.compile(r'''(?x)\s*
3543 (?P<op>%s)\s*(?P<key>[a-z_]+)
3544 \s*$
3545 ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
3546 m = operator_rex.search(filter_part)
3547 if m:
3548 op = UNARY_OPERATORS[m.group('op')]
3549 actual_value = dct.get(m.group('key'))
8f18aca8 3550 if incomplete and actual_value is None:
3551 return True
347de493
PH
3552 return op(actual_value)
3553
3554 raise ValueError('Invalid filter part %r' % filter_part)
3555
3556
8f18aca8 3557def match_str(filter_str, dct, incomplete=False):
3558 """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or False.
3559 When incomplete, all conditions pass on missing fields
3560 """
347de493 3561 return all(
8f18aca8 3562 _match_one(filter_part.replace(r'\&', '&'), dct, incomplete)
a047eeb6 3563 for filter_part in re.split(r'(?<!\\)&', filter_str))
347de493
PH
3564
3565
3566def match_filter_func(filter_str):
8f18aca8 3567 def _match_func(info_dict, *args, **kwargs):
3568 if match_str(filter_str, info_dict, *args, **kwargs):
347de493
PH
3569 return None
3570 else:
3571 video_title = info_dict.get('title', info_dict.get('id', 'video'))
3572 return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
3573 return _match_func
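# Example (illustrative): the filter syntax mirrors --match-filter; '&' joins
# conditions and '?' makes a comparison pass when the field is missing:
#
#   info = {'like_count': 190, 'description': 'some text'}
#   match_str('like_count > 100 & description', info)          # -> True
#   match_str('like_count > 100 & dislike_count <? 50', info)  # -> True
#   match_str('like_count > 500', info)                         # -> False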
91410c9b
PH
3574
3575
bf6427d2
YCH
3576def parse_dfxp_time_expr(time_expr):
3577 if not time_expr:
d631d5f9 3578 return
bf6427d2
YCH
3579
3580 mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
3581 if mobj:
3582 return float(mobj.group('time_offset'))
3583
db2fe38b 3584 mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
bf6427d2 3585 if mobj:
db2fe38b 3586 return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
bf6427d2
YCH
3587
3588
c1c924ab 3589def srt_subtitles_timecode(seconds):
aa7785f8 3590 return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)
3591
3592
3593def ass_subtitles_timecode(seconds):
3594 time = timetuple_from_msec(seconds * 1000)
3595 return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)
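# Example (illustrative; timetuple_from_msec() is defined earlier in this module):
#
#   srt_subtitles_timecode(61.5)  # -> '00:01:01,500'
#   ass_subtitles_timecode(61.5)  # -> '0:01:01.50'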
bf6427d2
YCH
3596
3597
3598def dfxp2srt(dfxp_data):
3869028f
YCH
3599 '''
3600 @param dfxp_data A bytes-like object containing DFXP data
3601 @returns A unicode object containing converted SRT data
3602 '''
5b995f71 3603 LEGACY_NAMESPACES = (
3869028f
YCH
3604 (b'http://www.w3.org/ns/ttml', [
3605 b'http://www.w3.org/2004/11/ttaf1',
3606 b'http://www.w3.org/2006/04/ttaf1',
3607 b'http://www.w3.org/2006/10/ttaf1',
5b995f71 3608 ]),
3869028f
YCH
3609 (b'http://www.w3.org/ns/ttml#styling', [
3610 b'http://www.w3.org/ns/ttml#style',
5b995f71
RA
3611 ]),
3612 )
3613
3614 SUPPORTED_STYLING = [
3615 'color',
3616 'fontFamily',
3617 'fontSize',
3618 'fontStyle',
3619 'fontWeight',
3620 'textDecoration'
3621 ]
3622
4e335771 3623 _x = functools.partial(xpath_with_ns, ns_map={
261f4730 3624 'xml': 'http://www.w3.org/XML/1998/namespace',
4e335771 3625 'ttml': 'http://www.w3.org/ns/ttml',
5b995f71 3626 'tts': 'http://www.w3.org/ns/ttml#styling',
4e335771 3627 })
bf6427d2 3628
5b995f71
RA
3629 styles = {}
3630 default_style = {}
3631
87de7069 3632 class TTMLPElementParser(object):
5b995f71
RA
3633 _out = ''
3634 _unclosed_elements = []
3635 _applied_styles = []
bf6427d2 3636
2b14cb56 3637 def start(self, tag, attrib):
5b995f71
RA
3638 if tag in (_x('ttml:br'), 'br'):
3639 self._out += '\n'
3640 else:
3641 unclosed_elements = []
3642 style = {}
3643 element_style_id = attrib.get('style')
3644 if default_style:
3645 style.update(default_style)
3646 if element_style_id:
3647 style.update(styles.get(element_style_id, {}))
3648 for prop in SUPPORTED_STYLING:
3649 prop_val = attrib.get(_x('tts:' + prop))
3650 if prop_val:
3651 style[prop] = prop_val
3652 if style:
3653 font = ''
3654 for k, v in sorted(style.items()):
3655 if self._applied_styles and self._applied_styles[-1].get(k) == v:
3656 continue
3657 if k == 'color':
3658 font += ' color="%s"' % v
3659 elif k == 'fontSize':
3660 font += ' size="%s"' % v
3661 elif k == 'fontFamily':
3662 font += ' face="%s"' % v
3663 elif k == 'fontWeight' and v == 'bold':
3664 self._out += '<b>'
3665 unclosed_elements.append('b')
3666 elif k == 'fontStyle' and v == 'italic':
3667 self._out += '<i>'
3668 unclosed_elements.append('i')
3669 elif k == 'textDecoration' and v == 'underline':
3670 self._out += '<u>'
3671 unclosed_elements.append('u')
3672 if font:
3673 self._out += '<font' + font + '>'
3674 unclosed_elements.append('font')
3675 applied_style = {}
3676 if self._applied_styles:
3677 applied_style.update(self._applied_styles[-1])
3678 applied_style.update(style)
3679 self._applied_styles.append(applied_style)
3680 self._unclosed_elements.append(unclosed_elements)
bf6427d2 3681
2b14cb56 3682 def end(self, tag):
5b995f71
RA
3683 if tag not in (_x('ttml:br'), 'br'):
3684 unclosed_elements = self._unclosed_elements.pop()
3685 for element in reversed(unclosed_elements):
3686 self._out += '</%s>' % element
3687 if unclosed_elements and self._applied_styles:
3688 self._applied_styles.pop()
bf6427d2 3689
2b14cb56 3690 def data(self, data):
5b995f71 3691 self._out += data
2b14cb56 3692
3693 def close(self):
5b995f71 3694 return self._out.strip()
2b14cb56 3695
3696 def parse_node(node):
3697 target = TTMLPElementParser()
3698 parser = xml.etree.ElementTree.XMLParser(target=target)
3699 parser.feed(xml.etree.ElementTree.tostring(node))
3700 return parser.close()
bf6427d2 3701
5b995f71
RA
3702 for k, v in LEGACY_NAMESPACES:
3703 for ns in v:
3704 dfxp_data = dfxp_data.replace(ns, k)
3705
3869028f 3706 dfxp = compat_etree_fromstring(dfxp_data)
bf6427d2 3707 out = []
5b995f71 3708 paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
1b0427e6
YCH
3709
3710 if not paras:
3711 raise ValueError('Invalid dfxp/TTML subtitle')
bf6427d2 3712
5b995f71
RA
3713 repeat = False
3714 while True:
3715 for style in dfxp.findall(_x('.//ttml:style')):
261f4730
RA
3716 style_id = style.get('id') or style.get(_x('xml:id'))
3717 if not style_id:
3718 continue
5b995f71
RA
3719 parent_style_id = style.get('style')
3720 if parent_style_id:
3721 if parent_style_id not in styles:
3722 repeat = True
3723 continue
3724 styles[style_id] = styles[parent_style_id].copy()
3725 for prop in SUPPORTED_STYLING:
3726 prop_val = style.get(_x('tts:' + prop))
3727 if prop_val:
3728 styles.setdefault(style_id, {})[prop] = prop_val
3729 if repeat:
3730 repeat = False
3731 else:
3732 break
3733
3734 for p in ('body', 'div'):
3735 ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
3736 if ele is None:
3737 continue
3738 style = styles.get(ele.get('style'))
3739 if not style:
3740 continue
3741 default_style.update(style)
3742
bf6427d2 3743 for para, index in zip(paras, itertools.count(1)):
d631d5f9 3744 begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
7dff0363 3745 end_time = parse_dfxp_time_expr(para.attrib.get('end'))
d631d5f9
YCH
3746 dur = parse_dfxp_time_expr(para.attrib.get('dur'))
3747 if begin_time is None:
3748 continue
7dff0363 3749 if not end_time:
d631d5f9
YCH
3750 if not dur:
3751 continue
3752 end_time = begin_time + dur
bf6427d2
YCH
3753 out.append('%d\n%s --> %s\n%s\n\n' % (
3754 index,
c1c924ab
YCH
3755 srt_subtitles_timecode(begin_time),
3756 srt_subtitles_timecode(end_time),
bf6427d2
YCH
3757 parse_node(para)))
3758
3759 return ''.join(out)
3760
3761
66e289ba
S
3762def cli_option(params, command_option, param):
3763 param = params.get(param)
98e698f1
RA
3764 if param:
3765 param = compat_str(param)
66e289ba
S
3766 return [command_option, param] if param is not None else []
3767
3768
3769def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
3770 param = params.get(param)
5b232f46
S
3771 if param is None:
3772 return []
66e289ba
S
3773 assert isinstance(param, bool)
3774 if separator:
3775 return [command_option + separator + (true_value if param else false_value)]
3776 return [command_option, true_value if param else false_value]
3777
3778
3779def cli_valueless_option(params, command_option, param, expected_value=True):
3780 param = params.get(param)
3781 return [command_option] if param == expected_value else []
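# Example (illustrative): these helpers translate yt-dlp params into external
# downloader command-line arguments:
#
#   cli_option({'proxy': 'http://example.com:3128'}, '--proxy', 'proxy')
#   # -> ['--proxy', 'http://example.com:3128']
#   cli_bool_option({'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate')
#   # -> ['--no-check-certificate', 'true']
#   cli_valueless_option({'quiet': True}, '--quiet', 'quiet')
#   # -> ['--quiet']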
3782
3783
e92caff5 3784def cli_configuration_args(argdict, keys, default=[], use_compat=True):
eab9b2bc 3785 if isinstance(argdict, (list, tuple)): # for backward compatibility
e92caff5 3786 if use_compat:
5b1ecbb3 3787 return argdict
3788 else:
3789 argdict = None
eab9b2bc 3790 if argdict is None:
5b1ecbb3 3791 return default
eab9b2bc 3792 assert isinstance(argdict, dict)
3793
e92caff5 3794 assert isinstance(keys, (list, tuple))
3795 for key_list in keys:
e92caff5 3796 arg_list = list(filter(
3797 lambda x: x is not None,
6606817a 3798 [argdict.get(key.lower()) for key in variadic(key_list)]))
e92caff5 3799 if arg_list:
3800 return [arg for args in arg_list for arg in args]
3801 return default
66e289ba 3802
6251555f 3803
330690a2 3804def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compat=True):
3805 main_key, exe = main_key.lower(), exe.lower()
3806 root_key = exe if main_key == exe else f'{main_key}+{exe}'
3807 keys = [f'{root_key}{k}' for k in (keys or [''])]
3808 if root_key in keys:
3809 if main_key != exe:
3810 keys.append((main_key, exe))
3811 keys.append('default')
3812 else:
3813 use_compat = False
3814 return cli_configuration_args(argdict, keys, default, use_compat)
3815
66e289ba 3816
39672624
YCH
3817class ISO639Utils(object):
3818 # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
3819 _lang_map = {
3820 'aa': 'aar',
3821 'ab': 'abk',
3822 'ae': 'ave',
3823 'af': 'afr',
3824 'ak': 'aka',
3825 'am': 'amh',
3826 'an': 'arg',
3827 'ar': 'ara',
3828 'as': 'asm',
3829 'av': 'ava',
3830 'ay': 'aym',
3831 'az': 'aze',
3832 'ba': 'bak',
3833 'be': 'bel',
3834 'bg': 'bul',
3835 'bh': 'bih',
3836 'bi': 'bis',
3837 'bm': 'bam',
3838 'bn': 'ben',
3839 'bo': 'bod',
3840 'br': 'bre',
3841 'bs': 'bos',
3842 'ca': 'cat',
3843 'ce': 'che',
3844 'ch': 'cha',
3845 'co': 'cos',
3846 'cr': 'cre',
3847 'cs': 'ces',
3848 'cu': 'chu',
3849 'cv': 'chv',
3850 'cy': 'cym',
3851 'da': 'dan',
3852 'de': 'deu',
3853 'dv': 'div',
3854 'dz': 'dzo',
3855 'ee': 'ewe',
3856 'el': 'ell',
3857 'en': 'eng',
3858 'eo': 'epo',
3859 'es': 'spa',
3860 'et': 'est',
3861 'eu': 'eus',
3862 'fa': 'fas',
3863 'ff': 'ful',
3864 'fi': 'fin',
3865 'fj': 'fij',
3866 'fo': 'fao',
3867 'fr': 'fra',
3868 'fy': 'fry',
3869 'ga': 'gle',
3870 'gd': 'gla',
3871 'gl': 'glg',
3872 'gn': 'grn',
3873 'gu': 'guj',
3874 'gv': 'glv',
3875 'ha': 'hau',
3876 'he': 'heb',
b7acc835 3877 'iw': 'heb', # Replaced by he in 1989 revision
39672624
YCH
3878 'hi': 'hin',
3879 'ho': 'hmo',
3880 'hr': 'hrv',
3881 'ht': 'hat',
3882 'hu': 'hun',
3883 'hy': 'hye',
3884 'hz': 'her',
3885 'ia': 'ina',
3886 'id': 'ind',
b7acc835 3887 'in': 'ind', # Replaced by id in 1989 revision
39672624
YCH
3888 'ie': 'ile',
3889 'ig': 'ibo',
3890 'ii': 'iii',
3891 'ik': 'ipk',
3892 'io': 'ido',
3893 'is': 'isl',
3894 'it': 'ita',
3895 'iu': 'iku',
3896 'ja': 'jpn',
3897 'jv': 'jav',
3898 'ka': 'kat',
3899 'kg': 'kon',
3900 'ki': 'kik',
3901 'kj': 'kua',
3902 'kk': 'kaz',
3903 'kl': 'kal',
3904 'km': 'khm',
3905 'kn': 'kan',
3906 'ko': 'kor',
3907 'kr': 'kau',
3908 'ks': 'kas',
3909 'ku': 'kur',
3910 'kv': 'kom',
3911 'kw': 'cor',
3912 'ky': 'kir',
3913 'la': 'lat',
3914 'lb': 'ltz',
3915 'lg': 'lug',
3916 'li': 'lim',
3917 'ln': 'lin',
3918 'lo': 'lao',
3919 'lt': 'lit',
3920 'lu': 'lub',
3921 'lv': 'lav',
3922 'mg': 'mlg',
3923 'mh': 'mah',
3924 'mi': 'mri',
3925 'mk': 'mkd',
3926 'ml': 'mal',
3927 'mn': 'mon',
3928 'mr': 'mar',
3929 'ms': 'msa',
3930 'mt': 'mlt',
3931 'my': 'mya',
3932 'na': 'nau',
3933 'nb': 'nob',
3934 'nd': 'nde',
3935 'ne': 'nep',
3936 'ng': 'ndo',
3937 'nl': 'nld',
3938 'nn': 'nno',
3939 'no': 'nor',
3940 'nr': 'nbl',
3941 'nv': 'nav',
3942 'ny': 'nya',
3943 'oc': 'oci',
3944 'oj': 'oji',
3945 'om': 'orm',
3946 'or': 'ori',
3947 'os': 'oss',
3948 'pa': 'pan',
3949 'pi': 'pli',
3950 'pl': 'pol',
3951 'ps': 'pus',
3952 'pt': 'por',
3953 'qu': 'que',
3954 'rm': 'roh',
3955 'rn': 'run',
3956 'ro': 'ron',
3957 'ru': 'rus',
3958 'rw': 'kin',
3959 'sa': 'san',
3960 'sc': 'srd',
3961 'sd': 'snd',
3962 'se': 'sme',
3963 'sg': 'sag',
3964 'si': 'sin',
3965 'sk': 'slk',
3966 'sl': 'slv',
3967 'sm': 'smo',
3968 'sn': 'sna',
3969 'so': 'som',
3970 'sq': 'sqi',
3971 'sr': 'srp',
3972 'ss': 'ssw',
3973 'st': 'sot',
3974 'su': 'sun',
3975 'sv': 'swe',
3976 'sw': 'swa',
3977 'ta': 'tam',
3978 'te': 'tel',
3979 'tg': 'tgk',
3980 'th': 'tha',
3981 'ti': 'tir',
3982 'tk': 'tuk',
3983 'tl': 'tgl',
3984 'tn': 'tsn',
3985 'to': 'ton',
3986 'tr': 'tur',
3987 'ts': 'tso',
3988 'tt': 'tat',
3989 'tw': 'twi',
3990 'ty': 'tah',
3991 'ug': 'uig',
3992 'uk': 'ukr',
3993 'ur': 'urd',
3994 'uz': 'uzb',
3995 've': 'ven',
3996 'vi': 'vie',
3997 'vo': 'vol',
3998 'wa': 'wln',
3999 'wo': 'wol',
4000 'xh': 'xho',
4001 'yi': 'yid',
e9a50fba 4002 'ji': 'yid', # Replaced by yi in 1989 revision
39672624
YCH
4003 'yo': 'yor',
4004 'za': 'zha',
4005 'zh': 'zho',
4006 'zu': 'zul',
4007 }
4008
4009 @classmethod
4010 def short2long(cls, code):
4011 """Convert language code from ISO 639-1 to ISO 639-2/T"""
4012 return cls._lang_map.get(code[:2])
4013
4014 @classmethod
4015 def long2short(cls, code):
4016 """Convert language code from ISO 639-2/T to ISO 639-1"""
4017 for short_name, long_name in cls._lang_map.items():
4018 if long_name == code:
4019 return short_name
4020
4021
4eb10f66
YCH
4022class ISO3166Utils(object):
4023 # From http://data.okfn.org/data/core/country-list
4024 _country_map = {
4025 'AF': 'Afghanistan',
4026 'AX': 'Åland Islands',
4027 'AL': 'Albania',
4028 'DZ': 'Algeria',
4029 'AS': 'American Samoa',
4030 'AD': 'Andorra',
4031 'AO': 'Angola',
4032 'AI': 'Anguilla',
4033 'AQ': 'Antarctica',
4034 'AG': 'Antigua and Barbuda',
4035 'AR': 'Argentina',
4036 'AM': 'Armenia',
4037 'AW': 'Aruba',
4038 'AU': 'Australia',
4039 'AT': 'Austria',
4040 'AZ': 'Azerbaijan',
4041 'BS': 'Bahamas',
4042 'BH': 'Bahrain',
4043 'BD': 'Bangladesh',
4044 'BB': 'Barbados',
4045 'BY': 'Belarus',
4046 'BE': 'Belgium',
4047 'BZ': 'Belize',
4048 'BJ': 'Benin',
4049 'BM': 'Bermuda',
4050 'BT': 'Bhutan',
4051 'BO': 'Bolivia, Plurinational State of',
4052 'BQ': 'Bonaire, Sint Eustatius and Saba',
4053 'BA': 'Bosnia and Herzegovina',
4054 'BW': 'Botswana',
4055 'BV': 'Bouvet Island',
4056 'BR': 'Brazil',
4057 'IO': 'British Indian Ocean Territory',
4058 'BN': 'Brunei Darussalam',
4059 'BG': 'Bulgaria',
4060 'BF': 'Burkina Faso',
4061 'BI': 'Burundi',
4062 'KH': 'Cambodia',
4063 'CM': 'Cameroon',
4064 'CA': 'Canada',
4065 'CV': 'Cape Verde',
4066 'KY': 'Cayman Islands',
4067 'CF': 'Central African Republic',
4068 'TD': 'Chad',
4069 'CL': 'Chile',
4070 'CN': 'China',
4071 'CX': 'Christmas Island',
4072 'CC': 'Cocos (Keeling) Islands',
4073 'CO': 'Colombia',
4074 'KM': 'Comoros',
4075 'CG': 'Congo',
4076 'CD': 'Congo, the Democratic Republic of the',
4077 'CK': 'Cook Islands',
4078 'CR': 'Costa Rica',
4079 'CI': 'Côte d\'Ivoire',
4080 'HR': 'Croatia',
4081 'CU': 'Cuba',
4082 'CW': 'Curaçao',
4083 'CY': 'Cyprus',
4084 'CZ': 'Czech Republic',
4085 'DK': 'Denmark',
4086 'DJ': 'Djibouti',
4087 'DM': 'Dominica',
4088 'DO': 'Dominican Republic',
4089 'EC': 'Ecuador',
4090 'EG': 'Egypt',
4091 'SV': 'El Salvador',
4092 'GQ': 'Equatorial Guinea',
4093 'ER': 'Eritrea',
4094 'EE': 'Estonia',
4095 'ET': 'Ethiopia',
4096 'FK': 'Falkland Islands (Malvinas)',
4097 'FO': 'Faroe Islands',
4098 'FJ': 'Fiji',
4099 'FI': 'Finland',
4100 'FR': 'France',
4101 'GF': 'French Guiana',
4102 'PF': 'French Polynesia',
4103 'TF': 'French Southern Territories',
4104 'GA': 'Gabon',
4105 'GM': 'Gambia',
4106 'GE': 'Georgia',
4107 'DE': 'Germany',
4108 'GH': 'Ghana',
4109 'GI': 'Gibraltar',
4110 'GR': 'Greece',
4111 'GL': 'Greenland',
4112 'GD': 'Grenada',
4113 'GP': 'Guadeloupe',
4114 'GU': 'Guam',
4115 'GT': 'Guatemala',
4116 'GG': 'Guernsey',
4117 'GN': 'Guinea',
4118 'GW': 'Guinea-Bissau',
4119 'GY': 'Guyana',
4120 'HT': 'Haiti',
4121 'HM': 'Heard Island and McDonald Islands',
4122 'VA': 'Holy See (Vatican City State)',
4123 'HN': 'Honduras',
4124 'HK': 'Hong Kong',
4125 'HU': 'Hungary',
4126 'IS': 'Iceland',
4127 'IN': 'India',
4128 'ID': 'Indonesia',
4129 'IR': 'Iran, Islamic Republic of',
4130 'IQ': 'Iraq',
4131 'IE': 'Ireland',
4132 'IM': 'Isle of Man',
4133 'IL': 'Israel',
4134 'IT': 'Italy',
4135 'JM': 'Jamaica',
4136 'JP': 'Japan',
4137 'JE': 'Jersey',
4138 'JO': 'Jordan',
4139 'KZ': 'Kazakhstan',
4140 'KE': 'Kenya',
4141 'KI': 'Kiribati',
4142 'KP': 'Korea, Democratic People\'s Republic of',
4143 'KR': 'Korea, Republic of',
4144 'KW': 'Kuwait',
4145 'KG': 'Kyrgyzstan',
4146 'LA': 'Lao People\'s Democratic Republic',
4147 'LV': 'Latvia',
4148 'LB': 'Lebanon',
4149 'LS': 'Lesotho',
4150 'LR': 'Liberia',
4151 'LY': 'Libya',
4152 'LI': 'Liechtenstein',
4153 'LT': 'Lithuania',
4154 'LU': 'Luxembourg',
4155 'MO': 'Macao',
4156 'MK': 'Macedonia, the Former Yugoslav Republic of',
4157 'MG': 'Madagascar',
4158 'MW': 'Malawi',
4159 'MY': 'Malaysia',
4160 'MV': 'Maldives',
4161 'ML': 'Mali',
4162 'MT': 'Malta',
4163 'MH': 'Marshall Islands',
4164 'MQ': 'Martinique',
4165 'MR': 'Mauritania',
4166 'MU': 'Mauritius',
4167 'YT': 'Mayotte',
4168 'MX': 'Mexico',
4169 'FM': 'Micronesia, Federated States of',
4170 'MD': 'Moldova, Republic of',
4171 'MC': 'Monaco',
4172 'MN': 'Mongolia',
4173 'ME': 'Montenegro',
4174 'MS': 'Montserrat',
4175 'MA': 'Morocco',
4176 'MZ': 'Mozambique',
4177 'MM': 'Myanmar',
4178 'NA': 'Namibia',
4179 'NR': 'Nauru',
4180 'NP': 'Nepal',
4181 'NL': 'Netherlands',
4182 'NC': 'New Caledonia',
4183 'NZ': 'New Zealand',
4184 'NI': 'Nicaragua',
4185 'NE': 'Niger',
4186 'NG': 'Nigeria',
4187 'NU': 'Niue',
4188 'NF': 'Norfolk Island',
4189 'MP': 'Northern Mariana Islands',
4190 'NO': 'Norway',
4191 'OM': 'Oman',
4192 'PK': 'Pakistan',
4193 'PW': 'Palau',
4194 'PS': 'Palestine, State of',
4195 'PA': 'Panama',
4196 'PG': 'Papua New Guinea',
4197 'PY': 'Paraguay',
4198 'PE': 'Peru',
4199 'PH': 'Philippines',
4200 'PN': 'Pitcairn',
4201 'PL': 'Poland',
4202 'PT': 'Portugal',
4203 'PR': 'Puerto Rico',
4204 'QA': 'Qatar',
4205 'RE': 'Réunion',
4206 'RO': 'Romania',
4207 'RU': 'Russian Federation',
4208 'RW': 'Rwanda',
4209 'BL': 'Saint Barthélemy',
4210 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
4211 'KN': 'Saint Kitts and Nevis',
4212 'LC': 'Saint Lucia',
4213 'MF': 'Saint Martin (French part)',
4214 'PM': 'Saint Pierre and Miquelon',
4215 'VC': 'Saint Vincent and the Grenadines',
4216 'WS': 'Samoa',
4217 'SM': 'San Marino',
4218 'ST': 'Sao Tome and Principe',
4219 'SA': 'Saudi Arabia',
4220 'SN': 'Senegal',
4221 'RS': 'Serbia',
4222 'SC': 'Seychelles',
4223 'SL': 'Sierra Leone',
4224 'SG': 'Singapore',
4225 'SX': 'Sint Maarten (Dutch part)',
4226 'SK': 'Slovakia',
4227 'SI': 'Slovenia',
4228 'SB': 'Solomon Islands',
4229 'SO': 'Somalia',
4230 'ZA': 'South Africa',
4231 'GS': 'South Georgia and the South Sandwich Islands',
4232 'SS': 'South Sudan',
4233 'ES': 'Spain',
4234 'LK': 'Sri Lanka',
4235 'SD': 'Sudan',
4236 'SR': 'Suriname',
4237 'SJ': 'Svalbard and Jan Mayen',
4238 'SZ': 'Swaziland',
4239 'SE': 'Sweden',
4240 'CH': 'Switzerland',
4241 'SY': 'Syrian Arab Republic',
4242 'TW': 'Taiwan, Province of China',
4243 'TJ': 'Tajikistan',
4244 'TZ': 'Tanzania, United Republic of',
4245 'TH': 'Thailand',
4246 'TL': 'Timor-Leste',
4247 'TG': 'Togo',
4248 'TK': 'Tokelau',
4249 'TO': 'Tonga',
4250 'TT': 'Trinidad and Tobago',
4251 'TN': 'Tunisia',
4252 'TR': 'Turkey',
4253 'TM': 'Turkmenistan',
4254 'TC': 'Turks and Caicos Islands',
4255 'TV': 'Tuvalu',
4256 'UG': 'Uganda',
4257 'UA': 'Ukraine',
4258 'AE': 'United Arab Emirates',
4259 'GB': 'United Kingdom',
4260 'US': 'United States',
4261 'UM': 'United States Minor Outlying Islands',
4262 'UY': 'Uruguay',
4263 'UZ': 'Uzbekistan',
4264 'VU': 'Vanuatu',
4265 'VE': 'Venezuela, Bolivarian Republic of',
4266 'VN': 'Viet Nam',
4267 'VG': 'Virgin Islands, British',
4268 'VI': 'Virgin Islands, U.S.',
4269 'WF': 'Wallis and Futuna',
4270 'EH': 'Western Sahara',
4271 'YE': 'Yemen',
4272 'ZM': 'Zambia',
4273 'ZW': 'Zimbabwe',
4274 }
4275
4276 @classmethod
4277 def short2full(cls, code):
4278 """Convert an ISO 3166-2 country code to the corresponding full name"""
4279 return cls._country_map.get(code.upper())
4280
4281
773f291d
S
4282class GeoUtils(object):
4283 # Major IPv4 address blocks per country
4284 _country_ip_map = {
53896ca5 4285 'AD': '46.172.224.0/19',
773f291d
S
4286 'AE': '94.200.0.0/13',
4287 'AF': '149.54.0.0/17',
4288 'AG': '209.59.64.0/18',
4289 'AI': '204.14.248.0/21',
4290 'AL': '46.99.0.0/16',
4291 'AM': '46.70.0.0/15',
4292 'AO': '105.168.0.0/13',
53896ca5
S
4293 'AP': '182.50.184.0/21',
4294 'AQ': '23.154.160.0/24',
773f291d
S
4295 'AR': '181.0.0.0/12',
4296 'AS': '202.70.112.0/20',
53896ca5 4297 'AT': '77.116.0.0/14',
773f291d
S
4298 'AU': '1.128.0.0/11',
4299 'AW': '181.41.0.0/18',
53896ca5
S
4300 'AX': '185.217.4.0/22',
4301 'AZ': '5.197.0.0/16',
773f291d
S
4302 'BA': '31.176.128.0/17',
4303 'BB': '65.48.128.0/17',
4304 'BD': '114.130.0.0/16',
4305 'BE': '57.0.0.0/8',
53896ca5 4306 'BF': '102.178.0.0/15',
773f291d
S
4307 'BG': '95.42.0.0/15',
4308 'BH': '37.131.0.0/17',
4309 'BI': '154.117.192.0/18',
4310 'BJ': '137.255.0.0/16',
53896ca5 4311 'BL': '185.212.72.0/23',
773f291d
S
4312 'BM': '196.12.64.0/18',
4313 'BN': '156.31.0.0/16',
4314 'BO': '161.56.0.0/16',
4315 'BQ': '161.0.80.0/20',
53896ca5 4316 'BR': '191.128.0.0/12',
773f291d
S
4317 'BS': '24.51.64.0/18',
4318 'BT': '119.2.96.0/19',
4319 'BW': '168.167.0.0/16',
4320 'BY': '178.120.0.0/13',
4321 'BZ': '179.42.192.0/18',
4322 'CA': '99.224.0.0/11',
4323 'CD': '41.243.0.0/16',
53896ca5
S
4324 'CF': '197.242.176.0/21',
4325 'CG': '160.113.0.0/16',
773f291d 4326 'CH': '85.0.0.0/13',
53896ca5 4327 'CI': '102.136.0.0/14',
773f291d
S
4328 'CK': '202.65.32.0/19',
4329 'CL': '152.172.0.0/14',
53896ca5 4330 'CM': '102.244.0.0/14',
773f291d
S
4331 'CN': '36.128.0.0/10',
4332 'CO': '181.240.0.0/12',
4333 'CR': '201.192.0.0/12',
4334 'CU': '152.206.0.0/15',
4335 'CV': '165.90.96.0/19',
4336 'CW': '190.88.128.0/17',
53896ca5 4337 'CY': '31.153.0.0/16',
773f291d
S
4338 'CZ': '88.100.0.0/14',
4339 'DE': '53.0.0.0/8',
4340 'DJ': '197.241.0.0/17',
4341 'DK': '87.48.0.0/12',
4342 'DM': '192.243.48.0/20',
4343 'DO': '152.166.0.0/15',
4344 'DZ': '41.96.0.0/12',
4345 'EC': '186.68.0.0/15',
4346 'EE': '90.190.0.0/15',
4347 'EG': '156.160.0.0/11',
4348 'ER': '196.200.96.0/20',
4349 'ES': '88.0.0.0/11',
4350 'ET': '196.188.0.0/14',
4351 'EU': '2.16.0.0/13',
4352 'FI': '91.152.0.0/13',
4353 'FJ': '144.120.0.0/16',
53896ca5 4354 'FK': '80.73.208.0/21',
773f291d
S
4355 'FM': '119.252.112.0/20',
4356 'FO': '88.85.32.0/19',
4357 'FR': '90.0.0.0/9',
4358 'GA': '41.158.0.0/15',
4359 'GB': '25.0.0.0/8',
4360 'GD': '74.122.88.0/21',
4361 'GE': '31.146.0.0/16',
4362 'GF': '161.22.64.0/18',
4363 'GG': '62.68.160.0/19',
53896ca5
S
4364 'GH': '154.160.0.0/12',
4365 'GI': '95.164.0.0/16',
773f291d
S
4366 'GL': '88.83.0.0/19',
4367 'GM': '160.182.0.0/15',
4368 'GN': '197.149.192.0/18',
4369 'GP': '104.250.0.0/19',
4370 'GQ': '105.235.224.0/20',
4371 'GR': '94.64.0.0/13',
4372 'GT': '168.234.0.0/16',
4373 'GU': '168.123.0.0/16',
4374 'GW': '197.214.80.0/20',
4375 'GY': '181.41.64.0/18',
4376 'HK': '113.252.0.0/14',
4377 'HN': '181.210.0.0/16',
4378 'HR': '93.136.0.0/13',
4379 'HT': '148.102.128.0/17',
4380 'HU': '84.0.0.0/14',
4381 'ID': '39.192.0.0/10',
4382 'IE': '87.32.0.0/12',
4383 'IL': '79.176.0.0/13',
4384 'IM': '5.62.80.0/20',
4385 'IN': '117.192.0.0/10',
4386 'IO': '203.83.48.0/21',
4387 'IQ': '37.236.0.0/14',
4388 'IR': '2.176.0.0/12',
4389 'IS': '82.221.0.0/16',
4390 'IT': '79.0.0.0/10',
4391 'JE': '87.244.64.0/18',
4392 'JM': '72.27.0.0/17',
4393 'JO': '176.29.0.0/16',
53896ca5 4394 'JP': '133.0.0.0/8',
773f291d
S
4395 'KE': '105.48.0.0/12',
4396 'KG': '158.181.128.0/17',
4397 'KH': '36.37.128.0/17',
4398 'KI': '103.25.140.0/22',
4399 'KM': '197.255.224.0/20',
53896ca5 4400 'KN': '198.167.192.0/19',
773f291d
S
4401 'KP': '175.45.176.0/22',
4402 'KR': '175.192.0.0/10',
4403 'KW': '37.36.0.0/14',
4404 'KY': '64.96.0.0/15',
4405 'KZ': '2.72.0.0/13',
4406 'LA': '115.84.64.0/18',
4407 'LB': '178.135.0.0/16',
53896ca5 4408 'LC': '24.92.144.0/20',
773f291d
S
4409 'LI': '82.117.0.0/19',
4410 'LK': '112.134.0.0/15',
53896ca5 4411 'LR': '102.183.0.0/16',
773f291d
S
4412 'LS': '129.232.0.0/17',
4413 'LT': '78.56.0.0/13',
4414 'LU': '188.42.0.0/16',
4415 'LV': '46.109.0.0/16',
4416 'LY': '41.252.0.0/14',
4417 'MA': '105.128.0.0/11',
4418 'MC': '88.209.64.0/18',
4419 'MD': '37.246.0.0/16',
4420 'ME': '178.175.0.0/17',
4421 'MF': '74.112.232.0/21',
4422 'MG': '154.126.0.0/17',
4423 'MH': '117.103.88.0/21',
4424 'MK': '77.28.0.0/15',
4425 'ML': '154.118.128.0/18',
4426 'MM': '37.111.0.0/17',
4427 'MN': '49.0.128.0/17',
4428 'MO': '60.246.0.0/16',
4429 'MP': '202.88.64.0/20',
4430 'MQ': '109.203.224.0/19',
4431 'MR': '41.188.64.0/18',
4432 'MS': '208.90.112.0/22',
4433 'MT': '46.11.0.0/16',
4434 'MU': '105.16.0.0/12',
4435 'MV': '27.114.128.0/18',
53896ca5 4436 'MW': '102.70.0.0/15',
773f291d
S
4437 'MX': '187.192.0.0/11',
4438 'MY': '175.136.0.0/13',
4439 'MZ': '197.218.0.0/15',
4440 'NA': '41.182.0.0/16',
4441 'NC': '101.101.0.0/18',
4442 'NE': '197.214.0.0/18',
4443 'NF': '203.17.240.0/22',
4444 'NG': '105.112.0.0/12',
4445 'NI': '186.76.0.0/15',
4446 'NL': '145.96.0.0/11',
4447 'NO': '84.208.0.0/13',
4448 'NP': '36.252.0.0/15',
4449 'NR': '203.98.224.0/19',
4450 'NU': '49.156.48.0/22',
4451 'NZ': '49.224.0.0/14',
4452 'OM': '5.36.0.0/15',
4453 'PA': '186.72.0.0/15',
4454 'PE': '186.160.0.0/14',
4455 'PF': '123.50.64.0/18',
4456 'PG': '124.240.192.0/19',
4457 'PH': '49.144.0.0/13',
4458 'PK': '39.32.0.0/11',
4459 'PL': '83.0.0.0/11',
4460 'PM': '70.36.0.0/20',
4461 'PR': '66.50.0.0/16',
4462 'PS': '188.161.0.0/16',
4463 'PT': '85.240.0.0/13',
4464 'PW': '202.124.224.0/20',
4465 'PY': '181.120.0.0/14',
4466 'QA': '37.210.0.0/15',
53896ca5 4467 'RE': '102.35.0.0/16',
773f291d 4468 'RO': '79.112.0.0/13',
53896ca5 4469 'RS': '93.86.0.0/15',
773f291d 4470 'RU': '5.136.0.0/13',
53896ca5 4471 'RW': '41.186.0.0/16',
773f291d
S
4472 'SA': '188.48.0.0/13',
4473 'SB': '202.1.160.0/19',
4474 'SC': '154.192.0.0/11',
53896ca5 4475 'SD': '102.120.0.0/13',
773f291d 4476 'SE': '78.64.0.0/12',
53896ca5 4477 'SG': '8.128.0.0/10',
773f291d
S
4478 'SI': '188.196.0.0/14',
4479 'SK': '78.98.0.0/15',
53896ca5 4480 'SL': '102.143.0.0/17',
773f291d
S
4481 'SM': '89.186.32.0/19',
4482 'SN': '41.82.0.0/15',
53896ca5 4483 'SO': '154.115.192.0/18',
773f291d
S
4484 'SR': '186.179.128.0/17',
4485 'SS': '105.235.208.0/21',
4486 'ST': '197.159.160.0/19',
4487 'SV': '168.243.0.0/16',
4488 'SX': '190.102.0.0/20',
4489 'SY': '5.0.0.0/16',
4490 'SZ': '41.84.224.0/19',
4491 'TC': '65.255.48.0/20',
4492 'TD': '154.68.128.0/19',
4493 'TG': '196.168.0.0/14',
4494 'TH': '171.96.0.0/13',
4495 'TJ': '85.9.128.0/18',
4496 'TK': '27.96.24.0/21',
4497 'TL': '180.189.160.0/20',
4498 'TM': '95.85.96.0/19',
4499 'TN': '197.0.0.0/11',
4500 'TO': '175.176.144.0/21',
4501 'TR': '78.160.0.0/11',
4502 'TT': '186.44.0.0/15',
4503 'TV': '202.2.96.0/19',
4504 'TW': '120.96.0.0/11',
4505 'TZ': '156.156.0.0/14',
53896ca5
S
4506 'UA': '37.52.0.0/14',
4507 'UG': '102.80.0.0/13',
4508 'US': '6.0.0.0/8',
773f291d 4509 'UY': '167.56.0.0/13',
53896ca5 4510 'UZ': '84.54.64.0/18',
773f291d 4511 'VA': '212.77.0.0/19',
53896ca5 4512 'VC': '207.191.240.0/21',
773f291d 4513 'VE': '186.88.0.0/13',
53896ca5 4514 'VG': '66.81.192.0/20',
773f291d
S
4515 'VI': '146.226.0.0/16',
4516 'VN': '14.160.0.0/11',
4517 'VU': '202.80.32.0/20',
4518 'WF': '117.20.32.0/21',
4519 'WS': '202.4.32.0/19',
4520 'YE': '134.35.0.0/16',
4521 'YT': '41.242.116.0/22',
4522 'ZA': '41.0.0.0/11',
53896ca5
S
4523 'ZM': '102.144.0.0/13',
4524 'ZW': '102.177.192.0/18',
773f291d
S
4525 }
4526
4527 @classmethod
5f95927a
S
4528 def random_ipv4(cls, code_or_block):
4529 if len(code_or_block) == 2:
4530 block = cls._country_ip_map.get(code_or_block.upper())
4531 if not block:
4532 return None
4533 else:
4534 block = code_or_block
773f291d
S
4535 addr, preflen = block.split('/')
4536 addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
4537 addr_max = addr_min | (0xffffffff >> int(preflen))
18a0defa 4538 return compat_str(socket.inet_ntoa(
4248dad9 4539 compat_struct_pack('!L', random.randint(addr_min, addr_max))))
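# Example (illustrative): a two-letter country code is resolved through
# _country_ip_map, while an explicit CIDR block is used as-is:
#
#   GeoUtils.random_ipv4('US')            # -> some address inside 6.0.0.0/8
#   GeoUtils.random_ipv4('192.0.2.0/24')  # -> some address inside 192.0.2.0/24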
773f291d
S
4540
4541
91410c9b 4542class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
2461f79d
PH
4543 def __init__(self, proxies=None):
4544 # Set default handlers
4545 for type in ('http', 'https'):
4546 setattr(self, '%s_open' % type,
4547 lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
4548 meth(r, proxy, type))
38e87f6c 4549 compat_urllib_request.ProxyHandler.__init__(self, proxies)
2461f79d 4550
91410c9b 4551 def proxy_open(self, req, proxy, type):
2461f79d 4552 req_proxy = req.headers.get('Ytdl-request-proxy')
91410c9b
PH
4553 if req_proxy is not None:
4554 proxy = req_proxy
2461f79d
PH
4555 del req.headers['Ytdl-request-proxy']
4556
4557 if proxy == '__noproxy__':
4558 return None # No Proxy
51fb4995 4559 if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
71aff188 4560 req.add_header('Ytdl-socks-proxy', proxy)
7a5c1cfe 4561 # yt-dlp's http/https handlers take care of wrapping the socket with SOCKS
71aff188 4562 return None
91410c9b
PH
4563 return compat_urllib_request.ProxyHandler.proxy_open(
4564 self, req, proxy, type)
5bc880b9
YCH
4565
4566
0a5445dd
YCH
4567# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
4568# released into Public Domain
4569# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
4570
4571def long_to_bytes(n, blocksize=0):
4572 """long_to_bytes(n:long, blocksize:int) : string
4573 Convert a long integer to a byte string.
4574
4575 If optional blocksize is given and greater than zero, pad the front of the
4576 byte string with binary zeros so that the length is a multiple of
4577 blocksize.
4578 """
4579 # after much testing, this algorithm was deemed to be the fastest
4580 s = b''
4581 n = int(n)
4582 while n > 0:
4583 s = compat_struct_pack('>I', n & 0xffffffff) + s
4584 n = n >> 32
4585 # strip off leading zeros
4586 for i in range(len(s)):
4587 if s[i] != b'\000'[0]:
4588 break
4589 else:
4590 # only happens when n == 0
4591 s = b'\000'
4592 i = 0
4593 s = s[i:]
4594 # add back some pad bytes. this could be done more efficiently w.r.t. the
4595 # de-padding being done above, but sigh...
4596 if blocksize > 0 and len(s) % blocksize:
4597 s = (blocksize - len(s) % blocksize) * b'\000' + s
4598 return s
4599
4600
4601def bytes_to_long(s):
4602 """bytes_to_long(string) : long
4603 Convert a byte string to a long integer.
4604
4605 This is (essentially) the inverse of long_to_bytes().
4606 """
4607 acc = 0
4608 length = len(s)
4609 if length % 4:
4610 extra = (4 - length % 4)
4611 s = b'\000' * extra + s
4612 length = length + extra
4613 for i in range(0, length, 4):
4614 acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
4615 return acc
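# Example (illustrative): the two helpers are big-endian inverses of each other:
#
#   long_to_bytes(256)               # -> b'\x01\x00'
#   bytes_to_long(b'\x01\x00')       # -> 256
#   long_to_bytes(256, blocksize=4)  # -> b'\x00\x00\x01\x00'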
4616
4617
5bc880b9
YCH
4618def ohdave_rsa_encrypt(data, exponent, modulus):
4619 '''
4620 Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
4621
4622 Input:
4623 data: data to encrypt, bytes-like object
4624 exponent, modulus: parameter e and N of RSA algorithm, both integer
4625 Output: hex string of encrypted data
4626
4627 Limitation: supports one block encryption only
4628 '''
4629
4630 payload = int(binascii.hexlify(data[::-1]), 16)
4631 encrypted = pow(payload, exponent, modulus)
4632 return '%x' % encrypted
81bdc8fd
YCH
4633
4634
f48409c7
YCH
4635def pkcs1pad(data, length):
4636 """
4637 Padding input data with PKCS#1 scheme
4638
4639 @param {int[]} data input data
4640 @param {int} length target length
4641 @returns {int[]} padded data
4642 """
4643 if len(data) > length - 11:
4644 raise ValueError('Input data too long for PKCS#1 padding')
4645
4646 pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
4647 return [0, 2] + pseudo_random + [0] + data
4648
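# Example (illustrative): the result is a list of ints of exactly `length`
# items: [0, 2], then random filler, then a 0 separator, then the data:
#
#   padded = pkcs1pad([1, 2, 3], 16)
#   len(padded)  # -> 16
#   padded[:2]   # -> [0, 2]
#   padded[-4:]  # -> [0, 1, 2, 3]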
4649
5eb6bdce 4650def encode_base_n(num, n, table=None):
59f898b7 4651 FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
59f898b7
YCH
4652 if not table:
4653 table = FULL_TABLE[:n]
4654
5eb6bdce
YCH
4655 if n > len(table):
4656 raise ValueError('base %d exceeds table length %d' % (n, len(table)))
4657
4658 if num == 0:
4659 return table[0]
4660
81bdc8fd
YCH
4661 ret = ''
4662 while num:
4663 ret = table[num % n] + ret
4664 num = num // n
4665 return ret
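# Example (illustrative): with the default table this behaves like hex/base-36
# style encoding of non-negative integers:
#
#   encode_base_n(255, 16)  # -> 'ff'
#   encode_base_n(0, 2)     # -> '0'
#   encode_base_n(35, 36)   # -> 'z'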
f52354a8
YCH
4666
4667
4668def decode_packed_codes(code):
06b3fe29 4669 mobj = re.search(PACKED_CODES_RE, code)
a0566bbf 4670 obfuscated_code, base, count, symbols = mobj.groups()
f52354a8
YCH
4671 base = int(base)
4672 count = int(count)
4673 symbols = symbols.split('|')
4674 symbol_table = {}
4675
4676 while count:
4677 count -= 1
5eb6bdce 4678 base_n_count = encode_base_n(count, base)
f52354a8
YCH
4679 symbol_table[base_n_count] = symbols[count] or base_n_count
4680
4681 return re.sub(
4682 r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
a0566bbf 4683 obfuscated_code)
e154c651 4684
4685
1ced2221
S
4686def caesar(s, alphabet, shift):
4687 if shift == 0:
4688 return s
4689 l = len(alphabet)
4690 return ''.join(
4691 alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
4692 for c in s)
4693
4694
4695def rot47(s):
4696 return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)
4697
4698
e154c651 4699def parse_m3u8_attributes(attrib):
4700 info = {}
4701 for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
4702 if val.startswith('"'):
4703 val = val[1:-1]
4704 info[key] = val
4705 return info
1143535d
YCH
4706
4707
4708def urshift(val, n):
4709 return val >> n if val >= 0 else (val + 0x100000000) >> n
d3f8e038
YCH
4710
4711
4712# Based on png2str() written by @gdkchan and improved by @yokrysty
067aa17e 4713# Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
d3f8e038
YCH
4714def decode_png(png_data):
4715 # Reference: https://www.w3.org/TR/PNG/
4716 header = png_data[8:]
4717
4718 if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
4719 raise IOError('Not a valid PNG file.')
4720
4721 int_map = {1: '>B', 2: '>H', 4: '>I'}
4722 unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]
4723
4724 chunks = []
4725
4726 while header:
4727 length = unpack_integer(header[:4])
4728 header = header[4:]
4729
4730 chunk_type = header[:4]
4731 header = header[4:]
4732
4733 chunk_data = header[:length]
4734 header = header[length:]
4735
4736 header = header[4:] # Skip CRC
4737
4738 chunks.append({
4739 'type': chunk_type,
4740 'length': length,
4741 'data': chunk_data
4742 })
4743
4744 ihdr = chunks[0]['data']
4745
4746 width = unpack_integer(ihdr[:4])
4747 height = unpack_integer(ihdr[4:8])
4748
4749 idat = b''
4750
4751 for chunk in chunks:
4752 if chunk['type'] == b'IDAT':
4753 idat += chunk['data']
4754
4755 if not idat:
4756 raise IOError('Unable to read PNG data.')
4757
4758 decompressed_data = bytearray(zlib.decompress(idat))
4759
4760 stride = width * 3
4761 pixels = []
4762
4763 def _get_pixel(idx):
4764 x = idx % stride
4765 y = idx // stride
4766 return pixels[y][x]
4767
4768 for y in range(height):
4769 basePos = y * (1 + stride)
4770 filter_type = decompressed_data[basePos]
4771
4772 current_row = []
4773
4774 pixels.append(current_row)
4775
4776 for x in range(stride):
4777 color = decompressed_data[1 + basePos + x]
4778 basex = y * stride + x
4779 left = 0
4780 up = 0
4781
4782 if x > 2:
4783 left = _get_pixel(basex - 3)
4784 if y > 0:
4785 up = _get_pixel(basex - stride)
4786
4787 if filter_type == 1: # Sub
4788 color = (color + left) & 0xff
4789 elif filter_type == 2: # Up
4790 color = (color + up) & 0xff
4791 elif filter_type == 3: # Average
4792 color = (color + ((left + up) >> 1)) & 0xff
4793 elif filter_type == 4: # Paeth
4794 a = left
4795 b = up
4796 c = 0
4797
4798 if x > 2 and y > 0:
4799 c = _get_pixel(basex - stride - 3)
4800
4801 p = a + b - c
4802
4803 pa = abs(p - a)
4804 pb = abs(p - b)
4805 pc = abs(p - c)
4806
4807 if pa <= pb and pa <= pc:
4808 color = (color + a) & 0xff
4809 elif pb <= pc:
4810 color = (color + b) & 0xff
4811 else:
4812 color = (color + c) & 0xff
4813
4814 current_row.append(color)
4815
4816 return width, height, pixels
efa97bdc
YCH
4817
4818
4819def write_xattr(path, key, value):
4820 # This mess below finds the best xattr tool for the job
4821 try:
4822 # try the pyxattr module...
4823 import xattr
4824
53a7e3d2
YCH
4825 if hasattr(xattr, 'set'): # pyxattr
4826 # Unicode arguments are not supported in python-pyxattr until
4827 # version 0.5.0
067aa17e 4828 # See https://github.com/ytdl-org/youtube-dl/issues/5498
53a7e3d2
YCH
4829 pyxattr_required_version = '0.5.0'
4830 if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
4831 # TODO: fallback to CLI tools
4832 raise XAttrUnavailableError(
4833 'python-pyxattr is detected but is too old. '
7a5c1cfe 4834 'yt-dlp requires %s or above while your version is %s. '
53a7e3d2
YCH
4835 'Falling back to other xattr implementations' % (
4836 pyxattr_required_version, xattr.__version__))
4837
4838 setxattr = xattr.set
4839 else: # xattr
4840 setxattr = xattr.setxattr
efa97bdc
YCH
4841
4842 try:
53a7e3d2 4843 setxattr(path, key, value)
efa97bdc
YCH
4844 except EnvironmentError as e:
4845 raise XAttrMetadataError(e.errno, e.strerror)
4846
4847 except ImportError:
4848 if compat_os_name == 'nt':
4849 # Write xattrs to NTFS Alternate Data Streams:
4850 # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
4851 assert ':' not in key
4852 assert os.path.exists(path)
4853
4854 ads_fn = path + ':' + key
4855 try:
4856 with open(ads_fn, 'wb') as f:
4857 f.write(value)
4858 except EnvironmentError as e:
4859 raise XAttrMetadataError(e.errno, e.strerror)
4860 else:
4861 user_has_setfattr = check_executable('setfattr', ['--version'])
4862 user_has_xattr = check_executable('xattr', ['-h'])
4863
4864 if user_has_setfattr or user_has_xattr:
4865
4866 value = value.decode('utf-8')
4867 if user_has_setfattr:
4868 executable = 'setfattr'
4869 opts = ['-n', key, '-v', value]
4870 elif user_has_xattr:
4871 executable = 'xattr'
4872 opts = ['-w', key, value]
4873
3089bc74
S
4874 cmd = ([encodeFilename(executable, True)]
4875 + [encodeArgument(o) for o in opts]
4876 + [encodeFilename(path, True)])
efa97bdc
YCH
4877
4878 try:
d3c93ec2 4879 p = Popen(
efa97bdc
YCH
4880 cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
4881 except EnvironmentError as e:
4882 raise XAttrMetadataError(e.errno, e.strerror)
d3c93ec2 4883 stdout, stderr = p.communicate_or_kill()
efa97bdc
YCH
4884 stderr = stderr.decode('utf-8', 'replace')
4885 if p.returncode != 0:
4886 raise XAttrMetadataError(p.returncode, stderr)
4887
4888 else:
4889 # On Unix, but we can't find pyxattr, setfattr, or xattr.
4890 if sys.platform.startswith('linux'):
4891 raise XAttrUnavailableError(
4892 "Couldn't find a tool to set the xattrs. "
4893 "Install either the python 'pyxattr' or 'xattr' "
4894 "modules, or the GNU 'attr' package "
4895 "(which contains the 'setfattr' tool).")
4896 else:
4897 raise XAttrUnavailableError(
4898 "Couldn't find a tool to set the xattrs. "
4899 "Install either the python 'xattr' module, "
4900 "or the 'xattr' binary.")
0c265486
YCH
4901
4902
4903def random_birthday(year_field, month_field, day_field):
aa374bc7
AS
4904 start_date = datetime.date(1950, 1, 1)
4905 end_date = datetime.date(1995, 12, 31)
4906 offset = random.randint(0, (end_date - start_date).days)
4907 random_date = start_date + datetime.timedelta(offset)
0c265486 4908 return {
aa374bc7
AS
4909 year_field: str(random_date.year),
4910 month_field: str(random_date.month),
4911 day_field: str(random_date.day),
0c265486 4912 }
732044af 4913
c76eb41b 4914
732044af 4915# Templates for internet shortcut files, which are plain text files.
4916DOT_URL_LINK_TEMPLATE = '''
4917[InternetShortcut]
4918URL=%(url)s
4919'''.lstrip()
4920
4921DOT_WEBLOC_LINK_TEMPLATE = '''
4922<?xml version="1.0" encoding="UTF-8"?>
4923<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
4924<plist version="1.0">
4925<dict>
4926\t<key>URL</key>
4927\t<string>%(url)s</string>
4928</dict>
4929</plist>
4930'''.lstrip()
4931
4932DOT_DESKTOP_LINK_TEMPLATE = '''
4933[Desktop Entry]
4934Encoding=UTF-8
4935Name=%(filename)s
4936Type=Link
4937URL=%(url)s
4938Icon=text-html
4939'''.lstrip()
4940
08438d2c 4941LINK_TEMPLATES = {
4942 'url': DOT_URL_LINK_TEMPLATE,
4943 'desktop': DOT_DESKTOP_LINK_TEMPLATE,
4944 'webloc': DOT_WEBLOC_LINK_TEMPLATE,
4945}
4946
732044af 4947
4948def iri_to_uri(iri):
4949 """
4950 Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).
4951
4952 The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact.
4953 """
4954
4955 iri_parts = compat_urllib_parse_urlparse(iri)
4956
4957 if '[' in iri_parts.netloc:
4958 raise ValueError('IPv6 URIs are not, yet, supported.')
4959 # Querying `.netloc`, when there's only one bracket, also raises a ValueError.
4960
4961 # The `safe` argument values, that the following code uses, contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.
4962
4963 net_location = ''
4964 if iri_parts.username:
4965 net_location += compat_urllib_parse_quote(iri_parts.username, safe=r"!$%&'()*+,~")
4966 if iri_parts.password is not None:
4967 net_location += ':' + compat_urllib_parse_quote(iri_parts.password, safe=r"!$%&'()*+,~")
4968 net_location += '@'
4969
4970 net_location += iri_parts.hostname.encode('idna').decode('utf-8') # Punycode for Unicode hostnames.
4971 # The 'idna' encoding produces ASCII text.
4972 if iri_parts.port is not None and iri_parts.port != 80:
4973 net_location += ':' + str(iri_parts.port)
4974
4975 return compat_urllib_parse_urlunparse(
4976 (iri_parts.scheme,
4977 net_location,
4978
4979 compat_urllib_parse_quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),
4980
4981 # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
4982 compat_urllib_parse_quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),
4983
4984 # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
4985 compat_urllib_parse_quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),
4986
4987 compat_urllib_parse_quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))
4988
4989 # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
4990
4991
4992def to_high_limit_path(path):
4993 if sys.platform in ['win32', 'cygwin']:
4994 # Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited.
4995 return r'\\?\ '.rstrip() + os.path.abspath(path)
4996
4997 return path
76d321f6 4998
c76eb41b 4999
b868936c 5000def format_field(obj, field=None, template='%s', ignore=(None, ''), default='', func=None):
e0ddbd02 5001 val = traverse_obj(obj, *variadic(field))
5002 if val in ignore:
5003 return default
5004 return template % (func(val) if func else val)
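# Example (illustrative): format_field() keeps "None" from leaking into
# formatted strings by falling back to `default` for missing/empty fields:
#
#   format_field({'width': 1920}, 'width', '%dpx')              # -> '1920px'
#   format_field({}, 'width', '%dpx', default='unknown width')  # -> 'unknown width'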
00dd0cd5 5005
5006
5007def clean_podcast_url(url):
5008 return re.sub(r'''(?x)
5009 (?:
5010 (?:
5011 chtbl\.com/track|
5012 media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
5013 play\.podtrac\.com
5014 )/[^/]+|
5015 (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
5016 flex\.acast\.com|
5017 pd(?:
5018 cn\.co| # https://podcorn.com/analytics-prefix/
5019 st\.fm # https://podsights.com/docs/
5020 )/e
5021 )/''', '', url)
ffcb8191
THD
5022
5023
5024_HEX_TABLE = '0123456789abcdef'
5025
5026
5027def random_uuidv4():
5028 return re.sub(r'[xy]', lambda x: _HEX_TABLE[random.randint(0, 15)], 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')
0202b52a 5029
5030
5031def make_dir(path, to_screen=None):
5032 try:
5033 dn = os.path.dirname(path)
5034 if dn and not os.path.exists(dn):
5035 os.makedirs(dn)
5036 return True
5037 except (OSError, IOError) as err:
5038 if callable(to_screen):
5039 to_screen('unable to create directory ' + error_to_compat_str(err))
5040 return False
f74980cb 5041
5042
5043def get_executable_path():
c552ae88 5044 from zipimport import zipimporter
5045 if hasattr(sys, 'frozen'): # Running from PyInstaller
5046 path = os.path.dirname(sys.executable)
5047 elif isinstance(globals().get('__loader__'), zipimporter): # Running from ZIP
5048 path = os.path.join(os.path.dirname(__file__), '../..')
5049 else:
5050 path = os.path.join(os.path.dirname(__file__), '..')
f74980cb 5051 return os.path.abspath(path)
5052
5053
2f567473 5054def load_plugins(name, suffix, namespace):
3ae5e797 5055 classes = {}
f74980cb 5056 try:
019a94f7
ÁS
5057 plugins_spec = importlib.util.spec_from_file_location(
5058 name, os.path.join(get_executable_path(), 'ytdlp_plugins', name, '__init__.py'))
5059 plugins = importlib.util.module_from_spec(plugins_spec)
5060 sys.modules[plugins_spec.name] = plugins
5061 plugins_spec.loader.exec_module(plugins)
f74980cb 5062 for name in dir(plugins):
2f567473 5063 if name in namespace:
5064 continue
5065 if not name.endswith(suffix):
f74980cb 5066 continue
5067 klass = getattr(plugins, name)
3ae5e797 5068 classes[name] = namespace[name] = klass
019a94f7 5069 except FileNotFoundError:
f74980cb 5070 pass
f74980cb 5071 return classes
06167fbb 5072
5073
325ebc17 5074def traverse_obj(
352d63fd 5075 obj, *path_list, default=None, expected_type=None, get_all=True,
325ebc17 5076 casesense=True, is_user_input=False, traverse_string=False):
324ad820 5077 ''' Traverse nested list/dict/tuple
8f334380 5078 @param path_list A list of paths which are checked one by one.
5079 Each path is a list of keys where each key is a string,
1797b073 5080 a function, a tuple of strings/None or "...".
2614f646 5081 When a function is given, it takes the key as argument and
5082 returns whether the key matches or not. When a tuple is given,
8f334380 5083 all the keys given in the tuple are traversed, and
5084 "..." traverses all the keys in the object
1797b073 5085 "None" returns the object without traversal
325ebc17 5086 @param default Default value to return
352d63fd 5087 @param expected_type Only accept final value of this type (Can also be any callable)
5088 @param get_all Return all the values obtained from a path or only the first one
324ad820 5089 @param casesense Whether to consider dictionary keys as case sensitive
5090 @param is_user_input Whether the keys are generated from user input. If True,
5091 strings are converted to int/slice if necessary
5092 @param traverse_string Whether to traverse inside strings. If True, any
5093 non-compatible object will also be converted into a string
8f334380 5094 # TODO: Write tests
324ad820 5095 '''
325ebc17 5096 if not casesense:
dbf5416a 5097 _lower = lambda k: (k.lower() if isinstance(k, str) else k)
8f334380 5098 path_list = (map(_lower, variadic(path)) for path in path_list)
5099
5100 def _traverse_obj(obj, path, _current_depth=0):
5101 nonlocal depth
5102 path = tuple(variadic(path))
5103 for i, key in enumerate(path):
1797b073 5104 if None in (key, obj):
5105 return obj
8f334380 5106 if isinstance(key, (list, tuple)):
5107 obj = [_traverse_obj(obj, sub_key, _current_depth) for sub_key in key]
5108 key = ...
5109 if key is ...:
5110 obj = (obj.values() if isinstance(obj, dict)
5111 else obj if isinstance(obj, (list, tuple, LazyList))
5112 else str(obj) if traverse_string else [])
5113 _current_depth += 1
5114 depth = max(depth, _current_depth)
5115 return [_traverse_obj(inner_obj, path[i + 1:], _current_depth) for inner_obj in obj]
2614f646 5116 elif callable(key):
5117 if isinstance(obj, (list, tuple, LazyList)):
5118 obj = enumerate(obj)
5119 elif isinstance(obj, dict):
5120 obj = obj.items()
5121 else:
5122 if not traverse_string:
5123 return None
5124 obj = str(obj)
5125 _current_depth += 1
5126 depth = max(depth, _current_depth)
5127 return [_traverse_obj(v, path[i + 1:], _current_depth) for k, v in obj if key(k)]
575e17a1 5128 elif isinstance(obj, dict) and not (is_user_input and key == ':'):
325ebc17 5129 obj = (obj.get(key) if casesense or (key in obj)
5130 else next((v for k, v in obj.items() if _lower(k) == key), None))
5131 else:
5132 if is_user_input:
5133 key = (int_or_none(key) if ':' not in key
5134 else slice(*map(int_or_none, key.split(':'))))
8f334380 5135 if key == slice(None):
575e17a1 5136 return _traverse_obj(obj, (..., *path[i + 1:]), _current_depth)
325ebc17 5137 if not isinstance(key, (int, slice)):
9fea350f 5138 return None
8f334380 5139 if not isinstance(obj, (list, tuple, LazyList)):
325ebc17 5140 if not traverse_string:
5141 return None
5142 obj = str(obj)
5143 try:
5144 obj = obj[key]
5145 except IndexError:
324ad820 5146 return None
325ebc17 5147 return obj
5148
352d63fd 5149 if isinstance(expected_type, type):
5150 type_test = lambda val: val if isinstance(val, expected_type) else None
5151 elif expected_type is not None:
5152 type_test = expected_type
5153 else:
5154 type_test = lambda val: val
5155
8f334380 5156 for path in path_list:
5157 depth = 0
5158 val = _traverse_obj(obj, path)
325ebc17 5159 if val is not None:
8f334380 5160 if depth:
5161 for _ in range(depth - 1):
6586bca9 5162 val = itertools.chain.from_iterable(v for v in val if v is not None)
352d63fd 5163 val = [v for v in map(type_test, val) if v is not None]
8f334380 5164 if val:
352d63fd 5165 return val if get_all else val[0]
5166 else:
5167 val = type_test(val)
5168 if val is not None:
8f334380 5169 return val
325ebc17 5170 return default
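
# Usage sketch (illustrative data, not from the original source):
#   d = {'a': {'b': [1, 2, {'c': 3}]}}
#   traverse_obj(d, ('a', 'b', 2, 'c'))          ->  3
#   traverse_obj(d, ('a', 'b', ..., 'c'))        ->  [3]   (branching paths return a list)
#   traverse_obj(d, ('a', 'x'), ('a', 'b', 0))   ->  1     (first path that matches wins)
#   traverse_obj(d, ('a', 'x'), default=0)       ->  0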
324ad820 5171
5172
5173def traverse_dict(dictn, keys, casesense=True):
ee8dd27a 5174 write_string('DeprecationWarning: yt_dlp.utils.traverse_dict is deprecated '
5175 'and may be removed in a future version. Use yt_dlp.utils.traverse_obj instead')
5176 return traverse_obj(dictn, keys, casesense=casesense, is_user_input=True, traverse_string=True)
6606817a 5177
5178
4b4b7f74 5179def variadic(x, allowed_types=(str, bytes, dict)):
cb89cfc1 5180 return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,)
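
# Illustrative behaviour (str/bytes/dict count as single values, other iterables pass through):
#   variadic('abc')   ->  ('abc',)
#   variadic([1, 2])  ->  [1, 2]
#   variadic(None)    ->  (None,)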
5181
5182
5183def decode_base(value, digits):
5184 # Convert the given base-N string to an integer
5185 table = {char: index for index, char in enumerate(digits)}
5186 result = 0
5187 base = len(digits)
5188 for char in value:
5189 result *= base
5190 result += table[char]
5191 return result
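
# Usage sketch (illustrative digit alphabets):
#   decode_base('ff', '0123456789abcdef')  ->  255
#   decode_base('101', '01')               ->  5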
5192
5193
5194def time_seconds(**kwargs):
5195 t = datetime.datetime.now(datetime.timezone(datetime.timedelta(**kwargs)))
5196 return t.timestamp()
5197
5198
5199# Create a JSON Web Signature (JWS) with the HS256 algorithm.
5200# The resulting format is JWS Compact Serialization.
5201# Implemented following JWT: https://www.rfc-editor.org/rfc/rfc7519.html
5202# and JWS: https://www.rfc-editor.org/rfc/rfc7515.html
5203def jwt_encode_hs256(payload_data, key, headers={}):
5204 header_data = {
5205 'alg': 'HS256',
5206 'typ': 'JWT',
5207 }
5208 if headers:
5209 header_data.update(headers)
5210 header_b64 = base64.b64encode(json.dumps(header_data).encode('utf-8'))
5211 payload_b64 = base64.b64encode(json.dumps(payload_data).encode('utf-8'))
5212 h = hmac.new(key.encode('utf-8'), header_b64 + b'.' + payload_b64, hashlib.sha256)
5213 signature_b64 = base64.b64encode(h.digest())
5214 token = header_b64 + b'.' + payload_b64 + b'.' + signature_b64
5215 return token
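
# Usage sketch ('my-secret' is a placeholder key, not from the original source):
#   token = jwt_encode_hs256({'iss': 'example'}, 'my-secret')
#   token  ->  b'<b64 header>.<b64 payload>.<b64 HMAC-SHA256 signature>'
# Note: this helper uses standard Base64 with padding rather than the unpadded
# base64url alphabet that RFC 7515 specifies, so strict consumers may reject the token.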
819e0531 5216
5217
16b0d7e6 5218# Can be extended in the future to verify the signature, parse the header, and return the algorithm used if it is not HS256
5219def jwt_decode_hs256(jwt):
5220 header_b64, payload_b64, signature_b64 = jwt.split('.')
5221 payload_data = json.loads(base64.urlsafe_b64decode(payload_b64))
5222 return payload_data
5223
5224
819e0531 5225def supports_terminal_sequences(stream):
5226 if compat_os_name == 'nt':
e3c7d495 5227 from .compat import WINDOWS_VT_MODE # Must be imported locally
5228 if not WINDOWS_VT_MODE or get_windows_version() < (10, 0, 10586):
819e0531 5229 return False
5230 elif not os.getenv('TERM'):
5231 return False
5232 try:
5233 return stream.isatty()
5234 except BaseException:
5235 return False
5236
5237
ec11a9f4 5238_terminal_sequences_re = re.compile('\033\\[[^m]+m')
5239
5240
5241def remove_terminal_sequences(string):
5242 return _terminal_sequences_re.sub('', string)
5243
5244
5245def number_of_digits(number):
5246 return len('%d' % number)
34921b43 5247
5248
5249def join_nonempty(*values, delim='-', from_dict=None):
5250 if from_dict is not None:
c586f9e8 5251 values = map(from_dict.get, values)
34921b43 5252 return delim.join(map(str, filter(None, values)))
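
# Usage sketch (illustrative values):
#   join_nonempty('a', None, 'b', 0, 'c')                                    ->  'a-b-c'
#   join_nonempty('id', 'ext', delim='.', from_dict={'id': 'abc', 'ext': None})  ->  'abc'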
06e57990 5253
5254
5255def parse_http_range(range):
5256 """ Parse value of "Range" or "Content-Range" HTTP header into tuple. """
5257 if not range:
5258 return None, None, None
5259 crg = re.search(r'bytes[ =](\d+)-(\d+)?(?:/(\d+))?', range)
5260 if not crg:
5261 return None, None, None
5262 return int(crg.group(1)), int_or_none(crg.group(2)), int_or_none(crg.group(3))
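
# Usage sketch (illustrative header values):
#   parse_http_range('bytes=0-499')       ->  (0, 499, None)
#   parse_http_range('bytes 0-499/1234')  ->  (0, 499, 1234)
#   parse_http_range(None)                ->  (None, None, None)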
5263
5264
06e57990 5265class Config:
5266 own_args = None
5267 filename = None
5268 __initialized = False
5269
5270 def __init__(self, parser, label=None):
5271 self._parser, self.label = parser, label
5272 self._loaded_paths, self.configs = set(), []
5273
5274 def init(self, args=None, filename=None):
5275 assert not self.__initialized
65662dff 5276 directory = ''
06e57990 5277 if filename:
5278 location = os.path.realpath(filename)
65662dff 5279 directory = os.path.dirname(location)
06e57990 5280 if location in self._loaded_paths:
5281 return False
5282 self._loaded_paths.add(location)
5283
5284 self.__initialized = True
5285 self.own_args, self.filename = args, filename
5286 for location in self._parser.parse_args(args)[0].config_locations or []:
65662dff 5287 location = os.path.join(directory, expand_path(location))
06e57990 5288 if os.path.isdir(location):
5289 location = os.path.join(location, 'yt-dlp.conf')
5290 if not os.path.exists(location):
5291 self._parser.error(f'config location {location} does not exist')
5292 self.append_config(self.read_file(location), location)
5293 return True
5294
5295 def __str__(self):
5296 label = join_nonempty(
5297 self.label, 'config', f'"{self.filename}"' if self.filename else '',
5298 delim=' ')
5299 return join_nonempty(
5300 self.own_args is not None and f'{label[0].upper()}{label[1:]}: {self.hide_login_info(self.own_args)}',
5301 *(f'\n{c}'.replace('\n', '\n| ')[1:] for c in self.configs),
5302 delim='\n')
5303
5304 @staticmethod
5305 def read_file(filename, default=[]):
5306 try:
5307 optionf = open(filename)
5308 except IOError:
5309 return default # silently skip if file is not present
5310 try:
5311 # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
5312 contents = optionf.read()
5313 if sys.version_info < (3,):
5314 contents = contents.decode(preferredencoding())
5315 res = compat_shlex_split(contents, comments=True)
5316 finally:
5317 optionf.close()
5318 return res
5319
5320 @staticmethod
5321 def hide_login_info(opts):
5322 PRIVATE_OPTS = set(['-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'])
5323 eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')
5324
5325 def _scrub_eq(o):
5326 m = eqre.match(o)
5327 if m:
5328 return m.group('key') + '=PRIVATE'
5329 else:
5330 return o
5331
5332 opts = list(map(_scrub_eq, opts))
5333 for idx, opt in enumerate(opts):
5334 if opt in PRIVATE_OPTS and idx + 1 < len(opts):
5335 opts[idx + 1] = 'PRIVATE'
5336 return opts
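
    # Usage sketch (illustrative arguments):
    #   Config.hide_login_info(['-u', 'name', '--password=secret'])
    #       ->  ['-u', 'PRIVATE', '--password=PRIVATE']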
5337
5338 def append_config(self, *args, label=None):
5339 config = type(self)(self._parser, label)
5340 config._loaded_paths = self._loaded_paths
5341 if config.init(*args):
5342 self.configs.append(config)
5343
5344 @property
5345 def all_args(self):
5346 for config in reversed(self.configs):
5347 yield from config.all_args
5348 yield from self.own_args or []
5349
5350 def parse_args(self):
5351 return self._parser.parse_args(list(self.all_args))
5352
5353
5354class WebSocketsWrapper():
5355 """Wraps websockets module to use in non-async scopes"""
5356
5357 def __init__(self, url, headers=None):
5358 self.loop = asyncio.events.new_event_loop()
5359 self.conn = compat_websockets.connect(
5360 url, extra_headers=headers, ping_interval=None,
5361 close_timeout=float('inf'), loop=self.loop, ping_timeout=float('inf'))
15dfb392 5362 atexit.register(self.__exit__, None, None, None)
5363
5364 def __enter__(self):
5365 self.pool = self.run_with_loop(self.conn.__aenter__(), self.loop)
5366 return self
5367
5368 def send(self, *args):
5369 self.run_with_loop(self.pool.send(*args), self.loop)
5370
5371 def recv(self, *args):
5372 return self.run_with_loop(self.pool.recv(*args), self.loop)
5373
5374 def __exit__(self, type, value, traceback):
5375 try:
5376 return self.run_with_loop(self.conn.__aexit__(type, value, traceback), self.loop)
5377 finally:
5378 self._cancel_all_tasks(self.loop)
15dfb392 5379 self.loop.close()
5380
5381 # taken from https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py with modifications
5382 # For contributors: if any new library that uses asyncio needs to be run in non-async code, move these functions out of this class
5383 @staticmethod
5384 def run_with_loop(main, loop):
5385 if not asyncio.coroutines.iscoroutine(main):
5386 raise ValueError(f'a coroutine was expected, got {main!r}')
5387
5388 try:
5389 return loop.run_until_complete(main)
5390 finally:
5391 loop.run_until_complete(loop.shutdown_asyncgens())
5392 if hasattr(loop, 'shutdown_default_executor'):
5393 loop.run_until_complete(loop.shutdown_default_executor())
5394
5395 @staticmethod
5396 def _cancel_all_tasks(loop):
5397 to_cancel = asyncio.tasks.all_tasks(loop)
5398
5399 if not to_cancel:
5400 return
5401
5402 for task in to_cancel:
5403 task.cancel()
5404
5405 loop.run_until_complete(
5406 asyncio.tasks.gather(*to_cancel, loop=loop, return_exceptions=True))
5407
5408 for task in to_cancel:
5409 if task.cancelled():
5410 continue
5411 if task.exception() is not None:
5412 loop.call_exception_handler({
5413 'message': 'unhandled exception during asyncio.run() shutdown',
5414 'exception': task.exception(),
5415 'task': task,
5416 })
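
# Usage sketch (the URL and header are placeholders, not from the original source):
#   with WebSocketsWrapper('wss://example.com/socket', headers={'Origin': 'https://example.com'}) as ws:
#       ws.send('hello')
#       reply = ws.recv()
# The wrapper drives the websockets coroutines on its own event loop, so it can be
# used from ordinary synchronous code.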
5417
5418
5419has_websockets = bool(compat_websockets)