#!/usr/bin/env python
# coding: utf-8

from __future__ import unicode_literals

import base64
import binascii
import calendar
import codecs
import contextlib
import ctypes
import datetime
import email.utils
import email.header
import errno
import functools
import gzip
import io
import itertools
import json
import locale
import math
import operator
import os
import platform
import random
import re
import socket
import ssl
import subprocess
import sys
import tempfile
import traceback
import xml.etree.ElementTree
import zlib

from .compat import (
    compat_HTMLParseError,
    compat_HTMLParser,
    compat_basestring,
    compat_chr,
    compat_ctypes_WINFUNCTYPE,
    compat_etree_fromstring,
    compat_expanduser,
    compat_html_entities,
    compat_html_entities_html5,
    compat_http_client,
    compat_kwargs,
    compat_os_name,
    compat_parse_qs,
    compat_shlex_quote,
    compat_str,
    compat_struct_pack,
    compat_struct_unpack,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_parse_urlencode,
    compat_urllib_parse_urlparse,
    compat_urllib_parse_unquote_plus,
    compat_urllib_request,
    compat_urlparse,
    compat_xpath,
)

from .socks import (
    ProxyType,
    sockssocket,
)


def register_socks_protocols():
    # "Register" SOCKS protocols
    # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
    # URLs with protocols not in urlparse.uses_netloc are not handled correctly
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in compat_urlparse.uses_netloc:
            compat_urlparse.uses_netloc.append(scheme)


# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))

std_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:59.0) Gecko/20100101 Firefox/59.0',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
}


USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}


NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}

KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'avi', 'divx',
    'mov',
    'asf', 'wmv', 'wma',
    '3gp', '3g2',
    'mp3',
    'flac',
    'ape',
    'wav',
    'f4f', 'f4m', 'm3u8', 'smil')

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy')))

DATE_FORMATS = (
    '%d %B %Y',
    '%d %b %Y',
    '%B %d %Y',
    '%B %dst %Y',
    '%B %dnd %Y',
    '%B %dth %Y',
    '%b %d %Y',
    '%b %dst %Y',
    '%b %dnd %Y',
    '%b %dth %Y',
    '%b %dst %Y %I:%M',
    '%b %dnd %Y %I:%M',
    '%b %dth %Y %I:%M',
    '%Y %m %d',
    '%Y-%m-%d',
    '%Y/%m/%d',
    '%Y/%m/%d %H:%M',
    '%Y/%m/%d %H:%M:%S',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S.%f',
    '%d.%m.%Y %H:%M',
    '%d.%m.%Y %H.%M',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%Y-%m-%dT%H:%M',
    '%b %d %Y at %H:%M',
    '%b %d %Y at %H:%M:%S',
    '%B %d %Y at %H:%M',
    '%B %d %Y at %H:%M:%S',
)

DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
])

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])

PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'


def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref


def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non ascii characters unless we
        # use a unicode object
        path_basename = lambda f: os.path.basename(fn).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    args = {
        'suffix': '.tmp',
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args.update({
            'mode': 'w',
            'encoding': 'utf-8',
        })

    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))

    try:
        with tf:
            json.dump(obj, tf)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        os.rename(tf.name, fn)
    except Exception:
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise


if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] """
        assert re.match(r'^[a-zA-Z_-]+$', key)
        expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
        return node.find(expr)
else:
    def find_xpath_attr(node, xpath, key, val=None):
        for f in node.findall(compat_xpath(xpath)):
            if key not in f.attrib:
                continue
            if val is None or f.attrib.get(key) == val:
                return f
        return None

# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter


def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)


def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(compat_xpath(xpath))

    if isinstance(xpath, (str, compat_str)):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n


def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text


def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = '%s[@%s]' % (xpath, key) if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]
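
# Illustrative usage of the xpath helpers above (not part of the original
# module; the document below is made up for the example):
#
#     doc = xml.etree.ElementTree.fromstring(
#         '<root><media url="http://example.com/v.mp4">Title</media></root>')
#     xpath_text(doc, './media')                  -> 'Title'
#     xpath_attr(doc, './media', 'url')           -> 'http://example.com/v.mp4'
#     xpath_text(doc, './missing', default=None)  -> None
#     xpath_text(doc, './missing', fatal=True)    -> raises ExtractorError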


def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html)


def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    return retval[0] if retval else None


def get_element_by_attribute(attribute, value, html, escape_value=True):
    retval = get_elements_by_attribute(attribute, value, html, escape_value)
    return retval[0] if retval else None


def get_elements_by_class(class_name, html):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    return get_elements_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_elements_by_attribute(attribute, value, html, escape_value=True):
    """Return the content of the tag with the specified attribute in the passed HTML document"""

    value = re.escape(value) if escape_value else value

    retlist = []
    for m in re.finditer(r'''(?xs)
        <([a-zA-Z0-9:._-]+)
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
         \s+%s=['"]?%s['"]?
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
        \s*>
        (?P<content>.*?)
        </\1>
    ''' % (re.escape(attribute), value), html):
        res = m.group('content')

        if res.startswith('"') or res.startswith("'"):
            res = res[1:-1]

        retlist.append(unescapeHTML(res))

    return retlist
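
# A small illustration of the class lookup above (not in the original file;
# the markup is made up for the example):
#
#     get_element_by_class('foo', '<div class="foo bar">nice</div>')    -> 'nice'
#     get_element_by_class('no-such-class', '<div class="foo">x</div>') -> None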


class HTMLAttributeParser(compat_HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""
    def __init__(self):
        self.attrs = {}
        compat_HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)


def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
    but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
    """
    parser = HTMLAttributeParser()
    try:
        parser.feed(html_element)
        parser.close()
    # Older Python may throw HTMLParseError in case of malformed HTML
    except compat_HTMLParseError:
        pass
    return parser.attrs


def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    # Newline vs <br />
    html = html.replace('\n', ' ')
    html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html)
    html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()


def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars
        alt_filename = sanitize_path(filename)
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller
            stream = open(encodeFilename(alt_filename), open_mode)
            return (stream, alt_filename)


def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp


def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept
    if possible.
    """
    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        if char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '_-' if restricted else ' -'
        elif char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and ord(char) > 127:
            return '_'
        return char

    # Handle timestamps
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
    result = ''.join(map(replace_insane, s))
    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
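
# Example behaviour (illustrative only, following the rules above):
#
#     sanitize_filename('foo: bar?')                   -> 'foo - bar'
#     sanitize_filename('foo: bar?', restricted=True)  -> 'foo_-_bar'
#     sanitize_filename('12:34')                       -> '12_34'   (timestamp handling)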


def sanitize_path(s):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform != 'win32':
        return s
    drive_or_unc, _ = os.path.splitdrive(s)
    if sys.version_info < (2, 7) and not drive_or_unc:
        drive_or_unc, _ = os.path.splitunc(s)
    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    return os.path.join(*sanitized_path)


def sanitize_url(url):
    # Prepend protocol-less URLs with `http:` scheme in order to mitigate
    # the number of unwanted failures due to missing protocol
    if url.startswith('//'):
        return 'http:%s' % url
    # Fix some common typos seen so far
    COMMON_TYPOS = (
        # https://github.com/rg3/youtube-dl/issues/15649
        (r'^httpss://', r'https://'),
        # https://bx1.be/lives/direct-tv/
        (r'^rmtp([es]?)://', r'rtmp\1://'),
    )
    for mistake, fixup in COMMON_TYPOS:
        if re.match(mistake, url):
            return re.sub(mistake, fixup, url)
    return url


def sanitized_Request(url, *args, **kwargs):
    return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs)


def expand_path(s):
    """Expand shell variables and ~"""
    return os.path.expandvars(compat_expanduser(s))


def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    res = []
    for el in iterable:
        if el not in res:
            res.append(el)
    return res


def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in compat_html_entities_html5:
        return compat_html_entities_html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/rg3/youtube-dl/issues/7518
        try:
            return compat_chr(int(numstr, base))
        except ValueError:
            pass

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert type(s) == compat_str

    return re.sub(
        r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
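
# Entity handling in practice (illustrative):
#
#     unescapeHTML('&quot;Brot &amp; Spiele&quot;')  -> '"Brot & Spiele"'
#     unescapeHTML('&#39;')                          -> "'"
#     unescapeHTML('&nosuchentity;')                 -> '&nosuchentity;'  (left as-is)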


def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding


def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """

    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    # Pass '' directly to use Unicode APIs on Windows 2000 and up
    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        return s

    # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
    if sys.platform.startswith('java'):
        return s

    return s.encode(get_subprocess_encoding(), 'ignore')


def decodeFilename(b, for_subprocess=False):

    if sys.version_info >= (3, 0):
        return b

    if not isinstance(b, bytes):
        return b

    return b.decode(get_subprocess_encoding(), 'ignore')


def encodeArgument(s):
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)


def decodeArgument(b):
    return decodeFilename(b, True)


def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval


def formatSeconds(secs):
    if secs > 3600:
        return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
    elif secs > 60:
        return '%d:%02d' % (secs // 60, secs % 60)
    else:
        return '%d' % secs


def make_HTTPS_handler(params, **kwargs):
    opts_no_check_certificate = params.get('nocheckcertificate', False)
    if hasattr(ssl, 'create_default_context'):  # Python >= 3.4 or 2.7.9
        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
        if opts_no_check_certificate:
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        try:
            return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
        except TypeError:
            # Python 2.7.8
            # (create_default_context present but HTTPSHandler has no context=)
            pass

    if sys.version_info < (3, 2):
        return YoutubeDLHTTPSHandler(params, **kwargs)
    else:  # Python < 3.4
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = (ssl.CERT_NONE
                               if opts_no_check_certificate
                               else ssl.CERT_REQUIRED)
        context.set_default_verify_paths()
        return YoutubeDLHTTPSHandler(params, context=context, **kwargs)


def bug_reports_message():
    if ytdl_is_updateable():
        update_cmd = 'type youtube-dl -U to update'
    else:
        update_cmd = 'see https://yt-dl.org/update on how to update'
    msg = '; please report this issue on https://yt-dl.org/bug .'
    msg += ' Make sure you are using the latest version; %s.' % update_cmd
    msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
    return msg

class YoutubeDLError(Exception):
    """Base exception for YoutubeDL errors."""
    pass


class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
        """

        if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
            expected = True
        if video_id is not None:
            msg = video_id + ': ' + msg
        if cause:
            msg += ' (caused by %r)' % cause
        if not expected:
            msg += bug_reports_message()
        super(ExtractorError, self).__init__(msg)

        self.traceback = tb
        self.exc_info = sys.exc_info()  # preserve original exception
        self.cause = cause
        self.video_id = video_id

    def format_traceback(self):
        if self.traceback is None:
            return None
        return ''.join(traceback.format_tb(self.traceback))


class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super(UnsupportedError, self).__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class GeoRestrictedError(ExtractorError):
    """Geographic restriction Error exception.

    This exception may be thrown when a video is not available from your
    geographic location due to geographic restrictions imposed by a website.
    """
    def __init__(self, msg, countries=None):
        super(GeoRestrictedError, self).__init__(msg, expected=True)
        self.msg = msg
        self.countries = countries


class DownloadError(YoutubeDLError):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info


class SameFileError(YoutubeDLError):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    pass


class PostProcessingError(YoutubeDLError):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """

    def __init__(self, msg):
        super(PostProcessingError, self).__init__(msg)
        self.msg = msg


class MaxDownloadsReached(YoutubeDLError):
    """ --max-downloads limit has been reached. """
    pass


class UnavailableVideoError(YoutubeDLError):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    pass


class ContentTooShortError(YoutubeDLError):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        super(ContentTooShortError, self).__init__(
            'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
        )
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected


class XAttrMetadataError(YoutubeDLError):
    def __init__(self, code=None, msg='Unknown error'):
        super(XAttrMetadataError, self).__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        if (self.code in (errno.ENOSPC, errno.EDQUOT) or
                'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'


class XAttrUnavailableError(YoutubeDLError):
    pass

def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
    # expected HTTP responses to meet HTTP/1.0 or later (see also
    # https://github.com/rg3/youtube-dl/issues/6727)
    if sys.version_info < (3, 0):
        kwargs['strict'] = True
    hc = http_class(*args, **compat_kwargs(kwargs))
    source_address = ydl_handler._params.get('source_address')

    if source_address is not None:
        # This is to workaround _create_connection() from socket where it will try all
        # address data from getaddrinfo() including IPv6. This filters the result from
        # getaddrinfo() based on the source_address value.
        # This is based on the cpython socket.create_connection() function.
        # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
        def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
            host, port = address
            err = None
            addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
            af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
            ip_addrs = [addr for addr in addrs if addr[0] == af]
            if addrs and not ip_addrs:
                ip_version = 'v4' if af == socket.AF_INET else 'v6'
                raise socket.error(
                    "No remote IP%s addresses available for connect, can't use '%s' as source address"
                    % (ip_version, source_address[0]))
            for res in ip_addrs:
                af, socktype, proto, canonname, sa = res
                sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                        sock.settimeout(timeout)
                    sock.bind(source_address)
                    sock.connect(sa)
                    err = None  # Explicitly break reference cycle
                    return sock
                except socket.error as _:
                    err = _
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            else:
                raise socket.error('getaddrinfo returns an empty list')
        if hasattr(hc, '_create_connection'):
            hc._create_connection = _create_connection
        sa = (source_address, 0)
        if hasattr(hc, 'source_address'):  # Python 2.7+
            hc.source_address = sa
        else:  # Python 2.6
            def _hc_connect(self, *args, **kwargs):
                sock = _create_connection(
                    (self.host, self.port), self.timeout, sa)
                if is_https:
                    self.sock = ssl.wrap_socket(
                        sock, self.key_file, self.cert_file,
                        ssl_version=ssl.PROTOCOL_TLSv1)
                else:
                    self.sock = sock
            hc.connect = functools.partial(_hc_connect, hc)

    return hc

def handle_youtubedl_headers(headers):
    filtered_headers = headers

    if 'Youtubedl-no-compression' in filtered_headers:
        filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
        del filtered_headers['Youtubedl-no-compression']

    return filtered_headers


class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        conn_class = compat_http_client.HTTPConnection

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in std_headers.items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/rg3/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                if sys.version_info >= (3, 0):
                    location = location.encode('iso-8859-1').decode('utf-8')
                else:
                    location = location.decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    if sys.version_info < (3, 0):
                        location_escaped = location_escaped.encode('utf-8')
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response


def make_socks_conn_class(base_class, socks_proxy):
    assert issubclass(base_class, (
        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))

    url_components = compat_urlparse.urlparse(socks_proxy)
    if url_components.scheme.lower() == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif url_components.scheme.lower() in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif url_components.scheme.lower() == 'socks4a':
        socks_type = ProxyType.SOCKS4A

    def unquote_if_non_empty(s):
        if not s:
            return s
        return compat_urllib_parse_unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if type(self.timeout) in (int, float):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            if isinstance(self, compat_http_client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection


class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        conn_class = self._https_conn_class

        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, True),
            req, **kwargs)


class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        # Python 2 will choke on next HTTP request in row if there are non-ASCII
        # characters in Set-Cookie HTTP header of last response (see
        # https://github.com/rg3/youtube-dl/issues/6769).
        # In order to at least prevent crashing we will percent encode Set-Cookie
        # header before HTTPCookieProcessor starts processing it.
        # if sys.version_info < (3, 0) and response.headers:
        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
        #         set_cookie = response.headers.get(set_cookie_header)
        #         if set_cookie:
        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
        #             if set_cookie != set_cookie_escaped:
        #                 del response.headers[set_cookie_header]
        #                 response.headers[set_cookie_header] = set_cookie_escaped
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response

def extract_timezone(date_str):
    m = re.search(
        r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
        date_str)
    if not m:
        timezone = datetime.timedelta()
    else:
        date_str = date_str[:-len(m.group('tz'))]
        if not m.group('sign'):
            timezone = datetime.timedelta()
        else:
            sign = 1 if m.group('sign') == '+' else -1
            timezone = datetime.timedelta(
                hours=sign * int(m.group('hours')),
                minutes=sign * int(m.group('minutes')))
    return timezone, date_str


def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        timezone, date_str = extract_timezone(date_str)

    try:
        date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
        dt = datetime.datetime.strptime(date_str, date_format) - timezone
        return calendar.timegm(dt.timetuple())
    except ValueError:
        pass
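
# For reference (illustrative; the values can be double-checked with
# calendar.timegm):
#
#     parse_iso8601('2014-12-01T15:00:00Z')       -> 1417446000
#     parse_iso8601('2014-12-01T16:00:00+01:00')  -> 1417446000
#     parse_iso8601('not a date')                 -> None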


def date_formats(day_first=True):
    return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST


def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None
    upload_date = None
    # Replace commas
    date_str = date_str.replace(',', ' ')
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
    _, date_str = extract_timezone(date_str)

    for expression in date_formats(day_first):
        try:
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
        except ValueError:
            pass
    if upload_date is None:
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            try:
                upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
            except ValueError:
                pass
    if upload_date is not None:
        return compat_str(upload_date)
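
# Typical inputs and outputs (illustrative):
#
#     unified_strdate('2012/12/21')                  -> '20121221'
#     unified_strdate('21.12.2012')                  -> '20121221'  (day_first=True is the default)
#     unified_strdate('12.21.2012', day_first=False) -> '20121221'
#     unified_strdate('no date here')                -> None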


def unified_timestamp(date_str, day_first=True):
    if date_str is None:
        return None

    date_str = re.sub(r'[,|]', '', date_str)

    pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
    timezone, date_str = extract_timezone(date_str)

    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    # Remove unrecognized timezones from ISO 8601 alike timestamps
    m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
    if m:
        date_str = date_str[:-len(m.group('tz'))]

    # Python only supports microseconds, so remove nanoseconds
    m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
    if m:
        date_str = m.group(1)

    for expression in date_formats(day_first):
        try:
            dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
            return calendar.timegm(dt.timetuple())
        except ValueError:
            pass
    timetuple = email.utils.parsedate_tz(date_str)
    if timetuple:
        return calendar.timegm(timetuple) + pm_delta * 3600


def determine_ext(url, default_ext='unknown_video'):
    if url is None or '.' not in url:
        return default_ext
    guess = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', guess):
        return guess
    # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
    elif guess.rstrip('/') in KNOWN_EXTENSIONS:
        return guess.rstrip('/')
    else:
        return default_ext
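
# Quick illustration (made-up URLs):
#
#     determine_ext('http://example.com/video.mp4?dl=1')         -> 'mp4'
#     determine_ext('http://example.com/foo/bar.mp4/?download')  -> 'mp4'
#     determine_ext('http://example.com/watch')                  -> 'unknown_video'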


def subtitles_filename(filename, sub_lang, sub_format):
    return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format


def date_from_str(date_str):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today)[+-][0-9](day|week|month|year)(s)?"""
    today = datetime.date.today()
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
    if match is not None:
        sign = match.group('sign')
        time = int(match.group('time'))
        if sign == '-':
            time = -time
        unit = match.group('unit')
        # A bad approximation?
        if unit == 'month':
            unit = 'day'
            time *= 30
        elif unit == 'year':
            unit = 'day'
            time *= 365
        unit += 's'
        delta = datetime.timedelta(**{unit: time})
        return today + delta
    return datetime.datetime.strptime(date_str, '%Y%m%d').date()


def hyphenate_date(date_str):
    """
    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is not None:
        return '-'.join(match.groups())
    else:
        return date_str


class DateRange(object):
    """Represents a time interval between two dates"""

    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        if start is not None:
            self.start = date_from_str(start)
        else:
            self.start = datetime.datetime.min.date()
        if end is not None:
            self.end = date_from_str(end)
        else:
            self.end = datetime.datetime.max.date()
        if self.start > self.end:
            raise ValueError('Date range: "%s" , the start date must be before the end date' % self)

    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)

    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end

    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
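
# A couple of illustrative checks (dates picked arbitrarily):
#
#     date_from_str('now-1week') == datetime.date.today() - datetime.timedelta(days=7)
#     '20141201' in DateRange('20141126', '20141208')  -> True
#     '20141210' in DateRange.day('20141209')          -> False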


def platform_name():
    """ Returns the platform name as a compat_str """
    res = platform.platform()
    if isinstance(res, bytes):
        res = res.decode(preferredencoding())

    assert isinstance(res, compat_str)
    return res


def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes
    import ctypes.wintypes

    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False

    GetStdHandle = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        ('GetStdHandle', ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    WriteConsoleW = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        ('GetConsoleMode', ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
                GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):
        return False

    def next_nonbmp_pos(s):
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)

    while s:
        count = min(next_nonbmp_pos(s), 1024)

        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True

def write_string(s, out=None, encoding=None):
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        if _windows_write_string(s, out):
            return

    if ('b' in getattr(out, 'mode', '') or
            sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        byt = s.encode(encoding or preferredencoding(), 'ignore')
        out.write(byt)
    elif hasattr(out, 'buffer'):
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
        byt = s.encode(enc, 'ignore')
        out.buffer.write(byt)
    else:
        out.write(s)
    out.flush()


def bytes_to_intlist(bs):
    if not bs:
        return []
    if isinstance(bs[0], int):  # Python 3
        return list(bs)
    else:
        return [ord(c) for c in bs]


def intlist_to_bytes(xs):
    if not xs:
        return b''
    return compat_struct_pack('%dB' % len(xs), *xs)


# Cross-platform file locking
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,  # hFile
        ctypes.wintypes.DWORD,  # dwFlags
        ctypes.wintypes.DWORD,  # dwReserved
        ctypes.wintypes.DWORD,  # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,  # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,  # hFile
        ctypes.wintypes.DWORD,  # dwReserved
        ctypes.wintypes.DWORD,  # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,  # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())

else:
    # Some platforms, such as Jython, are missing fcntl
    try:
        import fcntl

        def _lock_file(f, exclusive):
            fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

        def _unlock_file(f):
            fcntl.flock(f, fcntl.LOCK_UN)
    except ImportError:
        UNSUPPORTED_MSG = 'file locking is not supported on this platform'

        def _lock_file(f, exclusive):
            raise IOError(UNSUPPORTED_MSG)

        def _unlock_file(f):
            raise IOError(UNSUPPORTED_MSG)


class locked_file(object):
    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        exclusive = self.mode != 'r'
        try:
            _lock_file(self.f, exclusive)
        except IOError:
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)
4eb7f1d1
JMF
1581
1582
4644ac55
S
1583def get_filesystem_encoding():
1584 encoding = sys.getfilesystemencoding()
1585 return encoding if encoding is not None else 'utf-8'
1586
1587
4eb7f1d1 1588def shell_quote(args):
a6a173c2 1589 quoted_args = []
4644ac55 1590 encoding = get_filesystem_encoding()
a6a173c2
JMF
1591 for a in args:
1592 if isinstance(a, bytes):
1593 # We may get a filename encoded with 'encodeFilename'
1594 a = a.decode(encoding)
aefce8e6 1595 quoted_args.append(compat_shlex_quote(a))
28e614de 1596 return ' '.join(quoted_args)
9d4660ca
PH
1597
1598
1599def smuggle_url(url, data):
1600 """ Pass additional data in a URL for internal use. """
1601
81953d1a
RA
1602 url, idata = unsmuggle_url(url, {})
1603 data.update(idata)
15707c7e 1604 sdata = compat_urllib_parse_urlencode(
28e614de
PH
1605 {'__youtubedl_smuggle': json.dumps(data)})
1606 return url + '#' + sdata
9d4660ca
PH
1607
1608
79f82953 1609def unsmuggle_url(smug_url, default=None):
83e865a3 1610 if '#__youtubedl_smuggle' not in smug_url:
79f82953 1611 return smug_url, default
28e614de
PH
1612 url, _, sdata = smug_url.rpartition('#')
1613 jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
9d4660ca
PH
1614 data = json.loads(jsond)
1615 return url, data
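# Round-trip sketch (hypothetical URL): the extra data travels JSON-encoded
# in the fragment, so it is never sent to the server.
#
#     smuggled = smuggle_url('https://example.com/video', {'referrer': 'https://example.org'})
#     url, data = unsmuggle_url(smuggled)
#     # url == 'https://example.com/video'
#     # data == {'referrer': 'https://example.org'}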
02dbf93f
PH
1616
1617
02dbf93f
PH
1618def format_bytes(bytes):
1619 if bytes is None:
28e614de 1620 return 'N/A'
02dbf93f
PH
1621 if type(bytes) is str:
1622 bytes = float(bytes)
1623 if bytes == 0.0:
1624 exponent = 0
1625 else:
1626 exponent = int(math.log(bytes, 1024.0))
28e614de 1627 suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
02dbf93f 1628 converted = float(bytes) / float(1024 ** exponent)
28e614de 1629 return '%.2f%s' % (converted, suffix)
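# Illustrative values:
#
#     format_bytes(None)            # -> 'N/A'
#     format_bytes(1536)            # -> '1.50KiB'
#     format_bytes(10 * 1024 ** 2)  # -> '10.00MiB'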
f53c966a 1630
1c088fa8 1631
fb47597b
S
1632def lookup_unit_table(unit_table, s):
1633 units_re = '|'.join(re.escape(u) for u in unit_table)
1634 m = re.match(
782b1b5b 1635 r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
fb47597b
S
1636 if not m:
1637 return None
1638 num_str = m.group('num').replace(',', '.')
1639 mult = unit_table[m.group('unit')]
1640 return int(float(num_str) * mult)
1641
1642
be64b5b0
PH
1643def parse_filesize(s):
1644 if s is None:
1645 return None
1646
dfb1b146 1647 # The lower-case forms are of course incorrect and unofficial,
be64b5b0
PH
1648 # but we support those too
1649 _UNIT_TABLE = {
1650 'B': 1,
1651 'b': 1,
70852b47 1652 'bytes': 1,
be64b5b0
PH
1653 'KiB': 1024,
1654 'KB': 1000,
1655 'kB': 1024,
1656 'Kb': 1000,
13585d76 1657 'kb': 1000,
70852b47
YCH
1658 'kilobytes': 1000,
1659 'kibibytes': 1024,
be64b5b0
PH
1660 'MiB': 1024 ** 2,
1661 'MB': 1000 ** 2,
1662 'mB': 1024 ** 2,
1663 'Mb': 1000 ** 2,
13585d76 1664 'mb': 1000 ** 2,
70852b47
YCH
1665 'megabytes': 1000 ** 2,
1666 'mebibytes': 1024 ** 2,
be64b5b0
PH
1667 'GiB': 1024 ** 3,
1668 'GB': 1000 ** 3,
1669 'gB': 1024 ** 3,
1670 'Gb': 1000 ** 3,
13585d76 1671 'gb': 1000 ** 3,
70852b47
YCH
1672 'gigabytes': 1000 ** 3,
1673 'gibibytes': 1024 ** 3,
be64b5b0
PH
1674 'TiB': 1024 ** 4,
1675 'TB': 1000 ** 4,
1676 'tB': 1024 ** 4,
1677 'Tb': 1000 ** 4,
13585d76 1678 'tb': 1000 ** 4,
70852b47
YCH
1679 'terabytes': 1000 ** 4,
1680 'tebibytes': 1024 ** 4,
be64b5b0
PH
1681 'PiB': 1024 ** 5,
1682 'PB': 1000 ** 5,
1683 'pB': 1024 ** 5,
1684 'Pb': 1000 ** 5,
13585d76 1685 'pb': 1000 ** 5,
70852b47
YCH
1686 'petabytes': 1000 ** 5,
1687 'pebibytes': 1024 ** 5,
be64b5b0
PH
1688 'EiB': 1024 ** 6,
1689 'EB': 1000 ** 6,
1690 'eB': 1024 ** 6,
1691 'Eb': 1000 ** 6,
13585d76 1692 'eb': 1000 ** 6,
70852b47
YCH
1693 'exabytes': 1000 ** 6,
1694 'exbibytes': 1024 ** 6,
be64b5b0
PH
1695 'ZiB': 1024 ** 7,
1696 'ZB': 1000 ** 7,
1697 'zB': 1024 ** 7,
1698 'Zb': 1000 ** 7,
13585d76 1699 'zb': 1000 ** 7,
70852b47
YCH
1700 'zettabytes': 1000 ** 7,
1701 'zebibytes': 1024 ** 7,
be64b5b0
PH
1702 'YiB': 1024 ** 8,
1703 'YB': 1000 ** 8,
1704 'yB': 1024 ** 8,
1705 'Yb': 1000 ** 8,
13585d76 1706 'yb': 1000 ** 8,
70852b47
YCH
1707 'yottabytes': 1000 ** 8,
1708 'yobibytes': 1024 ** 8,
be64b5b0
PH
1709 }
1710
fb47597b
S
1711 return lookup_unit_table(_UNIT_TABLE, s)
1712
1713
1714def parse_count(s):
1715 if s is None:
be64b5b0
PH
1716 return None
1717
fb47597b
S
1718 s = s.strip()
1719
1720 if re.match(r'^[\d,.]+$', s):
1721 return str_to_int(s)
1722
1723 _UNIT_TABLE = {
1724 'k': 1000,
1725 'K': 1000,
1726 'm': 1000 ** 2,
1727 'M': 1000 ** 2,
1728 'kk': 1000 ** 2,
1729 'KK': 1000 ** 2,
1730 }
be64b5b0 1731
fb47597b 1732 return lookup_unit_table(_UNIT_TABLE, s)
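# Illustrative values for the two parsers above:
#
#     parse_filesize('1.5 GiB')  # -> 1610612736 (1.5 * 1024 ** 3)
#     parse_filesize('500 kB')   # -> 512000 (per the table above, 'kB' is treated as 1024-based)
#     parse_count('1.2M')        # -> 1200000
#     parse_count('15k')         # -> 15000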
be64b5b0 1733
2f7ae819 1734
b871d7e9
S
1735def parse_resolution(s):
1736 if s is None:
1737 return {}
1738
1739 mobj = re.search(r'\b(?P<w>\d+)\s*[xX×]\s*(?P<h>\d+)\b', s)
1740 if mobj:
1741 return {
1742 'width': int(mobj.group('w')),
1743 'height': int(mobj.group('h')),
1744 }
1745
1746 mobj = re.search(r'\b(\d+)[pPiI]\b', s)
1747 if mobj:
1748 return {'height': int(mobj.group(1))}
1749
1750 mobj = re.search(r'\b([48])[kK]\b', s)
1751 if mobj:
1752 return {'height': int(mobj.group(1)) * 540}
1753
1754 return {}
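# Illustrative values:
#
#     parse_resolution('1920x1080')  # -> {'width': 1920, 'height': 1080}
#     parse_resolution('720p')       # -> {'height': 720}
#     parse_resolution('4K')         # -> {'height': 2160}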
1755
1756
a942d6cb 1757def month_by_name(name, lang='en'):
caefb1de
PH
1758 """ Return the number of a month by (locale-independently) English name """
1759
f6717dec 1760 month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
a942d6cb 1761
caefb1de 1762 try:
f6717dec 1763 return month_names.index(name) + 1
7105440c
YCH
1764 except ValueError:
1765 return None
1766
1767
1768def month_by_abbreviation(abbrev):
1769 """ Return the number of a month by (locale-independently) English
1770 abbreviations """
1771
1772 try:
1773 return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
caefb1de
PH
1774 except ValueError:
1775 return None
18258362
JMF
1776
1777
5aafe895 1778def fix_xml_ampersands(xml_str):
18258362 1779 """Replace all the '&' by '&amp;' in XML"""
5aafe895
PH
1780 return re.sub(
1781 r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
28e614de 1782 '&amp;',
5aafe895 1783 xml_str)
e3946f98
PH
1784
1785
1786def setproctitle(title):
8bf48f23 1787 assert isinstance(title, compat_str)
c1c05c67
YCH
1788
1789 # ctypes in Jython is not complete
1790 # http://bugs.jython.org/issue2148
1791 if sys.platform.startswith('java'):
1792 return
1793
e3946f98 1794 try:
611c1dd9 1795 libc = ctypes.cdll.LoadLibrary('libc.so.6')
e3946f98
PH
1796 except OSError:
1797 return
2f49bcd6
RC
1798 except TypeError:
1799 # LoadLibrary in Windows Python 2.7.13 only expects
1800 # a bytestring, but since unicode_literals turns
1801 # every string into a unicode string, it fails.
1802 return
6eefe533
PH
1803 title_bytes = title.encode('utf-8')
1804 buf = ctypes.create_string_buffer(len(title_bytes))
1805 buf.value = title_bytes
e3946f98 1806 try:
6eefe533 1807 libc.prctl(15, buf, 0, 0, 0)
e3946f98
PH
1808 except AttributeError:
1809 return # Strange libc, just skip this
d7dda168
PH
1810
1811
1812def remove_start(s, start):
46bc9b7d 1813 return s[len(start):] if s is not None and s.startswith(start) else s
29eb5174
PH
1814
1815
2b9faf55 1816def remove_end(s, end):
46bc9b7d 1817 return s[:-len(end)] if s is not None and s.endswith(end) else s
2b9faf55
PH
1818
1819
31b2051e
S
1820def remove_quotes(s):
1821 if s is None or len(s) < 2:
1822 return s
1823 for quote in ('"', "'", ):
1824 if s[0] == quote and s[-1] == quote:
1825 return s[1:-1]
1826 return s
1827
1828
29eb5174 1829def url_basename(url):
9b8aaeed 1830 path = compat_urlparse.urlparse(url).path
28e614de 1831 return path.strip('/').split('/')[-1]
aa94a6d3
PH
1832
1833
02dc0a36
S
1834def base_url(url):
1835 return re.match(r'https?://[^?#&]+/', url).group()
1836
1837
e34c3361 1838def urljoin(base, path):
4b5de77b
S
1839 if isinstance(path, bytes):
1840 path = path.decode('utf-8')
e34c3361
S
1841 if not isinstance(path, compat_str) or not path:
1842 return None
b0c65c67 1843 if re.match(r'^(?:https?:)?//', path):
e34c3361 1844 return path
4b5de77b
S
1845 if isinstance(base, bytes):
1846 base = base.decode('utf-8')
1847 if not isinstance(base, compat_str) or not re.match(
1848 r'^(?:https?:)?//', base):
e34c3361
S
1849 return None
1850 return compat_urlparse.urljoin(base, path)
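# Illustrative values (hypothetical URLs):
#
#     urljoin('https://example.com/a/b', 'c.mp4')                # -> 'https://example.com/a/c.mp4'
#     urljoin('https://example.com/a/', '//cdn.example.com/x')   # -> '//cdn.example.com/x'
#     urljoin(None, 'relative/path')                             # -> None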
1851
1852
aa94a6d3
PH
1853class HEADRequest(compat_urllib_request.Request):
1854 def get_method(self):
611c1dd9 1855 return 'HEAD'
7217e148
PH
1856
1857
95cf60e8
S
1858class PUTRequest(compat_urllib_request.Request):
1859 def get_method(self):
1860 return 'PUT'
1861
1862
9732d77e 1863def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
28746fbd
PH
1864 if get_attr:
1865 if v is not None:
1866 v = getattr(v, get_attr, None)
9572013d
PH
1867 if v == '':
1868 v = None
1812afb7
S
1869 if v is None:
1870 return default
1871 try:
1872 return int(v) * invscale // scale
1873 except ValueError:
af98f8ff 1874 return default
9732d77e 1875
9572013d 1876
40a90862
JMF
1877def str_or_none(v, default=None):
1878 return default if v is None else compat_str(v)
1879
9732d77e
PH
1880
1881def str_to_int(int_str):
48d4681e 1882 """ A more relaxed version of int_or_none """
9732d77e
PH
1883 if int_str is None:
1884 return None
28e614de 1885 int_str = re.sub(r'[,\.\+]', '', int_str)
9732d77e 1886 return int(int_str)
608d11f5
PH
1887
1888
9732d77e 1889def float_or_none(v, scale=1, invscale=1, default=None):
caf80631
S
1890 if v is None:
1891 return default
1892 try:
1893 return float(v) * invscale / scale
1894 except ValueError:
1895 return default
43f775e4
PH
1896
1897
c7e327c4
S
1898def bool_or_none(v, default=None):
1899 return v if isinstance(v, bool) else default
1900
1901
b72b4431
S
1902def strip_or_none(v):
1903 return None if v is None else v.strip()
1904
1905
af03000a
S
1906def url_or_none(url):
1907 if not url or not isinstance(url, compat_str):
1908 return None
1909 url = url.strip()
1910 return url if re.match(r'^(?:[a-zA-Z][\da-zA-Z.+-]*:)?//', url) else None
1911
1912
608d11f5 1913def parse_duration(s):
8f9312c3 1914 if not isinstance(s, compat_basestring):
608d11f5
PH
1915 return None
1916
ca7b3246
S
1917 s = s.strip()
1918
acaff495 1919 days, hours, mins, secs, ms = [None] * 5
15846398 1920 m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
acaff495 1921 if m:
1922 days, hours, mins, secs, ms = m.groups()
1923 else:
1924 m = re.match(
056653bb
S
1925 r'''(?ix)(?:P?
1926 (?:
1927 [0-9]+\s*y(?:ears?)?\s*
1928 )?
1929 (?:
1930 [0-9]+\s*m(?:onths?)?\s*
1931 )?
1932 (?:
1933 [0-9]+\s*w(?:eeks?)?\s*
1934 )?
8f4b58d7 1935 (?:
acaff495 1936 (?P<days>[0-9]+)\s*d(?:ays?)?\s*
8f4b58d7 1937 )?
056653bb 1938 T)?
acaff495 1939 (?:
1940 (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
1941 )?
1942 (?:
1943 (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
1944 )?
1945 (?:
1946 (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
15846398 1947 )?Z?$''', s)
acaff495 1948 if m:
1949 days, hours, mins, secs, ms = m.groups()
1950 else:
15846398 1951 m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
acaff495 1952 if m:
1953 hours, mins = m.groups()
1954 else:
1955 return None
1956
1957 duration = 0
1958 if secs:
1959 duration += float(secs)
1960 if mins:
1961 duration += float(mins) * 60
1962 if hours:
1963 duration += float(hours) * 60 * 60
1964 if days:
1965 duration += float(days) * 24 * 60 * 60
1966 if ms:
1967 duration += float(ms)
1968 return duration
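# Illustrative values (all returned as seconds):
#
#     parse_duration('1:02:03')       # -> 3723.0
#     parse_duration('PT1H30M')       # -> 5400.0 (ISO 8601 style)
#     parse_duration('3 min 20 sec')  # -> 200.0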
91d7d0b3
JMF
1969
1970
e65e4c88 1971def prepend_extension(filename, ext, expected_real_ext=None):
5f6a1245 1972 name, real_ext = os.path.splitext(filename)
e65e4c88
S
1973 return (
1974 '{0}.{1}{2}'.format(name, ext, real_ext)
1975 if not expected_real_ext or real_ext[1:] == expected_real_ext
1976 else '{0}.{1}'.format(filename, ext))
d70ad093
PH
1977
1978
b3ed15b7
S
1979def replace_extension(filename, ext, expected_real_ext=None):
1980 name, real_ext = os.path.splitext(filename)
1981 return '{0}.{1}'.format(
1982 name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
1983 ext)
1984
1985
d70ad093
PH
1986def check_executable(exe, args=[]):
1987 """ Checks if the given binary is installed somewhere in PATH, and returns its name.
1988 args can be a list of arguments for a short output (like -version) """
1989 try:
1990 subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
1991 except OSError:
1992 return False
1993 return exe
b7ab0590
PH
1994
1995
95807118 1996def get_exe_version(exe, args=['--version'],
cae97f65 1997 version_re=None, unrecognized='present'):
95807118
PH
1998 """ Returns the version of the specified executable,
1999 or False if the executable is not present """
2000 try:
b64d04c1
YCH
2001 # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
2002 # SIGTTOU if youtube-dl is run in the background.
2003 # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656
cae97f65 2004 out, _ = subprocess.Popen(
54116803 2005 [encodeArgument(exe)] + args,
00ca7552 2006 stdin=subprocess.PIPE,
95807118
PH
2007 stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
2008 except OSError:
2009 return False
cae97f65
PH
2010 if isinstance(out, bytes): # Python 2.x
2011 out = out.decode('ascii', 'ignore')
2012 return detect_exe_version(out, version_re, unrecognized)
2013
2014
2015def detect_exe_version(output, version_re=None, unrecognized='present'):
2016 assert isinstance(output, compat_str)
2017 if version_re is None:
2018 version_re = r'version\s+([-0-9._a-zA-Z]+)'
2019 m = re.search(version_re, output)
95807118
PH
2020 if m:
2021 return m.group(1)
2022 else:
2023 return unrecognized
2024
2025
b7ab0590 2026class PagedList(object):
dd26ced1
PH
2027 def __len__(self):
2028 # This is only useful for tests
2029 return len(self.getslice())
2030
9c44d242
PH
2031
2032class OnDemandPagedList(PagedList):
6be08ce6 2033 def __init__(self, pagefunc, pagesize, use_cache=True):
9c44d242
PH
2034 self._pagefunc = pagefunc
2035 self._pagesize = pagesize
b95dc034
YCH
2036 self._use_cache = use_cache
2037 if use_cache:
2038 self._cache = {}
9c44d242 2039
b7ab0590
PH
2040 def getslice(self, start=0, end=None):
2041 res = []
2042 for pagenum in itertools.count(start // self._pagesize):
2043 firstid = pagenum * self._pagesize
2044 nextfirstid = pagenum * self._pagesize + self._pagesize
2045 if start >= nextfirstid:
2046 continue
2047
b95dc034
YCH
2048 page_results = None
2049 if self._use_cache:
2050 page_results = self._cache.get(pagenum)
2051 if page_results is None:
2052 page_results = list(self._pagefunc(pagenum))
2053 if self._use_cache:
2054 self._cache[pagenum] = page_results
b7ab0590
PH
2055
2056 startv = (
2057 start % self._pagesize
2058 if firstid <= start < nextfirstid
2059 else 0)
2060
2061 endv = (
2062 ((end - 1) % self._pagesize) + 1
2063 if (end is not None and firstid <= end <= nextfirstid)
2064 else None)
2065
2066 if startv != 0 or endv is not None:
2067 page_results = page_results[startv:endv]
2068 res.extend(page_results)
2069
2070 # A little optimization - if the current page is not "full", i.e. does
2071 # not contain page_size videos, then we can assume that this page
2072 # is the last one - there are no more ids on further pages -
2073 # i.e. no need to query again.
2074 if len(page_results) + startv < self._pagesize:
2075 break
2076
2077 # If we got the whole page, but the next page is not interesting,
2078 # break out early as well
2079 if end == nextfirstid:
2080 break
2081 return res
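# Usage sketch (hypothetical page function): each page yields up to pagesize
# entries; getslice() fetches only the pages the requested slice touches and
# caches them when use_cache is enabled.
#
#     def _fetch_page(pagenum):
#         start = pagenum * 10
#         return range(start, min(start + 10, 35))  # page 3 is the short, last page
#
#     OnDemandPagedList(_fetch_page, 10).getslice(5, 12)
#     # -> [5, 6, 7, 8, 9, 10, 11]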
81c2f20b
PH
2082
2083
9c44d242
PH
2084class InAdvancePagedList(PagedList):
2085 def __init__(self, pagefunc, pagecount, pagesize):
2086 self._pagefunc = pagefunc
2087 self._pagecount = pagecount
2088 self._pagesize = pagesize
2089
2090 def getslice(self, start=0, end=None):
2091 res = []
2092 start_page = start // self._pagesize
2093 end_page = (
2094 self._pagecount if end is None else (end // self._pagesize + 1))
2095 skip_elems = start - start_page * self._pagesize
2096 only_more = None if end is None else end - start
2097 for pagenum in range(start_page, end_page):
2098 page = list(self._pagefunc(pagenum))
2099 if skip_elems:
2100 page = page[skip_elems:]
2101 skip_elems = None
2102 if only_more is not None:
2103 if len(page) < only_more:
2104 only_more -= len(page)
2105 else:
2106 page = page[:only_more]
2107 res.extend(page)
2108 break
2109 res.extend(page)
2110 return res
2111
2112
81c2f20b 2113def uppercase_escape(s):
676eb3f2 2114 unicode_escape = codecs.getdecoder('unicode_escape')
81c2f20b 2115 return re.sub(
a612753d 2116 r'\\U[0-9a-fA-F]{8}',
676eb3f2
PH
2117 lambda m: unicode_escape(m.group(0))[0],
2118 s)
0fe2ff78
YCH
2119
2120
2121def lowercase_escape(s):
2122 unicode_escape = codecs.getdecoder('unicode_escape')
2123 return re.sub(
2124 r'\\u[0-9a-fA-F]{4}',
2125 lambda m: unicode_escape(m.group(0))[0],
2126 s)
b53466e1 2127
d05cfe06
S
2128
2129def escape_rfc3986(s):
2130 """Escape non-ASCII characters as suggested by RFC 3986"""
8f9312c3 2131 if sys.version_info < (3, 0) and isinstance(s, compat_str):
d05cfe06 2132 s = s.encode('utf-8')
ecc0c5ee 2133 return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
d05cfe06
S
2134
2135
2136def escape_url(url):
2137 """Escape URL as suggested by RFC 3986"""
2138 url_parsed = compat_urllib_parse_urlparse(url)
2139 return url_parsed._replace(
efbed08d 2140 netloc=url_parsed.netloc.encode('idna').decode('ascii'),
d05cfe06
S
2141 path=escape_rfc3986(url_parsed.path),
2142 params=escape_rfc3986(url_parsed.params),
2143 query=escape_rfc3986(url_parsed.query),
2144 fragment=escape_rfc3986(url_parsed.fragment)
2145 ).geturl()
2146
62e609ab
PH
2147
2148def read_batch_urls(batch_fd):
2149 def fixup(url):
2150 if not isinstance(url, compat_str):
2151 url = url.decode('utf-8', 'replace')
28e614de 2152 BOM_UTF8 = '\xef\xbb\xbf'
62e609ab
PH
2153 if url.startswith(BOM_UTF8):
2154 url = url[len(BOM_UTF8):]
2155 url = url.strip()
2156 if url.startswith(('#', ';', ']')):
2157 return False
2158 return url
2159
2160 with contextlib.closing(batch_fd) as fd:
2161 return [url for url in map(fixup, fd) if url]
b74fa8cd
JMF
2162
2163
2164def urlencode_postdata(*args, **kargs):
15707c7e 2165 return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
bcf89ce6
PH
2166
2167
38f9ef31 2168def update_url_query(url, query):
cacd9966
YCH
2169 if not query:
2170 return url
38f9ef31 2171 parsed_url = compat_urlparse.urlparse(url)
2172 qs = compat_parse_qs(parsed_url.query)
2173 qs.update(query)
2174 return compat_urlparse.urlunparse(parsed_url._replace(
15707c7e 2175 query=compat_urllib_parse_urlencode(qs, True)))
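# Illustrative example (hypothetical URL): existing parameters are kept and
# the given ones are added or overwritten (parameter order may vary).
#
#     update_url_query('https://example.com/path?a=1', {'b': '2'})
#     # -> 'https://example.com/path?a=1&b=2'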
16392824 2176
8e60dc75 2177
ed0291d1
S
2178def update_Request(req, url=None, data=None, headers={}, query={}):
2179 req_headers = req.headers.copy()
2180 req_headers.update(headers)
2181 req_data = data or req.data
2182 req_url = update_url_query(url or req.get_full_url(), query)
95cf60e8
S
2183 req_get_method = req.get_method()
2184 if req_get_method == 'HEAD':
2185 req_type = HEADRequest
2186 elif req_get_method == 'PUT':
2187 req_type = PUTRequest
2188 else:
2189 req_type = compat_urllib_request.Request
ed0291d1
S
2190 new_req = req_type(
2191 req_url, data=req_data, headers=req_headers,
2192 origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
2193 if hasattr(req, 'timeout'):
2194 new_req.timeout = req.timeout
2195 return new_req
2196
2197
10c87c15 2198def _multipart_encode_impl(data, boundary):
0c265486
YCH
2199 content_type = 'multipart/form-data; boundary=%s' % boundary
2200
2201 out = b''
2202 for k, v in data.items():
2203 out += b'--' + boundary.encode('ascii') + b'\r\n'
2204 if isinstance(k, compat_str):
2205 k = k.encode('utf-8')
2206 if isinstance(v, compat_str):
2207 v = v.encode('utf-8')
2208 # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
2209 # suggests sending UTF-8 directly. Firefox sends UTF-8, too
b2ad479d 2210 content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
0c265486
YCH
2211 if boundary.encode('ascii') in content:
2212 raise ValueError('Boundary overlaps with data')
2213 out += content
2214
2215 out += b'--' + boundary.encode('ascii') + b'--\r\n'
2216
2217 return out, content_type
2218
2219
2220def multipart_encode(data, boundary=None):
2221 '''
2222 Encode a dict to RFC 7578-compliant form-data
2223
2224 data:
2225 A dict where keys and values can be either Unicode or bytes-like
2226 objects.
2227 boundary:
2228 If specified, it must be a Unicode object and is used as the boundary. Otherwise
2229 a random boundary is generated.
2230
2231 Reference: https://tools.ietf.org/html/rfc7578
2232 '''
2233 has_specified_boundary = boundary is not None
2234
2235 while True:
2236 if boundary is None:
2237 boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
2238
2239 try:
10c87c15 2240 out, content_type = _multipart_encode_impl(data, boundary)
0c265486
YCH
2241 break
2242 except ValueError:
2243 if has_specified_boundary:
2244 raise
2245 boundary = None
2246
2247 return out, content_type
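# Usage sketch (made-up form fields): the returned content_type carries the
# boundary and should be sent as the request's Content-Type header.
#
#     body, content_type = multipart_encode({
#         'username': 'alice',
#         'file': b'raw file contents',
#     })
#     # then send `body` as the request data with a
#     # 'Content-Type: %s' % content_type header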
2248
2249
86296ad2 2250def dict_get(d, key_or_keys, default=None, skip_false_values=True):
cbecc9b9
S
2251 if isinstance(key_or_keys, (list, tuple)):
2252 for key in key_or_keys:
86296ad2
S
2253 if key not in d or d[key] is None or skip_false_values and not d[key]:
2254 continue
2255 return d[key]
cbecc9b9
S
2256 return default
2257 return d.get(key_or_keys, default)
2258
2259
329ca3be 2260def try_get(src, getter, expected_type=None):
a32a9a7e
S
2261 if not isinstance(getter, (list, tuple)):
2262 getter = [getter]
2263 for get in getter:
2264 try:
2265 v = get(src)
2266 except (AttributeError, KeyError, TypeError, IndexError):
2267 pass
2268 else:
2269 if expected_type is None or isinstance(v, expected_type):
2270 return v
329ca3be
S
2271
2272
6cc62232
S
2273def merge_dicts(*dicts):
2274 merged = {}
2275 for a_dict in dicts:
2276 for k, v in a_dict.items():
2277 if v is None:
2278 continue
2279 if (k not in merged or
2280 (isinstance(v, compat_str) and v and
2281 isinstance(merged[k], compat_str) and
2282 not merged[k])):
2283 merged[k] = v
2284 return merged
2285
2286
8e60dc75
S
2287def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
2288 return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
2289
16392824 2290
a1a530b0
PH
2291US_RATINGS = {
2292 'G': 0,
2293 'PG': 10,
2294 'PG-13': 13,
2295 'R': 16,
2296 'NC': 18,
2297}
fac55558
PH
2298
2299
a8795327 2300TV_PARENTAL_GUIDELINES = {
5a16c9d9
RA
2301 'TV-Y': 0,
2302 'TV-Y7': 7,
2303 'TV-G': 0,
2304 'TV-PG': 0,
2305 'TV-14': 14,
2306 'TV-MA': 17,
a8795327
S
2307}
2308
2309
146c80e2 2310def parse_age_limit(s):
a8795327
S
2311 if type(s) == int:
2312 return s if 0 <= s <= 21 else None
2313 if not isinstance(s, compat_basestring):
d838b1bd 2314 return None
146c80e2 2315 m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
a8795327
S
2316 if m:
2317 return int(m.group('age'))
2318 if s in US_RATINGS:
2319 return US_RATINGS[s]
5a16c9d9 2320 m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
b8361187 2321 if m:
5a16c9d9 2322 return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
b8361187 2323 return None
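# Illustrative values:
#
#     parse_age_limit(18)       # -> 18
#     parse_age_limit('16+')    # -> 16
#     parse_age_limit('PG-13')  # -> 13
#     parse_age_limit('TV-MA')  # -> 17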
146c80e2
S
2324
2325
fac55558 2326def strip_jsonp(code):
609a61e3 2327 return re.sub(
5552c9eb 2328 r'''(?sx)^
e9c671d5 2329 (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
5552c9eb
YCH
2330 (?:\s*&&\s*(?P=func_name))?
2331 \s*\(\s*(?P<callback_data>.*)\);?
2332 \s*?(?://[^\n]*)*$''',
2333 r'\g<callback_data>', code)
478c2c61
PH
2334
2335
e05f6939 2336def js_to_json(code):
4195096e
S
2337 COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*'
2338 SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
2339 INTEGER_TABLE = (
2340 (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
2341 (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
2342 )
2343
e05f6939 2344 def fix_kv(m):
e7b6d122
PH
2345 v = m.group(0)
2346 if v in ('true', 'false', 'null'):
2347 return v
b3ee552e 2348 elif v.startswith('/*') or v.startswith('//') or v == ',':
bd1e4844 2349 return ""
2350
2351 if v[0] in ("'", '"'):
2352 v = re.sub(r'(?s)\\.|"', lambda m: {
e7b6d122 2353 '"': '\\"',
bd1e4844 2354 "\\'": "'",
2355 '\\\n': '',
2356 '\\x': '\\u00',
2357 }.get(m.group(0), m.group(0)), v[1:-1])
2358
89ac4a19
S
2359 for regex, base in INTEGER_TABLE:
2360 im = re.match(regex, v)
2361 if im:
e4659b45 2362 i = int(im.group(1), base)
89ac4a19
S
2363 return '"%d":' % i if v.endswith(':') else '%d' % i
2364
e7b6d122 2365 return '"%s"' % v
e05f6939 2366
bd1e4844 2367 return re.sub(r'''(?sx)
2368 "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
2369 '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
4195096e 2370 {comment}|,(?={skip}[\]}}])|
c384d537 2371 (?:(?<![0-9])[eE]|[a-df-zA-DF-Z_])[.a-zA-Z_0-9]*|
4195096e
S
2372 \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
2373 [0-9]+(?={skip}:)
2374 '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
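# Illustrative example: unquoted keys, single quotes, comments, hex integers
# and trailing commas are normalized into strict JSON.
#
#     json.loads(js_to_json("{foo: 'bar', /* comment */ baz: 0x10, }"))
#     # -> {'foo': 'bar', 'baz': 16}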
e05f6939
PH
2375
2376
478c2c61
PH
2377def qualities(quality_ids):
2378 """ Get a numeric quality value out of a list of possible values """
2379 def q(qid):
2380 try:
2381 return quality_ids.index(qid)
2382 except ValueError:
2383 return -1
2384 return q
2385
acd69589
PH
2386
2387DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
0a871f68 2388
a020a0dc
PH
2389
2390def limit_length(s, length):
2391 """ Add ellipses to overly long strings """
2392 if s is None:
2393 return None
2394 ELLIPSES = '...'
2395 if len(s) > length:
2396 return s[:length - len(ELLIPSES)] + ELLIPSES
2397 return s
48844745
PH
2398
2399
2400def version_tuple(v):
5f9b8394 2401 return tuple(int(e) for e in re.split(r'[-.]', v))
48844745
PH
2402
2403
2404def is_outdated_version(version, limit, assume_new=True):
2405 if not version:
2406 return not assume_new
2407 try:
2408 return version_tuple(version) < version_tuple(limit)
2409 except ValueError:
2410 return not assume_new
732ea2f0
PH
2411
2412
2413def ytdl_is_updateable():
2414 """ Returns if youtube-dl can be updated with -U """
2415 from zipimport import zipimporter
2416
2417 return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')
7d4111ed
PH
2418
2419
2420def args_to_str(args):
2421 # Get a short string representation for a subprocess command
702ccf2d 2422 return ' '.join(compat_shlex_quote(a) for a in args)
2ccd1b10
PH
2423
2424
9b9c5355 2425def error_to_compat_str(err):
fdae2358
S
2426 err_str = str(err)
2427 # On Python 2 the error byte string must be decoded with the proper
2428 # encoding rather than ascii
2429 if sys.version_info[0] < 3:
2430 err_str = err_str.decode(preferredencoding())
2431 return err_str
2432
2433
c460bdd5 2434def mimetype2ext(mt):
eb9ee194
S
2435 if mt is None:
2436 return None
2437
765ac263
JMF
2438 ext = {
2439 'audio/mp4': 'm4a',
6c33d24b
YCH
2440 # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Use .mp3 here as
2441 # it's the most popular one
2442 'audio/mpeg': 'mp3',
765ac263
JMF
2443 }.get(mt)
2444 if ext is not None:
2445 return ext
2446
c460bdd5 2447 _, _, res = mt.rpartition('/')
6562d34a 2448 res = res.split(';')[0].strip().lower()
c460bdd5
PH
2449
2450 return {
f6861ec9 2451 '3gpp': '3gp',
cafcf657 2452 'smptett+xml': 'tt',
cafcf657 2453 'ttaf+xml': 'dfxp',
a0d8d704 2454 'ttml+xml': 'ttml',
f6861ec9 2455 'x-flv': 'flv',
a0d8d704 2456 'x-mp4-fragmented': 'mp4',
d4f05d47 2457 'x-ms-sami': 'sami',
a0d8d704 2458 'x-ms-wmv': 'wmv',
b4173f15
RA
2459 'mpegurl': 'm3u8',
2460 'x-mpegurl': 'm3u8',
2461 'vnd.apple.mpegurl': 'm3u8',
2462 'dash+xml': 'mpd',
b4173f15 2463 'f4m+xml': 'f4m',
f164b971 2464 'hds+xml': 'f4m',
e910fe2f 2465 'vnd.ms-sstr+xml': 'ism',
c2b2c7e1 2466 'quicktime': 'mov',
98ce1a3f 2467 'mp2t': 'ts',
c460bdd5
PH
2468 }.get(res, res)
2469
2470
4f3c5e06 2471def parse_codecs(codecs_str):
2472 # http://tools.ietf.org/html/rfc6381
2473 if not codecs_str:
2474 return {}
2475 split_codecs = list(filter(None, map(
2476 lambda str: str.strip(), codecs_str.strip().strip(',').split(','))))
2477 vcodec, acodec = None, None
2478 for full_codec in splited_codecs:
2479 codec = full_codec.split('.')[0]
25d110be 2480 if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v', 'hvc1', 'av01'):
4f3c5e06 2481 if not vcodec:
2482 vcodec = full_codec
60f5c9fb 2483 elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
4f3c5e06 2484 if not acodec:
2485 acodec = full_codec
2486 else:
60f5c9fb 2487 write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
4f3c5e06 2488 if not vcodec and not acodec:
2489 if len(split_codecs) == 2:
2490 return {
2491 'vcodec': vcodec,
2492 'acodec': acodec,
2493 }
2495 elif len(split_codecs) == 1:
2495 return {
2496 'vcodec': 'none',
2497 'acodec': vcodec,
2498 }
2499 else:
2500 return {
2501 'vcodec': vcodec or 'none',
2502 'acodec': acodec or 'none',
2503 }
2504 return {}
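# Illustrative values (RFC 6381 codec strings):
#
#     parse_codecs('avc1.64001f, mp4a.40.2')
#     # -> {'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2'}
#     parse_codecs('opus')
#     # -> {'vcodec': 'none', 'acodec': 'opus'}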
2505
2506
2ccd1b10 2507def urlhandle_detect_ext(url_handle):
79298173 2508 getheader = url_handle.headers.get
2ccd1b10 2509
b55ee18f
PH
2510 cd = getheader('Content-Disposition')
2511 if cd:
2512 m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
2513 if m:
2514 e = determine_ext(m.group('filename'), default_ext=None)
2515 if e:
2516 return e
2517
c460bdd5 2518 return mimetype2ext(getheader('Content-Type'))
05900629
PH
2519
2520
1e399778
YCH
2521def encode_data_uri(data, mime_type):
2522 return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
2523
2524
05900629 2525def age_restricted(content_limit, age_limit):
6ec6cb4e 2526 """ Returns True iff the content should be blocked """
05900629
PH
2527
2528 if age_limit is None: # No limit set
2529 return False
2530 if content_limit is None:
2531 return False # Content available for everyone
2532 return age_limit < content_limit
61ca9a80
PH
2533
2534
2535def is_html(first_bytes):
2536 """ Detect whether a file contains HTML by examining its first bytes. """
2537
2538 BOMS = [
2539 (b'\xef\xbb\xbf', 'utf-8'),
2540 (b'\x00\x00\xfe\xff', 'utf-32-be'),
2541 (b'\xff\xfe\x00\x00', 'utf-32-le'),
2542 (b'\xff\xfe', 'utf-16-le'),
2543 (b'\xfe\xff', 'utf-16-be'),
2544 ]
2545 for bom, enc in BOMS:
2546 if first_bytes.startswith(bom):
2547 s = first_bytes[len(bom):].decode(enc, 'replace')
2548 break
2549 else:
2550 s = first_bytes.decode('utf-8', 'replace')
2551
2552 return re.match(r'^\s*<', s)
a055469f
PH
2553
2554
2555def determine_protocol(info_dict):
2556 protocol = info_dict.get('protocol')
2557 if protocol is not None:
2558 return protocol
2559
2560 url = info_dict['url']
2561 if url.startswith('rtmp'):
2562 return 'rtmp'
2563 elif url.startswith('mms'):
2564 return 'mms'
2565 elif url.startswith('rtsp'):
2566 return 'rtsp'
2567
2568 ext = determine_ext(url)
2569 if ext == 'm3u8':
2570 return 'm3u8'
2571 elif ext == 'f4m':
2572 return 'f4m'
2573
2574 return compat_urllib_parse_urlparse(url).scheme
cfb56d1a
PH
2575
2576
2577def render_table(header_row, data):
2578 """ Render a list of rows, each as a list of values """
2579 table = [header_row] + data
2580 max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)]
2581 format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s'
2582 return '\n'.join(format_str % tuple(row) for row in table)
347de493
PH
2583
2584
2585def _match_one(filter_part, dct):
2586 COMPARISON_OPERATORS = {
2587 '<': operator.lt,
2588 '<=': operator.le,
2589 '>': operator.gt,
2590 '>=': operator.ge,
2591 '=': operator.eq,
2592 '!=': operator.ne,
2593 }
2594 operator_rex = re.compile(r'''(?x)\s*
2595 (?P<key>[a-z_]+)
2596 \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
2597 (?:
2598 (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
db13c16e 2599 (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)|
347de493
PH
2600 (?P<strval>(?![0-9.])[a-z0-9A-Z]*)
2601 )
2602 \s*$
2603 ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
2604 m = operator_rex.search(filter_part)
2605 if m:
2606 op = COMPARISON_OPERATORS[m.group('op')]
e5a088dc 2607 actual_value = dct.get(m.group('key'))
db13c16e
S
2608 if (m.group('quotedstrval') is not None or
2609 m.group('strval') is not None or
e5a088dc
S
2610 # If the original field is a string and the matching comparison value is
2611 # a number, we should respect the origin of the original field
2612 # and process the comparison value as a string (see
2613 # https://github.com/rg3/youtube-dl/issues/11082).
2614 actual_value is not None and m.group('intval') is not None and
2615 isinstance(actual_value, compat_str)):
347de493
PH
2616 if m.group('op') not in ('=', '!='):
2617 raise ValueError(
2618 'Operator %s does not support string values!' % m.group('op'))
db13c16e
S
2619 comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval')
2620 quote = m.group('quote')
2621 if quote is not None:
2622 comparison_value = comparison_value.replace(r'\%s' % quote, quote)
347de493
PH
2623 else:
2624 try:
2625 comparison_value = int(m.group('intval'))
2626 except ValueError:
2627 comparison_value = parse_filesize(m.group('intval'))
2628 if comparison_value is None:
2629 comparison_value = parse_filesize(m.group('intval') + 'B')
2630 if comparison_value is None:
2631 raise ValueError(
2632 'Invalid integer value %r in filter part %r' % (
2633 m.group('intval'), filter_part))
347de493
PH
2634 if actual_value is None:
2635 return m.group('none_inclusive')
2636 return op(actual_value, comparison_value)
2637
2638 UNARY_OPERATORS = {
1cc47c66
S
2639 '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
2640 '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
347de493
PH
2641 }
2642 operator_rex = re.compile(r'''(?x)\s*
2643 (?P<op>%s)\s*(?P<key>[a-z_]+)
2644 \s*$
2645 ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
2646 m = operator_rex.search(filter_part)
2647 if m:
2648 op = UNARY_OPERATORS[m.group('op')]
2649 actual_value = dct.get(m.group('key'))
2650 return op(actual_value)
2651
2652 raise ValueError('Invalid filter part %r' % filter_part)
2653
2654
2655def match_str(filter_str, dct):
2656 """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """
2657
2658 return all(
2659 _match_one(filter_part, dct) for filter_part in filter_str.split('&'))
2660
2661
2662def match_filter_func(filter_str):
2663 def _match_func(info_dict):
2664 if match_str(filter_str, info_dict):
2665 return None
2666 else:
2667 video_title = info_dict.get('title', info_dict.get('id', 'video'))
2668 return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
2669 return _match_func
91410c9b
PH
2670
2671
bf6427d2
YCH
2672def parse_dfxp_time_expr(time_expr):
2673 if not time_expr:
d631d5f9 2674 return
bf6427d2
YCH
2675
2676 mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
2677 if mobj:
2678 return float(mobj.group('time_offset'))
2679
db2fe38b 2680 mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
bf6427d2 2681 if mobj:
db2fe38b 2682 return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
bf6427d2
YCH
2683
2684
c1c924ab
YCH
2685def srt_subtitles_timecode(seconds):
2686 return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)
bf6427d2
YCH
2687
2688
2689def dfxp2srt(dfxp_data):
3869028f
YCH
2690 '''
2691 @param dfxp_data A bytes-like object containing DFXP data
2692 @returns A unicode object containing converted SRT data
2693 '''
5b995f71 2694 LEGACY_NAMESPACES = (
3869028f
YCH
2695 (b'http://www.w3.org/ns/ttml', [
2696 b'http://www.w3.org/2004/11/ttaf1',
2697 b'http://www.w3.org/2006/04/ttaf1',
2698 b'http://www.w3.org/2006/10/ttaf1',
5b995f71 2699 ]),
3869028f
YCH
2700 (b'http://www.w3.org/ns/ttml#styling', [
2701 b'http://www.w3.org/ns/ttml#style',
5b995f71
RA
2702 ]),
2703 )
2704
2705 SUPPORTED_STYLING = [
2706 'color',
2707 'fontFamily',
2708 'fontSize',
2709 'fontStyle',
2710 'fontWeight',
2711 'textDecoration'
2712 ]
2713
4e335771 2714 _x = functools.partial(xpath_with_ns, ns_map={
261f4730 2715 'xml': 'http://www.w3.org/XML/1998/namespace',
4e335771 2716 'ttml': 'http://www.w3.org/ns/ttml',
5b995f71 2717 'tts': 'http://www.w3.org/ns/ttml#styling',
4e335771 2718 })
bf6427d2 2719
5b995f71
RA
2720 styles = {}
2721 default_style = {}
2722
87de7069 2723 class TTMLPElementParser(object):
5b995f71
RA
2724 _out = ''
2725 _unclosed_elements = []
2726 _applied_styles = []
bf6427d2 2727
2b14cb56 2728 def start(self, tag, attrib):
5b995f71
RA
2729 if tag in (_x('ttml:br'), 'br'):
2730 self._out += '\n'
2731 else:
2732 unclosed_elements = []
2733 style = {}
2734 element_style_id = attrib.get('style')
2735 if default_style:
2736 style.update(default_style)
2737 if element_style_id:
2738 style.update(styles.get(element_style_id, {}))
2739 for prop in SUPPORTED_STYLING:
2740 prop_val = attrib.get(_x('tts:' + prop))
2741 if prop_val:
2742 style[prop] = prop_val
2743 if style:
2744 font = ''
2745 for k, v in sorted(style.items()):
2746 if self._applied_styles and self._applied_styles[-1].get(k) == v:
2747 continue
2748 if k == 'color':
2749 font += ' color="%s"' % v
2750 elif k == 'fontSize':
2751 font += ' size="%s"' % v
2752 elif k == 'fontFamily':
2753 font += ' face="%s"' % v
2754 elif k == 'fontWeight' and v == 'bold':
2755 self._out += '<b>'
2756 unclosed_elements.append('b')
2757 elif k == 'fontStyle' and v == 'italic':
2758 self._out += '<i>'
2759 unclosed_elements.append('i')
2760 elif k == 'textDecoration' and v == 'underline':
2761 self._out += '<u>'
2762 unclosed_elements.append('u')
2763 if font:
2764 self._out += '<font' + font + '>'
2765 unclosed_elements.append('font')
2766 applied_style = {}
2767 if self._applied_styles:
2768 applied_style.update(self._applied_styles[-1])
2769 applied_style.update(style)
2770 self._applied_styles.append(applied_style)
2771 self._unclosed_elements.append(unclosed_elements)
bf6427d2 2772
2b14cb56 2773 def end(self, tag):
5b995f71
RA
2774 if tag not in (_x('ttml:br'), 'br'):
2775 unclosed_elements = self._unclosed_elements.pop()
2776 for element in reversed(unclosed_elements):
2777 self._out += '</%s>' % element
2778 if unclosed_elements and self._applied_styles:
2779 self._applied_styles.pop()
bf6427d2 2780
2b14cb56 2781 def data(self, data):
5b995f71 2782 self._out += data
2b14cb56 2783
2784 def close(self):
5b995f71 2785 return self._out.strip()
2b14cb56 2786
2787 def parse_node(node):
2788 target = TTMLPElementParser()
2789 parser = xml.etree.ElementTree.XMLParser(target=target)
2790 parser.feed(xml.etree.ElementTree.tostring(node))
2791 return parser.close()
bf6427d2 2792
5b995f71
RA
2793 for k, v in LEGACY_NAMESPACES:
2794 for ns in v:
2795 dfxp_data = dfxp_data.replace(ns, k)
2796
3869028f 2797 dfxp = compat_etree_fromstring(dfxp_data)
bf6427d2 2798 out = []
5b995f71 2799 paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
1b0427e6
YCH
2800
2801 if not paras:
2802 raise ValueError('Invalid dfxp/TTML subtitle')
bf6427d2 2803
5b995f71
RA
2804 repeat = False
2805 while True:
2806 for style in dfxp.findall(_x('.//ttml:style')):
261f4730
RA
2807 style_id = style.get('id') or style.get(_x('xml:id'))
2808 if not style_id:
2809 continue
5b995f71
RA
2810 parent_style_id = style.get('style')
2811 if parent_style_id:
2812 if parent_style_id not in styles:
2813 repeat = True
2814 continue
2815 styles[style_id] = styles[parent_style_id].copy()
2816 for prop in SUPPORTED_STYLING:
2817 prop_val = style.get(_x('tts:' + prop))
2818 if prop_val:
2819 styles.setdefault(style_id, {})[prop] = prop_val
2820 if repeat:
2821 repeat = False
2822 else:
2823 break
2824
2825 for p in ('body', 'div'):
2826 ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
2827 if ele is None:
2828 continue
2829 style = styles.get(ele.get('style'))
2830 if not style:
2831 continue
2832 default_style.update(style)
2833
bf6427d2 2834 for para, index in zip(paras, itertools.count(1)):
d631d5f9 2835 begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
7dff0363 2836 end_time = parse_dfxp_time_expr(para.attrib.get('end'))
d631d5f9
YCH
2837 dur = parse_dfxp_time_expr(para.attrib.get('dur'))
2838 if begin_time is None:
2839 continue
7dff0363 2840 if not end_time:
d631d5f9
YCH
2841 if not dur:
2842 continue
2843 end_time = begin_time + dur
bf6427d2
YCH
2844 out.append('%d\n%s --> %s\n%s\n\n' % (
2845 index,
c1c924ab
YCH
2846 srt_subtitles_timecode(begin_time),
2847 srt_subtitles_timecode(end_time),
bf6427d2
YCH
2848 parse_node(para)))
2849
2850 return ''.join(out)
2851
2852
66e289ba
S
2853def cli_option(params, command_option, param):
2854 param = params.get(param)
98e698f1
RA
2855 if param:
2856 param = compat_str(param)
66e289ba
S
2857 return [command_option, param] if param is not None else []
2858
2859
2860def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
2861 param = params.get(param)
5b232f46
S
2862 if param is None:
2863 return []
66e289ba
S
2864 assert isinstance(param, bool)
2865 if separator:
2866 return [command_option + separator + (true_value if param else false_value)]
2867 return [command_option, true_value if param else false_value]
2868
2869
2870def cli_valueless_option(params, command_option, param, expected_value=True):
2871 param = params.get(param)
2872 return [command_option] if param == expected_value else []
2873
2874
2875def cli_configuration_args(params, param, default=[]):
2876 ex_args = params.get(param)
2877 if ex_args is None:
2878 return default
2879 assert isinstance(ex_args, list)
2880 return ex_args
2881
2882
39672624
YCH
2883class ISO639Utils(object):
2884 # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
2885 _lang_map = {
2886 'aa': 'aar',
2887 'ab': 'abk',
2888 'ae': 'ave',
2889 'af': 'afr',
2890 'ak': 'aka',
2891 'am': 'amh',
2892 'an': 'arg',
2893 'ar': 'ara',
2894 'as': 'asm',
2895 'av': 'ava',
2896 'ay': 'aym',
2897 'az': 'aze',
2898 'ba': 'bak',
2899 'be': 'bel',
2900 'bg': 'bul',
2901 'bh': 'bih',
2902 'bi': 'bis',
2903 'bm': 'bam',
2904 'bn': 'ben',
2905 'bo': 'bod',
2906 'br': 'bre',
2907 'bs': 'bos',
2908 'ca': 'cat',
2909 'ce': 'che',
2910 'ch': 'cha',
2911 'co': 'cos',
2912 'cr': 'cre',
2913 'cs': 'ces',
2914 'cu': 'chu',
2915 'cv': 'chv',
2916 'cy': 'cym',
2917 'da': 'dan',
2918 'de': 'deu',
2919 'dv': 'div',
2920 'dz': 'dzo',
2921 'ee': 'ewe',
2922 'el': 'ell',
2923 'en': 'eng',
2924 'eo': 'epo',
2925 'es': 'spa',
2926 'et': 'est',
2927 'eu': 'eus',
2928 'fa': 'fas',
2929 'ff': 'ful',
2930 'fi': 'fin',
2931 'fj': 'fij',
2932 'fo': 'fao',
2933 'fr': 'fra',
2934 'fy': 'fry',
2935 'ga': 'gle',
2936 'gd': 'gla',
2937 'gl': 'glg',
2938 'gn': 'grn',
2939 'gu': 'guj',
2940 'gv': 'glv',
2941 'ha': 'hau',
2942 'he': 'heb',
2943 'hi': 'hin',
2944 'ho': 'hmo',
2945 'hr': 'hrv',
2946 'ht': 'hat',
2947 'hu': 'hun',
2948 'hy': 'hye',
2949 'hz': 'her',
2950 'ia': 'ina',
2951 'id': 'ind',
2952 'ie': 'ile',
2953 'ig': 'ibo',
2954 'ii': 'iii',
2955 'ik': 'ipk',
2956 'io': 'ido',
2957 'is': 'isl',
2958 'it': 'ita',
2959 'iu': 'iku',
2960 'ja': 'jpn',
2961 'jv': 'jav',
2962 'ka': 'kat',
2963 'kg': 'kon',
2964 'ki': 'kik',
2965 'kj': 'kua',
2966 'kk': 'kaz',
2967 'kl': 'kal',
2968 'km': 'khm',
2969 'kn': 'kan',
2970 'ko': 'kor',
2971 'kr': 'kau',
2972 'ks': 'kas',
2973 'ku': 'kur',
2974 'kv': 'kom',
2975 'kw': 'cor',
2976 'ky': 'kir',
2977 'la': 'lat',
2978 'lb': 'ltz',
2979 'lg': 'lug',
2980 'li': 'lim',
2981 'ln': 'lin',
2982 'lo': 'lao',
2983 'lt': 'lit',
2984 'lu': 'lub',
2985 'lv': 'lav',
2986 'mg': 'mlg',
2987 'mh': 'mah',
2988 'mi': 'mri',
2989 'mk': 'mkd',
2990 'ml': 'mal',
2991 'mn': 'mon',
2992 'mr': 'mar',
2993 'ms': 'msa',
2994 'mt': 'mlt',
2995 'my': 'mya',
2996 'na': 'nau',
2997 'nb': 'nob',
2998 'nd': 'nde',
2999 'ne': 'nep',
3000 'ng': 'ndo',
3001 'nl': 'nld',
3002 'nn': 'nno',
3003 'no': 'nor',
3004 'nr': 'nbl',
3005 'nv': 'nav',
3006 'ny': 'nya',
3007 'oc': 'oci',
3008 'oj': 'oji',
3009 'om': 'orm',
3010 'or': 'ori',
3011 'os': 'oss',
3012 'pa': 'pan',
3013 'pi': 'pli',
3014 'pl': 'pol',
3015 'ps': 'pus',
3016 'pt': 'por',
3017 'qu': 'que',
3018 'rm': 'roh',
3019 'rn': 'run',
3020 'ro': 'ron',
3021 'ru': 'rus',
3022 'rw': 'kin',
3023 'sa': 'san',
3024 'sc': 'srd',
3025 'sd': 'snd',
3026 'se': 'sme',
3027 'sg': 'sag',
3028 'si': 'sin',
3029 'sk': 'slk',
3030 'sl': 'slv',
3031 'sm': 'smo',
3032 'sn': 'sna',
3033 'so': 'som',
3034 'sq': 'sqi',
3035 'sr': 'srp',
3036 'ss': 'ssw',
3037 'st': 'sot',
3038 'su': 'sun',
3039 'sv': 'swe',
3040 'sw': 'swa',
3041 'ta': 'tam',
3042 'te': 'tel',
3043 'tg': 'tgk',
3044 'th': 'tha',
3045 'ti': 'tir',
3046 'tk': 'tuk',
3047 'tl': 'tgl',
3048 'tn': 'tsn',
3049 'to': 'ton',
3050 'tr': 'tur',
3051 'ts': 'tso',
3052 'tt': 'tat',
3053 'tw': 'twi',
3054 'ty': 'tah',
3055 'ug': 'uig',
3056 'uk': 'ukr',
3057 'ur': 'urd',
3058 'uz': 'uzb',
3059 've': 'ven',
3060 'vi': 'vie',
3061 'vo': 'vol',
3062 'wa': 'wln',
3063 'wo': 'wol',
3064 'xh': 'xho',
3065 'yi': 'yid',
3066 'yo': 'yor',
3067 'za': 'zha',
3068 'zh': 'zho',
3069 'zu': 'zul',
3070 }
3071
3072 @classmethod
3073 def short2long(cls, code):
3074 """Convert language code from ISO 639-1 to ISO 639-2/T"""
3075 return cls._lang_map.get(code[:2])
3076
3077 @classmethod
3078 def long2short(cls, code):
3079 """Convert language code from ISO 639-2/T to ISO 639-1"""
3080 for short_name, long_name in cls._lang_map.items():
3081 if long_name == code:
3082 return short_name
3083
3084
4eb10f66
YCH
3085class ISO3166Utils(object):
3086 # From http://data.okfn.org/data/core/country-list
3087 _country_map = {
3088 'AF': 'Afghanistan',
3089 'AX': 'Åland Islands',
3090 'AL': 'Albania',
3091 'DZ': 'Algeria',
3092 'AS': 'American Samoa',
3093 'AD': 'Andorra',
3094 'AO': 'Angola',
3095 'AI': 'Anguilla',
3096 'AQ': 'Antarctica',
3097 'AG': 'Antigua and Barbuda',
3098 'AR': 'Argentina',
3099 'AM': 'Armenia',
3100 'AW': 'Aruba',
3101 'AU': 'Australia',
3102 'AT': 'Austria',
3103 'AZ': 'Azerbaijan',
3104 'BS': 'Bahamas',
3105 'BH': 'Bahrain',
3106 'BD': 'Bangladesh',
3107 'BB': 'Barbados',
3108 'BY': 'Belarus',
3109 'BE': 'Belgium',
3110 'BZ': 'Belize',
3111 'BJ': 'Benin',
3112 'BM': 'Bermuda',
3113 'BT': 'Bhutan',
3114 'BO': 'Bolivia, Plurinational State of',
3115 'BQ': 'Bonaire, Sint Eustatius and Saba',
3116 'BA': 'Bosnia and Herzegovina',
3117 'BW': 'Botswana',
3118 'BV': 'Bouvet Island',
3119 'BR': 'Brazil',
3120 'IO': 'British Indian Ocean Territory',
3121 'BN': 'Brunei Darussalam',
3122 'BG': 'Bulgaria',
3123 'BF': 'Burkina Faso',
3124 'BI': 'Burundi',
3125 'KH': 'Cambodia',
3126 'CM': 'Cameroon',
3127 'CA': 'Canada',
3128 'CV': 'Cape Verde',
3129 'KY': 'Cayman Islands',
3130 'CF': 'Central African Republic',
3131 'TD': 'Chad',
3132 'CL': 'Chile',
3133 'CN': 'China',
3134 'CX': 'Christmas Island',
3135 'CC': 'Cocos (Keeling) Islands',
3136 'CO': 'Colombia',
3137 'KM': 'Comoros',
3138 'CG': 'Congo',
3139 'CD': 'Congo, the Democratic Republic of the',
3140 'CK': 'Cook Islands',
3141 'CR': 'Costa Rica',
3142 'CI': 'Côte d\'Ivoire',
3143 'HR': 'Croatia',
3144 'CU': 'Cuba',
3145 'CW': 'Curaçao',
3146 'CY': 'Cyprus',
3147 'CZ': 'Czech Republic',
3148 'DK': 'Denmark',
3149 'DJ': 'Djibouti',
3150 'DM': 'Dominica',
3151 'DO': 'Dominican Republic',
3152 'EC': 'Ecuador',
3153 'EG': 'Egypt',
3154 'SV': 'El Salvador',
3155 'GQ': 'Equatorial Guinea',
3156 'ER': 'Eritrea',
3157 'EE': 'Estonia',
3158 'ET': 'Ethiopia',
3159 'FK': 'Falkland Islands (Malvinas)',
3160 'FO': 'Faroe Islands',
3161 'FJ': 'Fiji',
3162 'FI': 'Finland',
3163 'FR': 'France',
3164 'GF': 'French Guiana',
3165 'PF': 'French Polynesia',
3166 'TF': 'French Southern Territories',
3167 'GA': 'Gabon',
3168 'GM': 'Gambia',
3169 'GE': 'Georgia',
3170 'DE': 'Germany',
3171 'GH': 'Ghana',
3172 'GI': 'Gibraltar',
3173 'GR': 'Greece',
3174 'GL': 'Greenland',
3175 'GD': 'Grenada',
3176 'GP': 'Guadeloupe',
3177 'GU': 'Guam',
3178 'GT': 'Guatemala',
3179 'GG': 'Guernsey',
3180 'GN': 'Guinea',
3181 'GW': 'Guinea-Bissau',
3182 'GY': 'Guyana',
3183 'HT': 'Haiti',
3184 'HM': 'Heard Island and McDonald Islands',
3185 'VA': 'Holy See (Vatican City State)',
3186 'HN': 'Honduras',
3187 'HK': 'Hong Kong',
3188 'HU': 'Hungary',
3189 'IS': 'Iceland',
3190 'IN': 'India',
3191 'ID': 'Indonesia',
3192 'IR': 'Iran, Islamic Republic of',
3193 'IQ': 'Iraq',
3194 'IE': 'Ireland',
3195 'IM': 'Isle of Man',
3196 'IL': 'Israel',
3197 'IT': 'Italy',
3198 'JM': 'Jamaica',
3199 'JP': 'Japan',
3200 'JE': 'Jersey',
3201 'JO': 'Jordan',
3202 'KZ': 'Kazakhstan',
3203 'KE': 'Kenya',
3204 'KI': 'Kiribati',
3205 'KP': 'Korea, Democratic People\'s Republic of',
3206 'KR': 'Korea, Republic of',
3207 'KW': 'Kuwait',
3208 'KG': 'Kyrgyzstan',
3209 'LA': 'Lao People\'s Democratic Republic',
3210 'LV': 'Latvia',
3211 'LB': 'Lebanon',
3212 'LS': 'Lesotho',
3213 'LR': 'Liberia',
3214 'LY': 'Libya',
3215 'LI': 'Liechtenstein',
3216 'LT': 'Lithuania',
3217 'LU': 'Luxembourg',
3218 'MO': 'Macao',
3219 'MK': 'Macedonia, the Former Yugoslav Republic of',
3220 'MG': 'Madagascar',
3221 'MW': 'Malawi',
3222 'MY': 'Malaysia',
3223 'MV': 'Maldives',
3224 'ML': 'Mali',
3225 'MT': 'Malta',
3226 'MH': 'Marshall Islands',
3227 'MQ': 'Martinique',
3228 'MR': 'Mauritania',
3229 'MU': 'Mauritius',
3230 'YT': 'Mayotte',
3231 'MX': 'Mexico',
3232 'FM': 'Micronesia, Federated States of',
3233 'MD': 'Moldova, Republic of',
3234 'MC': 'Monaco',
3235 'MN': 'Mongolia',
3236 'ME': 'Montenegro',
3237 'MS': 'Montserrat',
3238 'MA': 'Morocco',
3239 'MZ': 'Mozambique',
3240 'MM': 'Myanmar',
3241 'NA': 'Namibia',
3242 'NR': 'Nauru',
3243 'NP': 'Nepal',
3244 'NL': 'Netherlands',
3245 'NC': 'New Caledonia',
3246 'NZ': 'New Zealand',
3247 'NI': 'Nicaragua',
3248 'NE': 'Niger',
3249 'NG': 'Nigeria',
3250 'NU': 'Niue',
3251 'NF': 'Norfolk Island',
3252 'MP': 'Northern Mariana Islands',
3253 'NO': 'Norway',
3254 'OM': 'Oman',
3255 'PK': 'Pakistan',
3256 'PW': 'Palau',
3257 'PS': 'Palestine, State of',
3258 'PA': 'Panama',
3259 'PG': 'Papua New Guinea',
3260 'PY': 'Paraguay',
3261 'PE': 'Peru',
3262 'PH': 'Philippines',
3263 'PN': 'Pitcairn',
3264 'PL': 'Poland',
3265 'PT': 'Portugal',
3266 'PR': 'Puerto Rico',
3267 'QA': 'Qatar',
3268 'RE': 'Réunion',
3269 'RO': 'Romania',
3270 'RU': 'Russian Federation',
3271 'RW': 'Rwanda',
3272 'BL': 'Saint Barthélemy',
3273 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
3274 'KN': 'Saint Kitts and Nevis',
3275 'LC': 'Saint Lucia',
3276 'MF': 'Saint Martin (French part)',
3277 'PM': 'Saint Pierre and Miquelon',
3278 'VC': 'Saint Vincent and the Grenadines',
3279 'WS': 'Samoa',
3280 'SM': 'San Marino',
3281 'ST': 'Sao Tome and Principe',
3282 'SA': 'Saudi Arabia',
3283 'SN': 'Senegal',
3284 'RS': 'Serbia',
3285 'SC': 'Seychelles',
3286 'SL': 'Sierra Leone',
3287 'SG': 'Singapore',
3288 'SX': 'Sint Maarten (Dutch part)',
3289 'SK': 'Slovakia',
3290 'SI': 'Slovenia',
3291 'SB': 'Solomon Islands',
3292 'SO': 'Somalia',
3293 'ZA': 'South Africa',
3294 'GS': 'South Georgia and the South Sandwich Islands',
3295 'SS': 'South Sudan',
3296 'ES': 'Spain',
3297 'LK': 'Sri Lanka',
3298 'SD': 'Sudan',
3299 'SR': 'Suriname',
3300 'SJ': 'Svalbard and Jan Mayen',
3301 'SZ': 'Swaziland',
3302 'SE': 'Sweden',
3303 'CH': 'Switzerland',
3304 'SY': 'Syrian Arab Republic',
3305 'TW': 'Taiwan, Province of China',
3306 'TJ': 'Tajikistan',
3307 'TZ': 'Tanzania, United Republic of',
3308 'TH': 'Thailand',
3309 'TL': 'Timor-Leste',
3310 'TG': 'Togo',
3311 'TK': 'Tokelau',
3312 'TO': 'Tonga',
3313 'TT': 'Trinidad and Tobago',
3314 'TN': 'Tunisia',
3315 'TR': 'Turkey',
3316 'TM': 'Turkmenistan',
3317 'TC': 'Turks and Caicos Islands',
3318 'TV': 'Tuvalu',
3319 'UG': 'Uganda',
3320 'UA': 'Ukraine',
3321 'AE': 'United Arab Emirates',
3322 'GB': 'United Kingdom',
3323 'US': 'United States',
3324 'UM': 'United States Minor Outlying Islands',
3325 'UY': 'Uruguay',
3326 'UZ': 'Uzbekistan',
3327 'VU': 'Vanuatu',
3328 'VE': 'Venezuela, Bolivarian Republic of',
3329 'VN': 'Viet Nam',
3330 'VG': 'Virgin Islands, British',
3331 'VI': 'Virgin Islands, U.S.',
3332 'WF': 'Wallis and Futuna',
3333 'EH': 'Western Sahara',
3334 'YE': 'Yemen',
3335 'ZM': 'Zambia',
3336 'ZW': 'Zimbabwe',
3337 }
3338
3339 @classmethod
3340 def short2full(cls, code):
3341 """Convert an ISO 3166-2 country code to the corresponding full name"""
3342 return cls._country_map.get(code.upper())
3343
3344
773f291d
S
3345class GeoUtils(object):
3346 # Major IPv4 address blocks per country
3347 _country_ip_map = {
3348 'AD': '85.94.160.0/19',
3349 'AE': '94.200.0.0/13',
3350 'AF': '149.54.0.0/17',
3351 'AG': '209.59.64.0/18',
3352 'AI': '204.14.248.0/21',
3353 'AL': '46.99.0.0/16',
3354 'AM': '46.70.0.0/15',
3355 'AO': '105.168.0.0/13',
3356 'AP': '159.117.192.0/21',
3357 'AR': '181.0.0.0/12',
3358 'AS': '202.70.112.0/20',
3359 'AT': '84.112.0.0/13',
3360 'AU': '1.128.0.0/11',
3361 'AW': '181.41.0.0/18',
3362 'AZ': '5.191.0.0/16',
3363 'BA': '31.176.128.0/17',
3364 'BB': '65.48.128.0/17',
3365 'BD': '114.130.0.0/16',
3366 'BE': '57.0.0.0/8',
3367 'BF': '129.45.128.0/17',
3368 'BG': '95.42.0.0/15',
3369 'BH': '37.131.0.0/17',
3370 'BI': '154.117.192.0/18',
3371 'BJ': '137.255.0.0/16',
3372 'BL': '192.131.134.0/24',
3373 'BM': '196.12.64.0/18',
3374 'BN': '156.31.0.0/16',
3375 'BO': '161.56.0.0/16',
3376 'BQ': '161.0.80.0/20',
3377 'BR': '152.240.0.0/12',
3378 'BS': '24.51.64.0/18',
3379 'BT': '119.2.96.0/19',
3380 'BW': '168.167.0.0/16',
3381 'BY': '178.120.0.0/13',
3382 'BZ': '179.42.192.0/18',
3383 'CA': '99.224.0.0/11',
3384 'CD': '41.243.0.0/16',
3385 'CF': '196.32.200.0/21',
3386 'CG': '197.214.128.0/17',
3387 'CH': '85.0.0.0/13',
3388 'CI': '154.232.0.0/14',
3389 'CK': '202.65.32.0/19',
3390 'CL': '152.172.0.0/14',
3391 'CM': '165.210.0.0/15',
3392 'CN': '36.128.0.0/10',
3393 'CO': '181.240.0.0/12',
3394 'CR': '201.192.0.0/12',
3395 'CU': '152.206.0.0/15',
3396 'CV': '165.90.96.0/19',
3397 'CW': '190.88.128.0/17',
3398 'CY': '46.198.0.0/15',
3399 'CZ': '88.100.0.0/14',
3400 'DE': '53.0.0.0/8',
3401 'DJ': '197.241.0.0/17',
3402 'DK': '87.48.0.0/12',
3403 'DM': '192.243.48.0/20',
3404 'DO': '152.166.0.0/15',
3405 'DZ': '41.96.0.0/12',
3406 'EC': '186.68.0.0/15',
3407 'EE': '90.190.0.0/15',
3408 'EG': '156.160.0.0/11',
3409 'ER': '196.200.96.0/20',
3410 'ES': '88.0.0.0/11',
3411 'ET': '196.188.0.0/14',
3412 'EU': '2.16.0.0/13',
3413 'FI': '91.152.0.0/13',
3414 'FJ': '144.120.0.0/16',
3415 'FM': '119.252.112.0/20',
3416 'FO': '88.85.32.0/19',
3417 'FR': '90.0.0.0/9',
3418 'GA': '41.158.0.0/15',
3419 'GB': '25.0.0.0/8',
3420 'GD': '74.122.88.0/21',
3421 'GE': '31.146.0.0/16',
3422 'GF': '161.22.64.0/18',
3423 'GG': '62.68.160.0/19',
3424 'GH': '45.208.0.0/14',
3425 'GI': '85.115.128.0/19',
3426 'GL': '88.83.0.0/19',
3427 'GM': '160.182.0.0/15',
3428 'GN': '197.149.192.0/18',
3429 'GP': '104.250.0.0/19',
3430 'GQ': '105.235.224.0/20',
3431 'GR': '94.64.0.0/13',
3432 'GT': '168.234.0.0/16',
3433 'GU': '168.123.0.0/16',
3434 'GW': '197.214.80.0/20',
3435 'GY': '181.41.64.0/18',
3436 'HK': '113.252.0.0/14',
3437 'HN': '181.210.0.0/16',
3438 'HR': '93.136.0.0/13',
3439 'HT': '148.102.128.0/17',
3440 'HU': '84.0.0.0/14',
3441 'ID': '39.192.0.0/10',
3442 'IE': '87.32.0.0/12',
3443 'IL': '79.176.0.0/13',
3444 'IM': '5.62.80.0/20',
3445 'IN': '117.192.0.0/10',
3446 'IO': '203.83.48.0/21',
3447 'IQ': '37.236.0.0/14',
3448 'IR': '2.176.0.0/12',
3449 'IS': '82.221.0.0/16',
3450 'IT': '79.0.0.0/10',
3451 'JE': '87.244.64.0/18',
3452 'JM': '72.27.0.0/17',
3453 'JO': '176.29.0.0/16',
3454 'JP': '126.0.0.0/8',
3455 'KE': '105.48.0.0/12',
3456 'KG': '158.181.128.0/17',
3457 'KH': '36.37.128.0/17',
3458 'KI': '103.25.140.0/22',
3459 'KM': '197.255.224.0/20',
3460 'KN': '198.32.32.0/19',
3461 'KP': '175.45.176.0/22',
3462 'KR': '175.192.0.0/10',
3463 'KW': '37.36.0.0/14',
3464 'KY': '64.96.0.0/15',
3465 'KZ': '2.72.0.0/13',
3466 'LA': '115.84.64.0/18',
3467 'LB': '178.135.0.0/16',
3468 'LC': '192.147.231.0/24',
3469 'LI': '82.117.0.0/19',
3470 'LK': '112.134.0.0/15',
3471 'LR': '41.86.0.0/19',
3472 'LS': '129.232.0.0/17',
3473 'LT': '78.56.0.0/13',
3474 'LU': '188.42.0.0/16',
3475 'LV': '46.109.0.0/16',
3476 'LY': '41.252.0.0/14',
3477 'MA': '105.128.0.0/11',
3478 'MC': '88.209.64.0/18',
3479 'MD': '37.246.0.0/16',
3480 'ME': '178.175.0.0/17',
3481 'MF': '74.112.232.0/21',
3482 'MG': '154.126.0.0/17',
3483 'MH': '117.103.88.0/21',
3484 'MK': '77.28.0.0/15',
3485 'ML': '154.118.128.0/18',
3486 'MM': '37.111.0.0/17',
3487 'MN': '49.0.128.0/17',
3488 'MO': '60.246.0.0/16',
3489 'MP': '202.88.64.0/20',
3490 'MQ': '109.203.224.0/19',
3491 'MR': '41.188.64.0/18',
3492 'MS': '208.90.112.0/22',
3493 'MT': '46.11.0.0/16',
3494 'MU': '105.16.0.0/12',
3495 'MV': '27.114.128.0/18',
3496 'MW': '105.234.0.0/16',
3497 'MX': '187.192.0.0/11',
3498 'MY': '175.136.0.0/13',
3499 'MZ': '197.218.0.0/15',
3500 'NA': '41.182.0.0/16',
3501 'NC': '101.101.0.0/18',
3502 'NE': '197.214.0.0/18',
3503 'NF': '203.17.240.0/22',
3504 'NG': '105.112.0.0/12',
3505 'NI': '186.76.0.0/15',
3506 'NL': '145.96.0.0/11',
3507 'NO': '84.208.0.0/13',
3508 'NP': '36.252.0.0/15',
3509 'NR': '203.98.224.0/19',
3510 'NU': '49.156.48.0/22',
3511 'NZ': '49.224.0.0/14',
3512 'OM': '5.36.0.0/15',
3513 'PA': '186.72.0.0/15',
3514 'PE': '186.160.0.0/14',
3515 'PF': '123.50.64.0/18',
3516 'PG': '124.240.192.0/19',
3517 'PH': '49.144.0.0/13',
3518 'PK': '39.32.0.0/11',
3519 'PL': '83.0.0.0/11',
3520 'PM': '70.36.0.0/20',
3521 'PR': '66.50.0.0/16',
3522 'PS': '188.161.0.0/16',
3523 'PT': '85.240.0.0/13',
3524 'PW': '202.124.224.0/20',
3525 'PY': '181.120.0.0/14',
3526 'QA': '37.210.0.0/15',
3527 'RE': '139.26.0.0/16',
3528 'RO': '79.112.0.0/13',
3529 'RS': '178.220.0.0/14',
3530 'RU': '5.136.0.0/13',
3531 'RW': '105.178.0.0/15',
3532 'SA': '188.48.0.0/13',
3533 'SB': '202.1.160.0/19',
3534 'SC': '154.192.0.0/11',
3535 'SD': '154.96.0.0/13',
3536 'SE': '78.64.0.0/12',
3537 'SG': '152.56.0.0/14',
3538 'SI': '188.196.0.0/14',
3539 'SK': '78.98.0.0/15',
3540 'SL': '197.215.0.0/17',
3541 'SM': '89.186.32.0/19',
3542 'SN': '41.82.0.0/15',
3543 'SO': '197.220.64.0/19',
3544 'SR': '186.179.128.0/17',
3545 'SS': '105.235.208.0/21',
3546 'ST': '197.159.160.0/19',
3547 'SV': '168.243.0.0/16',
3548 'SX': '190.102.0.0/20',
3549 'SY': '5.0.0.0/16',
3550 'SZ': '41.84.224.0/19',
3551 'TC': '65.255.48.0/20',
3552 'TD': '154.68.128.0/19',
3553 'TG': '196.168.0.0/14',
3554 'TH': '171.96.0.0/13',
3555 'TJ': '85.9.128.0/18',
3556 'TK': '27.96.24.0/21',
3557 'TL': '180.189.160.0/20',
3558 'TM': '95.85.96.0/19',
3559 'TN': '197.0.0.0/11',
3560 'TO': '175.176.144.0/21',
3561 'TR': '78.160.0.0/11',
3562 'TT': '186.44.0.0/15',
3563 'TV': '202.2.96.0/19',
3564 'TW': '120.96.0.0/11',
3565 'TZ': '156.156.0.0/14',
3566 'UA': '93.72.0.0/13',
3567 'UG': '154.224.0.0/13',
3568 'US': '3.0.0.0/8',
3569 'UY': '167.56.0.0/13',
3570 'UZ': '82.215.64.0/18',
3571 'VA': '212.77.0.0/19',
3572 'VC': '24.92.144.0/20',
3573 'VE': '186.88.0.0/13',
3574 'VG': '172.103.64.0/18',
3575 'VI': '146.226.0.0/16',
3576 'VN': '14.160.0.0/11',
3577 'VU': '202.80.32.0/20',
3578 'WF': '117.20.32.0/21',
3579 'WS': '202.4.32.0/19',
3580 'YE': '134.35.0.0/16',
3581 'YT': '41.242.116.0/22',
3582 'ZA': '41.0.0.0/11',
3583 'ZM': '165.56.0.0/13',
3584 'ZW': '41.85.192.0/19',
3585 }
3586
3587 @classmethod
5f95927a
S
3588 def random_ipv4(cls, code_or_block):
3589 if len(code_or_block) == 2:
3590 block = cls._country_ip_map.get(code_or_block.upper())
3591 if not block:
3592 return None
3593 else:
3594 block = code_or_block
773f291d
S
3595 addr, preflen = block.split('/')
3596 addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
3597 addr_max = addr_min | (0xffffffff >> int(preflen))
18a0defa 3598 return compat_str(socket.inet_ntoa(
4248dad9 3599 compat_struct_pack('!L', random.randint(addr_min, addr_max))))
773f291d
S
3600
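# Illustrative usage sketch (not part of the original module): random_ipv4
# above picks a pseudo-random address inside a country's CIDR block for the
# geo-bypass machinery. The enclosing class name 'GeoUtils' is an assumption
# taken from context, since its definition starts above the country map.
def _example_random_ipv4():
    fake_ip = GeoUtils.random_ipv4('FR')  # two-letter country code from the map above
    assert fake_ip is not None and fake_ip.count('.') == 3
    # An explicit CIDR block also works and bypasses the lookup table:
    assert GeoUtils.random_ipv4('90.0.0.0/9').count('.') == 3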
3601
91410c9b 3602class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
2461f79d
PH
3603 def __init__(self, proxies=None):
3604 # Set default handlers
3605 for type in ('http', 'https'):
3606 setattr(self, '%s_open' % type,
3607 lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
3608 meth(r, proxy, type))
38e87f6c 3609 compat_urllib_request.ProxyHandler.__init__(self, proxies)
2461f79d 3610
91410c9b 3611 def proxy_open(self, req, proxy, type):
2461f79d 3612 req_proxy = req.headers.get('Ytdl-request-proxy')
91410c9b
PH
3613 if req_proxy is not None:
3614 proxy = req_proxy
2461f79d
PH
3615 del req.headers['Ytdl-request-proxy']
3616
3617 if proxy == '__noproxy__':
3618 return None # No Proxy
51fb4995 3619 if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
71aff188
YCH
3620 req.add_header('Ytdl-socks-proxy', proxy)
3621 # youtube-dl's http/https handlers take care of wrapping the socket with SOCKS

3622 return None
91410c9b
PH
3623 return compat_urllib_request.ProxyHandler.proxy_open(
3624 self, req, proxy, type)
5bc880b9
YCH
3625
3626
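# Illustrative usage sketch (not part of the original module): a single opener
# built with PerRequestProxyHandler can route individual requests through
# different proxies via the 'Ytdl-request-proxy' header; SOCKS URLs are passed
# on to the http(s) handlers through 'Ytdl-socks-proxy'. The proxy address is
# hypothetical.
def _example_per_request_proxy():
    opener = compat_urllib_request.build_opener(
        PerRequestProxyHandler({'http': 'http://proxy.example:3128'}))
    req = compat_urllib_request.Request('http://example.com/')
    # '__noproxy__' forces a direct connection for just this request:
    req.add_header('Ytdl-request-proxy', '__noproxy__')
    return opener, req  # opener.open(req) would perform the actual request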
0a5445dd
YCH
3627# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
3628# released into Public Domain
3629# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
3630
3631def long_to_bytes(n, blocksize=0):
3632 """long_to_bytes(n:long, blocksize:int) : string
3633 Convert a long integer to a byte string.
3634
3635 If optional blocksize is given and greater than zero, pad the front of the
3636 byte string with binary zeros so that the length is a multiple of
3637 blocksize.
3638 """
3639 # after much testing, this algorithm was deemed to be the fastest
3640 s = b''
3641 n = int(n)
3642 while n > 0:
3643 s = compat_struct_pack('>I', n & 0xffffffff) + s
3644 n = n >> 32
3645 # strip off leading zeros
3646 for i in range(len(s)):
3647 if s[i] != b'\000'[0]:
3648 break
3649 else:
3650 # only happens when n == 0
3651 s = b'\000'
3652 i = 0
3653 s = s[i:]
3654 # add back some pad bytes. this could be done more efficiently w.r.t. the
3655 # de-padding being done above, but sigh...
3656 if blocksize > 0 and len(s) % blocksize:
3657 s = (blocksize - len(s) % blocksize) * b'\000' + s
3658 return s
3659
3660
3661def bytes_to_long(s):
3662 """bytes_to_long(string) : long
3663 Convert a byte string to a long integer.
3664
3665 This is (essentially) the inverse of long_to_bytes().
3666 """
3667 acc = 0
3668 length = len(s)
3669 if length % 4:
3670 extra = (4 - length % 4)
3671 s = b'\000' * extra + s
3672 length = length + extra
3673 for i in range(0, length, 4):
3674 acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
3675 return acc
3676
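# Illustrative sketch (not part of the original module): long_to_bytes and
# bytes_to_long are inverses; blocksize only controls zero padding at the
# front of the resulting byte string.
def _example_long_bytes_roundtrip():
    assert long_to_bytes(65537, 4) == b'\x00\x01\x00\x01'
    assert long_to_bytes(65537) == b'\x01\x00\x01'      # no padding requested
    assert bytes_to_long(b'\x00\x01\x00\x01') == 65537  # leading zeros are harmless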
3677
5bc880b9
YCH
3678def ohdave_rsa_encrypt(data, exponent, modulus):
3679 '''
3680 Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
3681
3682 Input:
3683 data: data to encrypt, bytes-like object
3684 exponent, modulus: parameter e and N of RSA algorithm, both integer
3685 Output: hex string of encrypted data
3686
3687 Limitation: supports one block encryption only
3688 '''
3689
3690 payload = int(binascii.hexlify(data[::-1]), 16)
3691 encrypted = pow(payload, exponent, modulus)
3692 return '%x' % encrypted
81bdc8fd
YCH
3693
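# Illustrative sketch (not part of the original module) with toy RSA
# parameters (p=61, q=53 => N=3233, e=17); real sites use a large modulus.
# Note the input bytes are reversed, i.e. treated as little-endian, before
# being interpreted as the integer payload.
def _example_ohdave_rsa_encrypt():
    # payload = 2, and pow(2, 17, 3233) == 1752 == 0x6d8
    assert ohdave_rsa_encrypt(b'\x02', 17, 3233) == '6d8'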
3694
f48409c7
YCH
3695def pkcs1pad(data, length):
3696 """
3697 Pad input data with the PKCS#1 scheme
3698
3699 @param {int[]} data input data
3700 @param {int} length target length
3701 @returns {int[]} padded data
3702 """
3703 if len(data) > length - 11:
3704 raise ValueError('Input data too long for PKCS#1 padding')
3705
3706 pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
3707 return [0, 2] + pseudo_random + [0] + data
3708
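# Illustrative sketch (not part of the original module): pkcs1pad works on
# byte values given as a list of ints and needs 11 bytes of overhead
# (the 0x00 0x02 prefix, the random filler and the 0x00 separator).
def _example_pkcs1pad():
    padded = pkcs1pad([0x01, 0x02, 0x03], 16)
    assert len(padded) == 16
    assert padded[:2] == [0, 2] and padded[-4:] == [0, 0x01, 0x02, 0x03]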
3709
5eb6bdce 3710def encode_base_n(num, n, table=None):
59f898b7 3711 FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
59f898b7
YCH
3712 if not table:
3713 table = FULL_TABLE[:n]
3714
5eb6bdce
YCH
3715 if n > len(table):
3716 raise ValueError('base %d exceeds table length %d' % (n, len(table)))
3717
3718 if num == 0:
3719 return table[0]
3720
81bdc8fd
YCH
3721 ret = ''
3722 while num:
3723 ret = table[num % n] + ret
3724 num = num // n
3725 return ret
f52354a8
YCH
3726
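# Illustrative sketch (not part of the original module): with no explicit
# table, encode_base_n uses the first n characters of 0-9, a-z, A-Z.
def _example_encode_base_n():
    assert encode_base_n(255, 16) == 'ff'
    assert encode_base_n(0, 2) == '0'
    assert encode_base_n(62, 62) == '10'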
3727
3728def decode_packed_codes(code):
06b3fe29 3729 mobj = re.search(PACKED_CODES_RE, code)
f52354a8
YCH
3730 obfuscated_code, base, count, symbols = mobj.groups()
3731 base = int(base)
3732 count = int(count)
3733 symbols = symbols.split('|')
3734 symbol_table = {}
3735
3736 while count:
3737 count -= 1
5eb6bdce 3738 base_n_count = encode_base_n(count, base)
f52354a8
YCH
3739 symbol_table[base_n_count] = symbols[count] or base_n_count
3740
3741 return re.sub(
3742 r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
3743 obfuscated_code)
e154c651 3744
3745
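# Illustrative sketch (not part of the original module), assuming the
# module-level PACKED_CODES_RE matches the usual P.A.C.K.E.R. header
# ("}('...', base, count, '...'.split('|')"). The packed snippet below is a
# hand-made toy, not taken from a real site.
def _example_decode_packed_codes():
    packed = "eval(function(p,a,c,k,e,d){}('0 1',2,2,'hello|world'.split('|'),0,{}))"
    assert decode_packed_codes(packed) == 'hello world'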
3746def parse_m3u8_attributes(attrib):
3747 info = {}
3748 for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
3749 if val.startswith('"'):
3750 val = val[1:-1]
3751 info[key] = val
3752 return info
1143535d
YCH
3753
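# Illustrative sketch (not part of the original module): quoted attribute
# values keep embedded commas and are returned without the quotes.
def _example_parse_m3u8_attributes():
    attrs = parse_m3u8_attributes(
        'BANDWIDTH=1280000,RESOLUTION=1280x720,CODECS="mp4a.40.2,avc1.4d401f"')
    assert attrs == {
        'BANDWIDTH': '1280000',
        'RESOLUTION': '1280x720',
        'CODECS': 'mp4a.40.2,avc1.4d401f',
    }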
3754
3755def urshift(val, n):
3756 return val >> n if val >= 0 else (val + 0x100000000) >> n
d3f8e038
YCH
3757
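# Illustrative sketch (not part of the original module): urshift mimics
# JavaScript's unsigned right shift (>>>) on 32-bit values, so the sign bit
# of a negative number is not propagated.
def _example_urshift():
    assert urshift(16, 4) == 1
    assert urshift(-16, 4) == 0x0fffffff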
3758
3759# Based on png2str() written by @gdkchan and improved by @yokrysty
3760# Originally posted at https://github.com/rg3/youtube-dl/issues/9706
3761def decode_png(png_data):
3762 # Reference: https://www.w3.org/TR/PNG/
3763 header = png_data[8:]
3764
3765 if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
3766 raise IOError('Not a valid PNG file.')
3767
3768 int_map = {1: '>B', 2: '>H', 4: '>I'}
3769 unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]
3770
3771 chunks = []
3772
3773 while header:
3774 length = unpack_integer(header[:4])
3775 header = header[4:]
3776
3777 chunk_type = header[:4]
3778 header = header[4:]
3779
3780 chunk_data = header[:length]
3781 header = header[length:]
3782
3783 header = header[4:] # Skip CRC
3784
3785 chunks.append({
3786 'type': chunk_type,
3787 'length': length,
3788 'data': chunk_data
3789 })
3790
3791 ihdr = chunks[0]['data']
3792
3793 width = unpack_integer(ihdr[:4])
3794 height = unpack_integer(ihdr[4:8])
3795
3796 idat = b''
3797
3798 for chunk in chunks:
3799 if chunk['type'] == b'IDAT':
3800 idat += chunk['data']
3801
3802 if not idat:
3803 raise IOError('Unable to read PNG data.')
3804
3805 decompressed_data = bytearray(zlib.decompress(idat))
3806
3807 stride = width * 3
3808 pixels = []
3809
3810 def _get_pixel(idx):
3811 x = idx % stride
3812 y = idx // stride
3813 return pixels[y][x]
3814
3815 for y in range(height):
3816 basePos = y * (1 + stride)
3817 filter_type = decompressed_data[basePos]
3818
3819 current_row = []
3820
3821 pixels.append(current_row)
3822
3823 for x in range(stride):
3824 color = decompressed_data[1 + basePos + x]
3825 basex = y * stride + x
3826 left = 0
3827 up = 0
3828
3829 if x > 2:
3830 left = _get_pixel(basex - 3)
3831 if y > 0:
3832 up = _get_pixel(basex - stride)
3833
3834 if filter_type == 1: # Sub
3835 color = (color + left) & 0xff
3836 elif filter_type == 2: # Up
3837 color = (color + up) & 0xff
3838 elif filter_type == 3: # Average
3839 color = (color + ((left + up) >> 1)) & 0xff
3840 elif filter_type == 4: # Paeth
3841 a = left
3842 b = up
3843 c = 0
3844
3845 if x > 2 and y > 0:
3846 c = _get_pixel(basex - stride - 3)
3847
3848 p = a + b - c
3849
3850 pa = abs(p - a)
3851 pb = abs(p - b)
3852 pc = abs(p - c)
3853
3854 if pa <= pb and pa <= pc:
3855 color = (color + a) & 0xff
3856 elif pb <= pc:
3857 color = (color + b) & 0xff
3858 else:
3859 color = (color + c) & 0xff
3860
3861 current_row.append(color)
3862
3863 return width, height, pixels
efa97bdc
YCH
3864
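# Illustrative sketch (not part of the original module): decode_png above
# handles non-interlaced 8-bit RGB PNGs. A tiny 2x2 image is assembled on the
# fly (rather than hard-coding a binary blob) and decoded back.
def _example_decode_png():
    def chunk(ctype, data):
        return (compat_struct_pack('>I', len(data)) + ctype + data
                + compat_struct_pack('>I', zlib.crc32(ctype + data) & 0xffffffff))

    # One filter byte (0 = None) per row, then 3 bytes per pixel.
    raw = b'\x00\xff\x00\x00\x00\xff\x00' + b'\x00\x00\x00\xff\xff\xff\xff'
    ihdr = compat_struct_pack('>IIBBBBB', 2, 2, 8, 2, 0, 0, 0)  # 2x2, 8-bit, RGB
    png = (b'\x89PNG\x0d\x0a\x1a\x0a' + chunk(b'IHDR', ihdr)
           + chunk(b'IDAT', zlib.compress(raw)) + chunk(b'IEND', b''))

    width, height, pixels = decode_png(png)
    assert (width, height) == (2, 2)
    assert pixels[0][:3] == [0xff, 0x00, 0x00]  # first pixel is red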
3865
3866def write_xattr(path, key, value):
3867 # This mess below finds the best xattr tool for the job
3868 try:
3869 # try the pyxattr module...
3870 import xattr
3871
53a7e3d2
YCH
3872 if hasattr(xattr, 'set'): # pyxattr
3873 # Unicode arguments are not supported in python-pyxattr until
3874 # version 0.5.0
3875 # See https://github.com/rg3/youtube-dl/issues/5498
3876 pyxattr_required_version = '0.5.0'
3877 if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
3878 # TODO: fallback to CLI tools
3879 raise XAttrUnavailableError(
3880 'python-pyxattr is detected but is too old. '
3881 'youtube-dl requires %s or above while your version is %s. '
3882 'Falling back to other xattr implementations' % (
3883 pyxattr_required_version, xattr.__version__))
3884
3885 setxattr = xattr.set
3886 else: # xattr
3887 setxattr = xattr.setxattr
efa97bdc
YCH
3888
3889 try:
53a7e3d2 3890 setxattr(path, key, value)
efa97bdc
YCH
3891 except EnvironmentError as e:
3892 raise XAttrMetadataError(e.errno, e.strerror)
3893
3894 except ImportError:
3895 if compat_os_name == 'nt':
3896 # Write xattrs to NTFS Alternate Data Streams:
3897 # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
3898 assert ':' not in key
3899 assert os.path.exists(path)
3900
3901 ads_fn = path + ':' + key
3902 try:
3903 with open(ads_fn, 'wb') as f:
3904 f.write(value)
3905 except EnvironmentError as e:
3906 raise XAttrMetadataError(e.errno, e.strerror)
3907 else:
3908 user_has_setfattr = check_executable('setfattr', ['--version'])
3909 user_has_xattr = check_executable('xattr', ['-h'])
3910
3911 if user_has_setfattr or user_has_xattr:
3912
3913 value = value.decode('utf-8')
3914 if user_has_setfattr:
3915 executable = 'setfattr'
3916 opts = ['-n', key, '-v', value]
3917 elif user_has_xattr:
3918 executable = 'xattr'
3919 opts = ['-w', key, value]
3920
3921 cmd = ([encodeFilename(executable, True)] +
3922 [encodeArgument(o) for o in opts] +
3923 [encodeFilename(path, True)])
3924
3925 try:
3926 p = subprocess.Popen(
3927 cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
3928 except EnvironmentError as e:
3929 raise XAttrMetadataError(e.errno, e.strerror)
3930 stdout, stderr = p.communicate()
3931 stderr = stderr.decode('utf-8', 'replace')
3932 if p.returncode != 0:
3933 raise XAttrMetadataError(p.returncode, stderr)
3934
3935 else:
3936 # On Unix, but we could not find pyxattr, setfattr or xattr.
3937 if sys.platform.startswith('linux'):
3938 raise XAttrUnavailableError(
3939 "Couldn't find a tool to set the xattrs. "
3940 "Install either the python 'pyxattr' or 'xattr' "
3941 "modules, or the GNU 'attr' package "
3942 "(which contains the 'setfattr' tool).")
3943 else:
3944 raise XAttrUnavailableError(
3945 "Couldn't find a tool to set the xattrs. "
3946 "Install either the python 'xattr' module, "
3947 "or the 'xattr' binary.")
0c265486
YCH
3948
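# Illustrative usage sketch (not part of the original module): write_xattr
# stores metadata next to the file using whichever backend is available
# (pyxattr/xattr modules, the setfattr/xattr binaries, or NTFS ADS). The path
# and attribute name below are hypothetical; the value must be a bytes object.
def _example_write_xattr():
    try:
        write_xattr('downloaded.mp4', 'user.ytdl.description', 'Test value'.encode('utf-8'))
    except (XAttrUnavailableError, XAttrMetadataError):
        pass  # no xattr backend on this system, or the filesystem refused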
3949
3950def random_birthday(year_field, month_field, day_field):
3951 return {
3952 year_field: str(random.randint(1950, 1995)),
3953 month_field: str(random.randint(1, 12)),
3954 day_field: str(random.randint(1, 31)),
3955 }
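# Illustrative usage sketch (not part of the original module): random_birthday
# produces form-field values for sites that gate content behind an age prompt;
# the field names below are hypothetical and depend on the target form.
def _example_random_birthday():
    fields = random_birthday('birth_year', 'birth_month', 'birth_day')
    assert set(fields) == {'birth_year', 'birth_month', 'birth_day'}
    assert 1950 <= int(fields['birth_year']) <= 1995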