# youtube_dl/utils.py
d77c3dfd 1#!/usr/bin/env python
dcdb292f 2# coding: utf-8
d77c3dfd 3
ecc0c5ee
PH
4from __future__ import unicode_literals
5
1e399778 6import base64
5bc880b9 7import binascii
912b38b4 8import calendar
676eb3f2 9import codecs
62e609ab 10import contextlib
e3946f98 11import ctypes
c496ca96
PH
12import datetime
13import email.utils
f45c185f 14import errno
be4a824d 15import functools
d77c3dfd 16import gzip
03f9daab 17import io
79a2e94e 18import itertools
f4bfd65f 19import json
d77c3dfd 20import locale
02dbf93f 21import math
347de493 22import operator
d77c3dfd 23import os
4eb7f1d1 24import pipes
c496ca96 25import platform
773f291d 26import random
d77c3dfd 27import re
c496ca96 28import socket
79a2e94e 29import ssl
1c088fa8 30import subprocess
d77c3dfd 31import sys
181c8655 32import tempfile
01951dda 33import traceback
bcf89ce6 34import xml.etree.ElementTree
d77c3dfd 35import zlib
d77c3dfd 36
8c25f81b 37from .compat import (
8bb56eee 38 compat_HTMLParser,
8f9312c3 39 compat_basestring,
8c25f81b 40 compat_chr,
36e6f62c 41 compat_etree_fromstring,
51098426 42 compat_expanduser,
8c25f81b 43 compat_html_entities,
55b2f099 44 compat_html_entities_html5,
be4a824d 45 compat_http_client,
c86b6142 46 compat_kwargs,
efa97bdc 47 compat_os_name,
8c25f81b 48 compat_parse_qs,
702ccf2d 49 compat_shlex_quote,
be4a824d 50 compat_socket_create_connection,
8c25f81b 51 compat_str,
edaa23f8 52 compat_struct_pack,
d3f8e038 53 compat_struct_unpack,
8c25f81b
PH
54 compat_urllib_error,
55 compat_urllib_parse,
15707c7e 56 compat_urllib_parse_urlencode,
8c25f81b 57 compat_urllib_parse_urlparse,
7581bfc9 58 compat_urllib_parse_unquote_plus,
8c25f81b
PH
59 compat_urllib_request,
60 compat_urlparse,
810c10ba 61 compat_xpath,
8c25f81b 62)
4644ac55 63
71aff188
YCH
64from .socks import (
65 ProxyType,
66 sockssocket,
67)
68
4644ac55 69
51fb4995
YCH
70def register_socks_protocols():
71 # "Register" SOCKS protocols
d5ae6bb5
YCH
72 # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
73 # URLs with protocols not in urlparse.uses_netloc are not handled correctly
51fb4995
YCH
74 for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
75 if scheme not in compat_urlparse.uses_netloc:
76 compat_urlparse.uses_netloc.append(scheme)
77
78
468e2e92
FV
79# This is not clearly defined otherwise
80compiled_regex_type = type(re.compile(''))
81
3e669f36 82std_headers = {
15d10678 83 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)',
59ae15a5
PH
84 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
85 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
86 'Accept-Encoding': 'gzip, deflate',
87 'Accept-Language': 'en-us,en;q=0.5',
3e669f36 88}
f427df17 89
5f6a1245 90
fb37eb25
S
91USER_AGENTS = {
92 'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
93}
94
95
bf42a990
S
96NO_DEFAULT = object()
97
7105440c
YCH
98ENGLISH_MONTH_NAMES = [
99 'January', 'February', 'March', 'April', 'May', 'June',
100 'July', 'August', 'September', 'October', 'November', 'December']
101
f6717dec
S
102MONTH_NAMES = {
103 'en': ENGLISH_MONTH_NAMES,
104 'fr': [
3e4185c3
S
105 'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
106 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
f6717dec 107}
a942d6cb 108
a7aaa398
S
109KNOWN_EXTENSIONS = (
110 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
111 'flv', 'f4v', 'f4a', 'f4b',
112 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
113 'mkv', 'mka', 'mk3d',
114 'avi', 'divx',
115 'mov',
116 'asf', 'wmv', 'wma',
117 '3gp', '3g2',
118 'mp3',
119 'flac',
120 'ape',
121 'wav',
122 'f4f', 'f4m', 'm3u8', 'smil')
123
c587cbb7 124# needed for sanitizing filenames in restricted mode
c8827027 125ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
126 itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'],
127 'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy')))
c587cbb7 128
46f59e89
S
129DATE_FORMATS = (
130 '%d %B %Y',
131 '%d %b %Y',
132 '%B %d %Y',
cb655f34
S
133 '%B %dst %Y',
134 '%B %dnd %Y',
135 '%B %dth %Y',
46f59e89 136 '%b %d %Y',
cb655f34
S
137 '%b %dst %Y',
138 '%b %dnd %Y',
139 '%b %dth %Y',
46f59e89
S
140 '%b %dst %Y %I:%M',
141 '%b %dnd %Y %I:%M',
142 '%b %dth %Y %I:%M',
143 '%Y %m %d',
144 '%Y-%m-%d',
145 '%Y/%m/%d',
81c13222 146 '%Y/%m/%d %H:%M',
46f59e89 147 '%Y/%m/%d %H:%M:%S',
0c1c6f4b 148 '%Y-%m-%d %H:%M',
46f59e89
S
149 '%Y-%m-%d %H:%M:%S',
150 '%Y-%m-%d %H:%M:%S.%f',
151 '%d.%m.%Y %H:%M',
152 '%d.%m.%Y %H.%M',
153 '%Y-%m-%dT%H:%M:%SZ',
154 '%Y-%m-%dT%H:%M:%S.%fZ',
155 '%Y-%m-%dT%H:%M:%S.%f0Z',
156 '%Y-%m-%dT%H:%M:%S',
157 '%Y-%m-%dT%H:%M:%S.%f',
158 '%Y-%m-%dT%H:%M',
c6eed6b8
S
159 '%b %d %Y at %H:%M',
160 '%b %d %Y at %H:%M:%S',
46f59e89
S
161)
162
163DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
164DATE_FORMATS_DAY_FIRST.extend([
165 '%d-%m-%Y',
166 '%d.%m.%Y',
167 '%d.%m.%y',
168 '%d/%m/%Y',
169 '%d/%m/%y',
170 '%d/%m/%Y %H:%M:%S',
171])
172
173DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
174DATE_FORMATS_MONTH_FIRST.extend([
175 '%m-%d-%Y',
176 '%m.%d.%Y',
177 '%m/%d/%Y',
178 '%m/%d/%y',
179 '%m/%d/%Y %H:%M:%S',
180])
181
06b3fe29
S
182PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
183
7105440c 184
d77c3dfd 185def preferredencoding():
59ae15a5 186 """Get preferred encoding.
d77c3dfd 187
59ae15a5
PH
188 Returns the best encoding scheme for the system, based on
189 locale.getpreferredencoding() and some further tweaks.
190 """
191 try:
192 pref = locale.getpreferredencoding()
28e614de 193 'TEST'.encode(pref)
70a1165b 194 except Exception:
59ae15a5 195 pref = 'UTF-8'
bae611f2 196
59ae15a5 197 return pref
d77c3dfd 198
f4bfd65f 199
181c8655 200def write_json_file(obj, fn):
1394646a 201 """ Encode obj as JSON and write it to fn, atomically if possible """
181c8655 202
92120217 203 fn = encodeFilename(fn)
61ee5aeb 204 if sys.version_info < (3, 0) and sys.platform != 'win32':
ec5f6016
JMF
205 encoding = get_filesystem_encoding()
206 # os.path.basename returns a bytes object, but NamedTemporaryFile
207 # will fail if the filename contains non-ASCII characters unless we
208 # use a unicode object
209 path_basename = lambda f: os.path.basename(fn).decode(encoding)
210 # the same for os.path.dirname
211 path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
212 else:
213 path_basename = os.path.basename
214 path_dirname = os.path.dirname
215
73159f99
S
216 args = {
217 'suffix': '.tmp',
ec5f6016
JMF
218 'prefix': path_basename(fn) + '.',
219 'dir': path_dirname(fn),
73159f99
S
220 'delete': False,
221 }
222
181c8655
PH
223 # In Python 2.x, json.dump expects a bytestream.
224 # In Python 3.x, it writes to a character stream
225 if sys.version_info < (3, 0):
73159f99 226 args['mode'] = 'wb'
181c8655 227 else:
73159f99
S
228 args.update({
229 'mode': 'w',
230 'encoding': 'utf-8',
231 })
232
c86b6142 233 tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))
181c8655
PH
234
235 try:
236 with tf:
237 json.dump(obj, tf)
1394646a
IK
238 if sys.platform == 'win32':
239 # Need to remove existing file on Windows, else os.rename raises
240 # WindowsError or FileExistsError.
241 try:
242 os.unlink(fn)
243 except OSError:
244 pass
181c8655 245 os.rename(tf.name, fn)
70a1165b 246 except Exception:
181c8655
PH
247 try:
248 os.remove(tf.name)
249 except OSError:
250 pass
251 raise
252
253
254if sys.version_info >= (2, 7):
ee114368 255 def find_xpath_attr(node, xpath, key, val=None):
59ae56fa 256 """ Find the xpath xpath[@key=val] """
5d2354f1 257 assert re.match(r'^[a-zA-Z_-]+$', key)
ee114368 258 expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
59ae56fa
PH
259 return node.find(expr)
260else:
ee114368 261 def find_xpath_attr(node, xpath, key, val=None):
810c10ba 262 for f in node.findall(compat_xpath(xpath)):
ee114368
S
263 if key not in f.attrib:
264 continue
265 if val is None or f.attrib.get(key) == val:
59ae56fa
PH
266 return f
267 return None
268
d7e66d39
JMF
269# On Python 2.6 the xml.etree.ElementTree.Element methods don't support
270# the namespace parameter
5f6a1245
JW
271
272
d7e66d39
JMF
273def xpath_with_ns(path, ns_map):
274 components = [c.split(':') for c in path.split('/')]
275 replaced = []
276 for c in components:
277 if len(c) == 1:
278 replaced.append(c[0])
279 else:
280 ns, tag = c
281 replaced.append('{%s}%s' % (ns_map[ns], tag))
282 return '/'.join(replaced)
283
d77c3dfd 284
a41fb80c 285def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
578c0745 286 def _find_xpath(xpath):
810c10ba 287 return node.find(compat_xpath(xpath))
578c0745
S
288
289 if isinstance(xpath, (str, compat_str)):
290 n = _find_xpath(xpath)
291 else:
292 for xp in xpath:
293 n = _find_xpath(xp)
294 if n is not None:
295 break
d74bebd5 296
8e636da4 297 if n is None:
bf42a990
S
298 if default is not NO_DEFAULT:
299 return default
300 elif fatal:
bf0ff932
PH
301 name = xpath if name is None else name
302 raise ExtractorError('Could not find XML element %s' % name)
303 else:
304 return None
a41fb80c
S
305 return n
306
307
308def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
8e636da4
S
309 n = xpath_element(node, xpath, name, fatal=fatal, default=default)
310 if n is None or n == default:
311 return n
312 if n.text is None:
313 if default is not NO_DEFAULT:
314 return default
315 elif fatal:
316 name = xpath if name is None else name
317 raise ExtractorError('Could not find XML element\'s text %s' % name)
318 else:
319 return None
320 return n.text
a41fb80c
S
321
322
323def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
324 n = find_xpath_attr(node, xpath, key)
325 if n is None:
326 if default is not NO_DEFAULT:
327 return default
328 elif fatal:
329 name = '%s[@%s]' % (xpath, key) if name is None else name
330 raise ExtractorError('Could not find XML attribute %s' % name)
331 else:
332 return None
333 return n.attrib[key]
bf0ff932
PH
334
335
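# Illustrative use of the xpath helpers above (added for clarity; the example
# document and values are assumptions, not taken from the original source):
#   >>> doc = compat_etree_fromstring('<root><media url="http://x/v.mp4"/></root>')
#   >>> xpath_attr(doc, './media', 'url')
#   'http://x/v.mp4'
#   >>> xpath_text(doc, './missing', default=None) is None
#   True
# With fatal=True and no default, a missing node raises ExtractorError instead.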
9e6dd238 336def get_element_by_id(id, html):
43e8fafd 337 """Return the content of the tag with the specified ID in the passed HTML document"""
611c1dd9 338 return get_element_by_attribute('id', id, html)
43e8fafd 339
12ea2f30 340
84c237fb 341def get_element_by_class(class_name, html):
2af12ad9
TC
342 """Return the content of the first tag with the specified class in the passed HTML document"""
343 retval = get_elements_by_class(class_name, html)
344 return retval[0] if retval else None
345
346
347def get_element_by_attribute(attribute, value, html, escape_value=True):
348 retval = get_elements_by_attribute(attribute, value, html, escape_value)
349 return retval[0] if retval else None
350
351
352def get_elements_by_class(class_name, html):
353 """Return the content of all tags with the specified class in the passed HTML document as a list"""
354 return get_elements_by_attribute(
84c237fb
YCH
355 'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
356 html, escape_value=False)
357
358
2af12ad9 359def get_elements_by_attribute(attribute, value, html, escape_value=True):
43e8fafd 360 """Return the content of all tags with the specified attribute in the passed HTML document as a list"""
9e6dd238 361
84c237fb
YCH
362 value = re.escape(value) if escape_value else value
363
2af12ad9
TC
364 retlist = []
365 for m in re.finditer(r'''(?xs)
38285056 366 <([a-zA-Z0-9:._-]+)
abc97b5e 367 (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
38285056 368 \s+%s=['"]?%s['"]?
abc97b5e 369 (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
38285056
PH
370 \s*>
371 (?P<content>.*?)
372 </\1>
2af12ad9
TC
373 ''' % (re.escape(attribute), value), html):
374 res = m.group('content')
38285056 375
2af12ad9
TC
376 if res.startswith('"') or res.startswith("'"):
377 res = res[1:-1]
38285056 378
2af12ad9 379 retlist.append(unescapeHTML(res))
a921f407 380
2af12ad9 381 return retlist
a921f407 382
c5229f39 383
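# Illustrative example for the class/attribute lookups above (assumed inputs,
# not from the original source):
#   >>> get_element_by_class('title', '<span class="title main">Some video</span>')
#   'Some video'
#   >>> get_element_by_attribute('itemprop', 'name', '<div itemprop="name">Foo</div>')
#   'Foo'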
8bb56eee
BF
384class HTMLAttributeParser(compat_HTMLParser):
385 """Trivial HTML parser to gather the attributes for a single element"""
386 def __init__(self):
c5229f39 387 self.attrs = {}
8bb56eee
BF
388 compat_HTMLParser.__init__(self)
389
390 def handle_starttag(self, tag, attrs):
391 self.attrs = dict(attrs)
392
c5229f39 393
8bb56eee
BF
394def extract_attributes(html_element):
395 """Given a string for an HTML element such as
396 <el
397 a="foo" B="bar" c="&98;az" d=boz
398 empty= noval entity="&amp;"
399 sq='"' dq="'"
400 >
401 Decode and return a dictionary of attributes.
402 {
403 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
404 'empty': '', 'noval': None, 'entity': '&',
405 'sq': '"', 'dq': '\''
406 }.
407 NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
408 but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
409 """
410 parser = HTMLAttributeParser()
411 parser.feed(html_element)
412 parser.close()
413 return parser.attrs
9e6dd238 414
c5229f39 415
9e6dd238 416def clean_html(html):
59ae15a5 417 """Clean an HTML snippet into a readable string"""
dd622d7c
PH
418
419 if html is None: # Convenience for sanitizing descriptions etc.
420 return html
421
59ae15a5
PH
422 # Newline vs <br />
423 html = html.replace('\n', ' ')
edd9221c
TF
424 html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html)
425 html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
59ae15a5
PH
426 # Strip html tags
427 html = re.sub('<.*?>', '', html)
428 # Replace html entities
429 html = unescapeHTML(html)
7decf895 430 return html.strip()
9e6dd238
FV
431
432
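# Illustrative example for clean_html() (assumed input, not from the original
# source): tags are stripped, <br> becomes a newline and entities are decoded:
#   >>> clean_html('<p>First line<br/>Second &amp; last</p>')
#   'First line\nSecond & last'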
d77c3dfd 433def sanitize_open(filename, open_mode):
59ae15a5
PH
434 """Try to open the given filename, and slightly tweak it if this fails.
435
436 Attempts to open the given filename. If this fails, it tries to change
437 the filename slightly, step by step, until it's either able to open it
438 or it fails and raises a final exception, like the standard open()
439 function.
440
441 It returns the tuple (stream, definitive_file_name).
442 """
443 try:
28e614de 444 if filename == '-':
59ae15a5
PH
445 if sys.platform == 'win32':
446 import msvcrt
447 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
898280a0 448 return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
59ae15a5
PH
449 stream = open(encodeFilename(filename), open_mode)
450 return (stream, filename)
451 except (IOError, OSError) as err:
f45c185f
PH
452 if err.errno in (errno.EACCES,):
453 raise
59ae15a5 454
f45c185f 455 # In case of error, try to remove win32 forbidden chars
d55de57b 456 alt_filename = sanitize_path(filename)
f45c185f
PH
457 if alt_filename == filename:
458 raise
459 else:
460 # An exception here should be caught in the caller
d55de57b 461 stream = open(encodeFilename(alt_filename), open_mode)
f45c185f 462 return (stream, alt_filename)
d77c3dfd
FV
463
464
465def timeconvert(timestr):
59ae15a5
PH
466 """Convert RFC 2822 defined time string into system timestamp"""
467 timestamp = None
468 timetuple = email.utils.parsedate_tz(timestr)
469 if timetuple is not None:
470 timestamp = email.utils.mktime_tz(timetuple)
471 return timestamp
1c469a94 472
5f6a1245 473
796173d0 474def sanitize_filename(s, restricted=False, is_id=False):
59ae15a5
PH
475 """Sanitizes a string so it could be used as part of a filename.
476 If restricted is set, use a stricter subset of allowed characters.
158af524
S
477 Set is_id if this is not an arbitrary string, but an ID that should be kept
478 if possible.
59ae15a5
PH
479 """
480 def replace_insane(char):
c587cbb7
AT
481 if restricted and char in ACCENT_CHARS:
482 return ACCENT_CHARS[char]
59ae15a5
PH
483 if char == '?' or ord(char) < 32 or ord(char) == 127:
484 return ''
485 elif char == '"':
486 return '' if restricted else '\''
487 elif char == ':':
488 return '_-' if restricted else ' -'
489 elif char in '\\/|*<>':
490 return '_'
627dcfff 491 if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
59ae15a5
PH
492 return '_'
493 if restricted and ord(char) > 127:
494 return '_'
495 return char
496
2aeb06d6
PH
497 # Handle timestamps
498 s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
28e614de 499 result = ''.join(map(replace_insane, s))
796173d0
PH
500 if not is_id:
501 while '__' in result:
502 result = result.replace('__', '_')
503 result = result.strip('_')
504 # Common case of "Foreign band name - English song title"
505 if restricted and result.startswith('-_'):
506 result = result[2:]
5a42414b
PH
507 if result.startswith('-'):
508 result = '_' + result[len('-'):]
a7440261 509 result = result.lstrip('.')
796173d0
PH
510 if not result:
511 result = '_'
59ae15a5 512 return result
d77c3dfd 513
5f6a1245 514
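# Illustrative behaviour of sanitize_filename() (examples are assumptions
# worked out from the rules above, not taken from the original source):
#   >>> sanitize_filename('Foo: bar?')
#   'Foo - bar'
#   >>> sanitize_filename('Foo: bar?', restricted=True)
#   'Foo_-_bar'
#   >>> sanitize_filename('12:34:56')    # timestamps keep their digits
#   '12_34_56'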
a2aaf4db
S
515def sanitize_path(s):
516 """Sanitizes and normalizes path on Windows"""
517 if sys.platform != 'win32':
518 return s
be531ef1
S
519 drive_or_unc, _ = os.path.splitdrive(s)
520 if sys.version_info < (2, 7) and not drive_or_unc:
521 drive_or_unc, _ = os.path.splitunc(s)
522 norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
523 if drive_or_unc:
a2aaf4db
S
524 norm_path.pop(0)
525 sanitized_path = [
ec85ded8 526 path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
a2aaf4db 527 for path_part in norm_path]
be531ef1
S
528 if drive_or_unc:
529 sanitized_path.insert(0, drive_or_unc + os.path.sep)
a2aaf4db
S
530 return os.path.join(*sanitized_path)
531
532
67dda517
S
533# Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of
534# unwanted failures due to missing protocol
17bcc626
S
535def sanitize_url(url):
536 return 'http:%s' % url if url.startswith('//') else url
537
538
67dda517 539def sanitized_Request(url, *args, **kwargs):
17bcc626 540 return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs)
67dda517
S
541
542
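# Illustrative example for sanitize_url()/sanitized_Request() (assumed URLs,
# not from the original source): protocol-less URLs get an explicit http: scheme
# before the request is built:
#   >>> sanitize_url('//example.com/video.mp4')
#   'http://example.com/video.mp4'
#   >>> sanitize_url('https://example.com/video.mp4')   # already fine, unchanged
#   'https://example.com/video.mp4'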
51098426
S
543def expand_path(s):
544 """Expand shell variables and ~"""
545 return os.path.expandvars(compat_expanduser(s))
546
547
d77c3dfd 548def orderedSet(iterable):
59ae15a5
PH
549 """ Remove all duplicates from the input iterable """
550 res = []
551 for el in iterable:
552 if el not in res:
553 res.append(el)
554 return res
d77c3dfd 555
912b38b4 556
55b2f099 557def _htmlentity_transform(entity_with_semicolon):
4e408e47 558 """Transforms an HTML entity to a character."""
55b2f099
YCH
559 entity = entity_with_semicolon[:-1]
560
4e408e47
PH
561 # Known non-numeric HTML entity
562 if entity in compat_html_entities.name2codepoint:
563 return compat_chr(compat_html_entities.name2codepoint[entity])
564
55b2f099
YCH
565 # TODO: HTML5 allows entities without a semicolon. For example,
566 # '&Eacuteric' should be decoded as 'Éric'.
567 if entity_with_semicolon in compat_html_entities_html5:
568 return compat_html_entities_html5[entity_with_semicolon]
569
91757b0f 570 mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
4e408e47
PH
571 if mobj is not None:
572 numstr = mobj.group(1)
28e614de 573 if numstr.startswith('x'):
4e408e47 574 base = 16
28e614de 575 numstr = '0%s' % numstr
4e408e47
PH
576 else:
577 base = 10
7aefc49c
S
578 # See https://github.com/rg3/youtube-dl/issues/7518
579 try:
580 return compat_chr(int(numstr, base))
581 except ValueError:
582 pass
4e408e47
PH
583
584 # Unknown entity in name, return its literal representation
7a3f0c00 585 return '&%s;' % entity
4e408e47
PH
586
587
d77c3dfd 588def unescapeHTML(s):
912b38b4
PH
589 if s is None:
590 return None
591 assert type(s) == compat_str
d77c3dfd 592
4e408e47 593 return re.sub(
55b2f099 594 r'&([^;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
d77c3dfd 595
8bf48f23 596
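# Illustrative example for unescapeHTML()/_htmlentity_transform() (assumed
# inputs, not from the original source):
#   >>> unescapeHTML('Tom &amp; Jerry &#x2F; &eacute;')
#   'Tom & Jerry / é'
# Unknown entities are left as their literal text rather than raising.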
aa49acd1
S
597def get_subprocess_encoding():
598 if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
599 # For subprocess calls, encode with locale encoding
600 # Refer to http://stackoverflow.com/a/9951851/35070
601 encoding = preferredencoding()
602 else:
603 encoding = sys.getfilesystemencoding()
604 if encoding is None:
605 encoding = 'utf-8'
606 return encoding
607
608
8bf48f23 609def encodeFilename(s, for_subprocess=False):
59ae15a5
PH
610 """
611 @param s The name of the file
612 """
d77c3dfd 613
8bf48f23 614 assert type(s) == compat_str
d77c3dfd 615
59ae15a5
PH
616 # Python 3 has a Unicode API
617 if sys.version_info >= (3, 0):
618 return s
0f00efed 619
aa49acd1
S
620 # Pass '' directly to use Unicode APIs on Windows 2000 and up
621 # (Detecting Windows NT 4 is tricky because 'major >= 4' would
622 # match Windows 9x series as well. Besides, NT 4 is obsolete.)
623 if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
624 return s
625
8ee239e9
YCH
626 # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
627 if sys.platform.startswith('java'):
628 return s
629
aa49acd1
S
630 return s.encode(get_subprocess_encoding(), 'ignore')
631
632
633def decodeFilename(b, for_subprocess=False):
634
635 if sys.version_info >= (3, 0):
636 return b
637
638 if not isinstance(b, bytes):
639 return b
640
641 return b.decode(get_subprocess_encoding(), 'ignore')
8bf48f23 642
f07b74fc
PH
643
644def encodeArgument(s):
645 if not isinstance(s, compat_str):
646 # Legacy code that uses byte strings
647 # Uncomment the following line after fixing all post processors
7af808a5 648 # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
f07b74fc
PH
649 s = s.decode('ascii')
650 return encodeFilename(s, True)
651
652
aa49acd1
S
653def decodeArgument(b):
654 return decodeFilename(b, True)
655
656
8271226a
PH
657def decodeOption(optval):
658 if optval is None:
659 return optval
660 if isinstance(optval, bytes):
661 optval = optval.decode(preferredencoding())
662
663 assert isinstance(optval, compat_str)
664 return optval
1c256f70 665
5f6a1245 666
4539dd30
PH
667def formatSeconds(secs):
668 if secs > 3600:
669 return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
670 elif secs > 60:
671 return '%d:%02d' % (secs // 60, secs % 60)
672 else:
673 return '%d' % secs
674
a0ddb8a2 675
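# Illustrative example for formatSeconds() (assumed values):
#   >>> formatSeconds(3661)
#   '1:01:01'
#   >>> formatSeconds(75)
#   '1:15'
#   >>> formatSeconds(45)
#   '45'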
be4a824d
PH
676def make_HTTPS_handler(params, **kwargs):
677 opts_no_check_certificate = params.get('nocheckcertificate', False)
0db261ba 678 if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9
be5f2c19 679 context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
0db261ba 680 if opts_no_check_certificate:
be5f2c19 681 context.check_hostname = False
0db261ba 682 context.verify_mode = ssl.CERT_NONE
a2366922 683 try:
be4a824d 684 return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
a2366922
PH
685 except TypeError:
686 # Python 2.7.8
687 # (create_default_context present but HTTPSHandler has no context=)
688 pass
689
690 if sys.version_info < (3, 2):
d7932313 691 return YoutubeDLHTTPSHandler(params, **kwargs)
aa37e3d4 692 else: # Python < 3.4
d7932313 693 context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ea6d901e 694 context.verify_mode = (ssl.CERT_NONE
dca08720 695 if opts_no_check_certificate
ea6d901e 696 else ssl.CERT_REQUIRED)
303b479e 697 context.set_default_verify_paths()
be4a824d 698 return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
ea6d901e 699
732ea2f0 700
08f2a92c
JMF
701def bug_reports_message():
702 if ytdl_is_updateable():
703 update_cmd = 'type youtube-dl -U to update'
704 else:
705 update_cmd = 'see https://yt-dl.org/update on how to update'
706 msg = '; please report this issue on https://yt-dl.org/bug .'
707 msg += ' Make sure you are using the latest version; %s.' % update_cmd
708 msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
709 return msg
710
711
bf5b9d85
PM
712class YoutubeDLError(Exception):
713 """Base exception for YoutubeDL errors."""
714 pass
715
716
717class ExtractorError(YoutubeDLError):
1c256f70 718 """Error during info extraction."""
5f6a1245 719
d11271dd 720 def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
9a82b238
PH
721 """ tb, if given, is the original traceback (so that it can be printed out).
722 If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
723 """
724
725 if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
726 expected = True
d11271dd
PH
727 if video_id is not None:
728 msg = video_id + ': ' + msg
410f3e73 729 if cause:
28e614de 730 msg += ' (caused by %r)' % cause
9a82b238 731 if not expected:
08f2a92c 732 msg += bug_reports_message()
1c256f70 733 super(ExtractorError, self).__init__(msg)
d5979c5d 734
1c256f70 735 self.traceback = tb
8cc83b8d 736 self.exc_info = sys.exc_info() # preserve original exception
2eabb802 737 self.cause = cause
d11271dd 738 self.video_id = video_id
1c256f70 739
01951dda
PH
740 def format_traceback(self):
741 if self.traceback is None:
742 return None
28e614de 743 return ''.join(traceback.format_tb(self.traceback))
01951dda 744
1c256f70 745
416c7fcb
PH
746class UnsupportedError(ExtractorError):
747 def __init__(self, url):
748 super(UnsupportedError, self).__init__(
749 'Unsupported URL: %s' % url, expected=True)
750 self.url = url
751
752
55b3e45b
JMF
753class RegexNotFoundError(ExtractorError):
754 """Error when a regex didn't match"""
755 pass
756
757
773f291d
S
758class GeoRestrictedError(ExtractorError):
759 """Geographic restriction Error exception.
760
761 This exception may be thrown when a video is not available from your
762 geographic location due to geographic restrictions imposed by a website.
763 """
764 def __init__(self, msg, countries=None):
765 super(GeoRestrictedError, self).__init__(msg, expected=True)
766 self.msg = msg
767 self.countries = countries
768
769
bf5b9d85 770class DownloadError(YoutubeDLError):
59ae15a5 771 """Download Error exception.
d77c3dfd 772
59ae15a5
PH
773 This exception may be thrown by FileDownloader objects if they are not
774 configured to continue on errors. They will contain the appropriate
775 error message.
776 """
5f6a1245 777
8cc83b8d
FV
778 def __init__(self, msg, exc_info=None):
779 """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
780 super(DownloadError, self).__init__(msg)
781 self.exc_info = exc_info
d77c3dfd
FV
782
783
bf5b9d85 784class SameFileError(YoutubeDLError):
59ae15a5 785 """Same File exception.
d77c3dfd 786
59ae15a5
PH
787 This exception will be thrown by FileDownloader objects if they detect
788 multiple files would have to be downloaded to the same file on disk.
789 """
790 pass
d77c3dfd
FV
791
792
bf5b9d85 793class PostProcessingError(YoutubeDLError):
59ae15a5 794 """Post Processing exception.
d77c3dfd 795
59ae15a5
PH
796 This exception may be raised by PostProcessor's .run() method to
797 indicate an error in the postprocessing task.
798 """
5f6a1245 799
7851b379 800 def __init__(self, msg):
bf5b9d85 801 super(PostProcessingError, self).__init__(msg)
7851b379 802 self.msg = msg
d77c3dfd 803
5f6a1245 804
bf5b9d85 805class MaxDownloadsReached(YoutubeDLError):
59ae15a5
PH
806 """ --max-downloads limit has been reached. """
807 pass
d77c3dfd
FV
808
809
bf5b9d85 810class UnavailableVideoError(YoutubeDLError):
59ae15a5 811 """Unavailable Format exception.
d77c3dfd 812
59ae15a5
PH
813 This exception will be thrown when a video is requested
814 in a format that is not available for that video.
815 """
816 pass
d77c3dfd
FV
817
818
bf5b9d85 819class ContentTooShortError(YoutubeDLError):
59ae15a5 820 """Content Too Short exception.
d77c3dfd 821
59ae15a5
PH
822 This exception may be raised by FileDownloader objects when a file they
823 download is too small for what the server announced first, indicating
824 the connection was probably interrupted.
825 """
d77c3dfd 826
59ae15a5 827 def __init__(self, downloaded, expected):
bf5b9d85
PM
828 super(ContentTooShortError, self).__init__(
829 'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
830 )
2c7ed247 831 # Both in bytes
59ae15a5
PH
832 self.downloaded = downloaded
833 self.expected = expected
d77c3dfd 834
5f6a1245 835
bf5b9d85 836class XAttrMetadataError(YoutubeDLError):
efa97bdc
YCH
837 def __init__(self, code=None, msg='Unknown error'):
838 super(XAttrMetadataError, self).__init__(msg)
839 self.code = code
bd264412 840 self.msg = msg
efa97bdc
YCH
841
842 # Parsing code and msg
843 if (self.code in (errno.ENOSPC, errno.EDQUOT) or
844 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
845 self.reason = 'NO_SPACE'
846 elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
847 self.reason = 'VALUE_TOO_LONG'
848 else:
849 self.reason = 'NOT_SUPPORTED'
850
851
bf5b9d85 852class XAttrUnavailableError(YoutubeDLError):
efa97bdc
YCH
853 pass
854
855
c5a59d93 856def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
e5e78797
S
857 # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
858 # expected HTTP responses to meet HTTP/1.0 or later (see also
859 # https://github.com/rg3/youtube-dl/issues/6727)
860 if sys.version_info < (3, 0):
5a1a2e94 861 kwargs[b'strict'] = True
be4a824d
PH
862 hc = http_class(*args, **kwargs)
863 source_address = ydl_handler._params.get('source_address')
864 if source_address is not None:
865 sa = (source_address, 0)
866 if hasattr(hc, 'source_address'): # Python 2.7+
867 hc.source_address = sa
868 else: # Python 2.6
869 def _hc_connect(self, *args, **kwargs):
870 sock = compat_socket_create_connection(
871 (self.host, self.port), self.timeout, sa)
872 if is_https:
d7932313
PH
873 self.sock = ssl.wrap_socket(
874 sock, self.key_file, self.cert_file,
875 ssl_version=ssl.PROTOCOL_TLSv1)
be4a824d
PH
876 else:
877 self.sock = sock
878 hc.connect = functools.partial(_hc_connect, hc)
879
880 return hc
881
882
87f0e62d 883def handle_youtubedl_headers(headers):
992fc9d6
YCH
884 filtered_headers = headers
885
886 if 'Youtubedl-no-compression' in filtered_headers:
887 filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
87f0e62d 888 del filtered_headers['Youtubedl-no-compression']
87f0e62d 889
992fc9d6 890 return filtered_headers
87f0e62d
YCH
891
892
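# Illustrative example for handle_youtubedl_headers() (assumed header dict, not
# from the original source): the internal marker header drops Accept-Encoding
# and is itself removed:
#   >>> handle_youtubedl_headers({'Youtubedl-no-compression': '1',
#   ...                           'Accept-Encoding': 'gzip', 'User-Agent': 'UA'})
#   {'User-Agent': 'UA'}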
acebc9cd 893class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
59ae15a5
PH
894 """Handler for HTTP requests and responses.
895
896 This class, when installed with an OpenerDirector, automatically adds
897 the standard headers to every HTTP request and handles gzipped and
898 deflated responses from web servers. If compression is to be avoided in
899 a particular request, the original request in the program code only has
0424ec30 900 to include the HTTP header "Youtubedl-no-compression", which will be
59ae15a5
PH
901 removed before making the real request.
902
903 Part of this code was copied from:
904
905 http://techknack.net/python-urllib2-handlers/
906
907 Andrew Rowls, the author of that code, agreed to release it to the
908 public domain.
909 """
910
be4a824d
PH
911 def __init__(self, params, *args, **kwargs):
912 compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
913 self._params = params
914
915 def http_open(self, req):
71aff188
YCH
916 conn_class = compat_http_client.HTTPConnection
917
918 socks_proxy = req.headers.get('Ytdl-socks-proxy')
919 if socks_proxy:
920 conn_class = make_socks_conn_class(conn_class, socks_proxy)
921 del req.headers['Ytdl-socks-proxy']
922
be4a824d 923 return self.do_open(functools.partial(
71aff188 924 _create_http_connection, self, conn_class, False),
be4a824d
PH
925 req)
926
59ae15a5
PH
927 @staticmethod
928 def deflate(data):
929 try:
930 return zlib.decompress(data, -zlib.MAX_WBITS)
931 except zlib.error:
932 return zlib.decompress(data)
933
934 @staticmethod
935 def addinfourl_wrapper(stream, headers, url, code):
936 if hasattr(compat_urllib_request.addinfourl, 'getcode'):
937 return compat_urllib_request.addinfourl(stream, headers, url, code)
938 ret = compat_urllib_request.addinfourl(stream, headers, url)
939 ret.code = code
940 return ret
941
acebc9cd 942 def http_request(self, req):
51f267d9
S
943 # According to RFC 3986, URLs cannot contain non-ASCII characters; however, this is not
944 # always respected by websites; some tend to give out URLs with non-percent-encoded
945 # non-ASCII characters (see telemb.py, ard.py [#3412])
946 # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
947 # To work around aforementioned issue we will replace request's original URL with
948 # percent-encoded one
949 # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
950 # the code of this workaround has been moved here from YoutubeDL.urlopen()
951 url = req.get_full_url()
952 url_escaped = escape_url(url)
953
954 # Substitute URL if any change after escaping
955 if url != url_escaped:
15d260eb 956 req = update_Request(req, url=url_escaped)
51f267d9 957
33ac271b 958 for h, v in std_headers.items():
3d5f7a39
JK
959 # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
960 # The dict keys are capitalized because of this bug by urllib
961 if h.capitalize() not in req.headers:
33ac271b 962 req.add_header(h, v)
87f0e62d
YCH
963
964 req.headers = handle_youtubedl_headers(req.headers)
989b4b2b
PH
965
966 if sys.version_info < (2, 7) and '#' in req.get_full_url():
967 # Python 2.6 is brain-dead when it comes to fragments
968 req._Request__original = req._Request__original.partition('#')[0]
969 req._Request__r_type = req._Request__r_type.partition('#')[0]
970
59ae15a5
PH
971 return req
972
acebc9cd 973 def http_response(self, req, resp):
59ae15a5
PH
974 old_resp = resp
975 # gzip
976 if resp.headers.get('Content-encoding', '') == 'gzip':
aa3e9507
PH
977 content = resp.read()
978 gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
979 try:
980 uncompressed = io.BytesIO(gz.read())
981 except IOError as original_ioerror:
982 # There may be junk at the end of the file
983 # See http://stackoverflow.com/q/4928560/35070 for details
984 for i in range(1, 1024):
985 try:
986 gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
987 uncompressed = io.BytesIO(gz.read())
988 except IOError:
989 continue
990 break
991 else:
992 raise original_ioerror
993 resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
59ae15a5 994 resp.msg = old_resp.msg
c047270c 995 del resp.headers['Content-encoding']
59ae15a5
PH
996 # deflate
997 if resp.headers.get('Content-encoding', '') == 'deflate':
998 gz = io.BytesIO(self.deflate(resp.read()))
999 resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
1000 resp.msg = old_resp.msg
c047270c 1001 del resp.headers['Content-encoding']
ad729172
S
1002 # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
1003 # https://github.com/rg3/youtube-dl/issues/6457).
5a4d9ddb
S
1004 if 300 <= resp.code < 400:
1005 location = resp.headers.get('Location')
1006 if location:
1007 # Per RFC 2616, the default charset is iso-8859-1, which is respected by Python 3
1008 if sys.version_info >= (3, 0):
1009 location = location.encode('iso-8859-1').decode('utf-8')
0ea59007
YCH
1010 else:
1011 location = location.decode('utf-8')
5a4d9ddb
S
1012 location_escaped = escape_url(location)
1013 if location != location_escaped:
1014 del resp.headers['Location']
9a4aec8b
YCH
1015 if sys.version_info < (3, 0):
1016 location_escaped = location_escaped.encode('utf-8')
5a4d9ddb 1017 resp.headers['Location'] = location_escaped
59ae15a5 1018 return resp
0f8d03f8 1019
acebc9cd
PH
1020 https_request = http_request
1021 https_response = http_response
bf50b038 1022
5de90176 1023
71aff188
YCH
1024def make_socks_conn_class(base_class, socks_proxy):
1025 assert issubclass(base_class, (
1026 compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))
1027
1028 url_components = compat_urlparse.urlparse(socks_proxy)
1029 if url_components.scheme.lower() == 'socks5':
1030 socks_type = ProxyType.SOCKS5
1031 elif url_components.scheme.lower() in ('socks', 'socks4'):
1032 socks_type = ProxyType.SOCKS4
51fb4995
YCH
1033 elif url_components.scheme.lower() == 'socks4a':
1034 socks_type = ProxyType.SOCKS4A
71aff188 1035
cdd94c2e
YCH
1036 def unquote_if_non_empty(s):
1037 if not s:
1038 return s
1039 return compat_urllib_parse_unquote_plus(s)
1040
71aff188
YCH
1041 proxy_args = (
1042 socks_type,
1043 url_components.hostname, url_components.port or 1080,
1044 True, # Remote DNS
cdd94c2e
YCH
1045 unquote_if_non_empty(url_components.username),
1046 unquote_if_non_empty(url_components.password),
71aff188
YCH
1047 )
1048
1049 class SocksConnection(base_class):
1050 def connect(self):
1051 self.sock = sockssocket()
1052 self.sock.setproxy(*proxy_args)
1053 if type(self.timeout) in (int, float):
1054 self.sock.settimeout(self.timeout)
1055 self.sock.connect((self.host, self.port))
1056
1057 if isinstance(self, compat_http_client.HTTPSConnection):
1058 if hasattr(self, '_context'): # Python > 2.6
1059 self.sock = self._context.wrap_socket(
1060 self.sock, server_hostname=self.host)
1061 else:
1062 self.sock = ssl.wrap_socket(self.sock)
1063
1064 return SocksConnection
1065
1066
be4a824d
PH
1067class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
1068 def __init__(self, params, https_conn_class=None, *args, **kwargs):
1069 compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
1070 self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
1071 self._params = params
1072
1073 def https_open(self, req):
4f264c02 1074 kwargs = {}
71aff188
YCH
1075 conn_class = self._https_conn_class
1076
4f264c02
JMF
1077 if hasattr(self, '_context'): # python > 2.6
1078 kwargs['context'] = self._context
1079 if hasattr(self, '_check_hostname'): # python 3.x
1080 kwargs['check_hostname'] = self._check_hostname
71aff188
YCH
1081
1082 socks_proxy = req.headers.get('Ytdl-socks-proxy')
1083 if socks_proxy:
1084 conn_class = make_socks_conn_class(conn_class, socks_proxy)
1085 del req.headers['Ytdl-socks-proxy']
1086
be4a824d 1087 return self.do_open(functools.partial(
71aff188 1088 _create_http_connection, self, conn_class, True),
4f264c02 1089 req, **kwargs)
be4a824d
PH
1090
1091
a6420bf5
S
1092class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
1093 def __init__(self, cookiejar=None):
1094 compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)
1095
1096 def http_response(self, request, response):
1097 # Python 2 will choke on the next HTTP request if there are non-ASCII
1098 # characters in the Set-Cookie HTTP header of the last response (see
1099 # https://github.com/rg3/youtube-dl/issues/6769).
1100 # In order to at least prevent crashing we will percent encode Set-Cookie
1101 # header before HTTPCookieProcessor starts processing it.
e28034c5
S
1102 # if sys.version_info < (3, 0) and response.headers:
1103 # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
1104 # set_cookie = response.headers.get(set_cookie_header)
1105 # if set_cookie:
1106 # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
1107 # if set_cookie != set_cookie_escaped:
1108 # del response.headers[set_cookie_header]
1109 # response.headers[set_cookie_header] = set_cookie_escaped
a6420bf5
S
1110 return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)
1111
1112 https_request = compat_urllib_request.HTTPCookieProcessor.http_request
1113 https_response = http_response
1114
1115
46f59e89
S
1116def extract_timezone(date_str):
1117 m = re.search(
1118 r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
1119 date_str)
1120 if not m:
1121 timezone = datetime.timedelta()
1122 else:
1123 date_str = date_str[:-len(m.group('tz'))]
1124 if not m.group('sign'):
1125 timezone = datetime.timedelta()
1126 else:
1127 sign = 1 if m.group('sign') == '+' else -1
1128 timezone = datetime.timedelta(
1129 hours=sign * int(m.group('hours')),
1130 minutes=sign * int(m.group('minutes')))
1131 return timezone, date_str
1132
1133
08b38d54 1134def parse_iso8601(date_str, delimiter='T', timezone=None):
912b38b4
PH
1135 """ Return a UNIX timestamp from the given date """
1136
1137 if date_str is None:
1138 return None
1139
52c3a6e4
S
1140 date_str = re.sub(r'\.[0-9]+', '', date_str)
1141
08b38d54 1142 if timezone is None:
46f59e89
S
1143 timezone, date_str = extract_timezone(date_str)
1144
52c3a6e4
S
1145 try:
1146 date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
1147 dt = datetime.datetime.strptime(date_str, date_format) - timezone
1148 return calendar.timegm(dt.timetuple())
1149 except ValueError:
1150 pass
912b38b4
PH
1151
1152
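# Illustrative example for parse_iso8601()/extract_timezone() (assumed input;
# the expected value was computed by hand from the logic above):
#   >>> parse_iso8601('2017-03-10T14:05:00+01:00')
#   1489151100
# i.e. 2017-03-10 13:05:00 UTC, one hour earlier than the local time given.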
46f59e89
S
1153def date_formats(day_first=True):
1154 return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST
1155
1156
42bdd9d0 1157def unified_strdate(date_str, day_first=True):
bf50b038 1158 """Return a string with the date in the format YYYYMMDD"""
64e7ad60
PH
1159
1160 if date_str is None:
1161 return None
bf50b038 1162 upload_date = None
5f6a1245 1163 # Replace commas
026fcc04 1164 date_str = date_str.replace(',', ' ')
42bdd9d0 1165 # Remove AM/PM + timezone
9bb8e0a3 1166 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
46f59e89 1167 _, date_str = extract_timezone(date_str)
42bdd9d0 1168
46f59e89 1169 for expression in date_formats(day_first):
bf50b038
JMF
1170 try:
1171 upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
5de90176 1172 except ValueError:
bf50b038 1173 pass
42393ce2
PH
1174 if upload_date is None:
1175 timetuple = email.utils.parsedate_tz(date_str)
1176 if timetuple:
c6b9cf05
S
1177 try:
1178 upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
1179 except ValueError:
1180 pass
6a750402
JMF
1181 if upload_date is not None:
1182 return compat_str(upload_date)
bf50b038 1183
5f6a1245 1184
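# Illustrative example for unified_strdate() (assumed inputs, not from the
# original source): many date spellings collapse to YYYYMMDD:
#   >>> unified_strdate('December 21, 2015')
#   '20151221'
#   >>> unified_strdate('2015/12/21')
#   '20151221'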
46f59e89
S
1185def unified_timestamp(date_str, day_first=True):
1186 if date_str is None:
1187 return None
1188
1189 date_str = date_str.replace(',', ' ')
1190
7dc2a74e 1191 pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
46f59e89
S
1192 timezone, date_str = extract_timezone(date_str)
1193
1194 # Remove AM/PM + timezone
1195 date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
1196
1197 for expression in date_formats(day_first):
1198 try:
7dc2a74e 1199 dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
46f59e89
S
1200 return calendar.timegm(dt.timetuple())
1201 except ValueError:
1202 pass
1203 timetuple = email.utils.parsedate_tz(date_str)
1204 if timetuple:
7dc2a74e 1205 return calendar.timegm(timetuple) + pm_delta * 3600
46f59e89
S
1206
1207
28e614de 1208def determine_ext(url, default_ext='unknown_video'):
f4776371
S
1209 if url is None:
1210 return default_ext
9cb9a5df 1211 guess = url.partition('?')[0].rpartition('.')[2]
73e79f2a
PH
1212 if re.match(r'^[A-Za-z0-9]+$', guess):
1213 return guess
a7aaa398
S
1214 # Try to extract ext from URLs like http://example.com/foo/bar.mp4/?download
1215 elif guess.rstrip('/') in KNOWN_EXTENSIONS:
9cb9a5df 1216 return guess.rstrip('/')
73e79f2a 1217 else:
cbdbb766 1218 return default_ext
73e79f2a 1219
5f6a1245 1220
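# Illustrative example for determine_ext() (assumed URLs, not from the original
# source):
#   >>> determine_ext('http://example.com/video.mp4?download=1')
#   'mp4'
#   >>> determine_ext('http://example.com/play.m3u8/?token=x')
#   'm3u8'
#   >>> determine_ext('http://example.com/watch')
#   'unknown_video'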
d4051a8e 1221def subtitles_filename(filename, sub_lang, sub_format):
28e614de 1222 return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format
d4051a8e 1223
5f6a1245 1224
bd558525 1225def date_from_str(date_str):
37254abc
JMF
1226 """
1227 Return a datetime object from a string in the format YYYYMMDD or
1228 (now|today)[+-][0-9](day|week|month|year)(s)?"""
1229 today = datetime.date.today()
f8795e10 1230 if date_str in ('now', 'today'):
37254abc 1231 return today
f8795e10
PH
1232 if date_str == 'yesterday':
1233 return today - datetime.timedelta(days=1)
ec85ded8 1234 match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
37254abc
JMF
1235 if match is not None:
1236 sign = match.group('sign')
1237 time = int(match.group('time'))
1238 if sign == '-':
1239 time = -time
1240 unit = match.group('unit')
dfb1b146 1241 # A bad approximation?
37254abc
JMF
1242 if unit == 'month':
1243 unit = 'day'
1244 time *= 30
1245 elif unit == 'year':
1246 unit = 'day'
1247 time *= 365
1248 unit += 's'
1249 delta = datetime.timedelta(**{unit: time})
1250 return today + delta
611c1dd9 1251 return datetime.datetime.strptime(date_str, '%Y%m%d').date()
5f6a1245
JW
1252
1253
e63fc1be 1254def hyphenate_date(date_str):
1255 """
1256 Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
1257 match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
1258 if match is not None:
1259 return '-'.join(match.groups())
1260 else:
1261 return date_str
1262
5f6a1245 1263
bd558525
JMF
1264class DateRange(object):
1265 """Represents a time interval between two dates"""
5f6a1245 1266
bd558525
JMF
1267 def __init__(self, start=None, end=None):
1268 """start and end must be strings in the format accepted by date"""
1269 if start is not None:
1270 self.start = date_from_str(start)
1271 else:
1272 self.start = datetime.datetime.min.date()
1273 if end is not None:
1274 self.end = date_from_str(end)
1275 else:
1276 self.end = datetime.datetime.max.date()
37254abc 1277 if self.start > self.end:
bd558525 1278 raise ValueError('Date range: "%s" , the start date must be before the end date' % self)
5f6a1245 1279
bd558525
JMF
1280 @classmethod
1281 def day(cls, day):
1282 """Returns a range that only contains the given day"""
5f6a1245
JW
1283 return cls(day, day)
1284
bd558525
JMF
1285 def __contains__(self, date):
1286 """Check if the date is in the range"""
37254abc
JMF
1287 if not isinstance(date, datetime.date):
1288 date = date_from_str(date)
1289 return self.start <= date <= self.end
5f6a1245 1290
bd558525 1291 def __str__(self):
5f6a1245 1292 return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
c496ca96
PH
1293
1294
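# Illustrative example for date_from_str()/DateRange() (assumed values, not
# from the original source):
#   >>> date_from_str('now-1week') == datetime.date.today() - datetime.timedelta(days=7)
#   True
#   >>> '20170615' in DateRange('20170101', '20171231')
#   True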
1295def platform_name():
1296 """ Returns the platform name as a compat_str """
1297 res = platform.platform()
1298 if isinstance(res, bytes):
1299 res = res.decode(preferredencoding())
1300
1301 assert isinstance(res, compat_str)
1302 return res
c257baff
PH
1303
1304
b58ddb32
PH
1305def _windows_write_string(s, out):
1306 """ Returns True if the string was written using special methods,
1307 False if it has yet to be written out."""
1308 # Adapted from http://stackoverflow.com/a/3259271/35070
1309
1310 import ctypes
1311 import ctypes.wintypes
1312
1313 WIN_OUTPUT_IDS = {
1314 1: -11,
1315 2: -12,
1316 }
1317
a383a98a
PH
1318 try:
1319 fileno = out.fileno()
1320 except AttributeError:
1321 # If the output stream doesn't have a fileno, it's virtual
1322 return False
aa42e873
PH
1323 except io.UnsupportedOperation:
1324 # Some strange Windows pseudo files?
1325 return False
b58ddb32
PH
1326 if fileno not in WIN_OUTPUT_IDS:
1327 return False
1328
e2f89ec7 1329 GetStdHandle = ctypes.WINFUNCTYPE(
b58ddb32 1330 ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
611c1dd9 1331 (b'GetStdHandle', ctypes.windll.kernel32))
b58ddb32
PH
1332 h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
1333
e2f89ec7 1334 WriteConsoleW = ctypes.WINFUNCTYPE(
b58ddb32
PH
1335 ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
1336 ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
611c1dd9 1337 ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32))
b58ddb32
PH
1338 written = ctypes.wintypes.DWORD(0)
1339
611c1dd9 1340 GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32))
b58ddb32
PH
1341 FILE_TYPE_CHAR = 0x0002
1342 FILE_TYPE_REMOTE = 0x8000
e2f89ec7 1343 GetConsoleMode = ctypes.WINFUNCTYPE(
b58ddb32
PH
1344 ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
1345 ctypes.POINTER(ctypes.wintypes.DWORD))(
611c1dd9 1346 (b'GetConsoleMode', ctypes.windll.kernel32))
b58ddb32
PH
1347 INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
1348
1349 def not_a_console(handle):
1350 if handle == INVALID_HANDLE_VALUE or handle is None:
1351 return True
8fb3ac36
PH
1352 return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
1353 GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
b58ddb32
PH
1354
1355 if not_a_console(h):
1356 return False
1357
d1b9c912
PH
1358 def next_nonbmp_pos(s):
1359 try:
1360 return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
1361 except StopIteration:
1362 return len(s)
1363
1364 while s:
1365 count = min(next_nonbmp_pos(s), 1024)
1366
b58ddb32 1367 ret = WriteConsoleW(
d1b9c912 1368 h, s, count if count else 2, ctypes.byref(written), None)
b58ddb32
PH
1369 if ret == 0:
1370 raise OSError('Failed to write string')
d1b9c912
PH
1371 if not count: # We just wrote a non-BMP character
1372 assert written.value == 2
1373 s = s[1:]
1374 else:
1375 assert written.value > 0
1376 s = s[written.value:]
b58ddb32
PH
1377 return True
1378
1379
734f90bb 1380def write_string(s, out=None, encoding=None):
7459e3a2
PH
1381 if out is None:
1382 out = sys.stderr
8bf48f23 1383 assert type(s) == compat_str
7459e3a2 1384
b58ddb32
PH
1385 if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
1386 if _windows_write_string(s, out):
1387 return
1388
7459e3a2
PH
1389 if ('b' in getattr(out, 'mode', '') or
1390 sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr
104aa738
PH
1391 byt = s.encode(encoding or preferredencoding(), 'ignore')
1392 out.write(byt)
1393 elif hasattr(out, 'buffer'):
1394 enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
1395 byt = s.encode(enc, 'ignore')
1396 out.buffer.write(byt)
1397 else:
8bf48f23 1398 out.write(s)
7459e3a2
PH
1399 out.flush()
1400
1401
48ea9cea
PH
1402def bytes_to_intlist(bs):
1403 if not bs:
1404 return []
1405 if isinstance(bs[0], int): # Python 3
1406 return list(bs)
1407 else:
1408 return [ord(c) for c in bs]
1409
c257baff 1410
cba892fa 1411def intlist_to_bytes(xs):
1412 if not xs:
1413 return b''
edaa23f8 1414 return compat_struct_pack('%dB' % len(xs), *xs)
c38b1e77
PH
1415
1416
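# Illustrative round trip for the two helpers above (assumed values, not from
# the original source):
#   >>> bytes_to_intlist(b'abc')
#   [97, 98, 99]
#   >>> intlist_to_bytes([97, 98, 99])
#   b'abc'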
c1c9a79c
PH
1417# Cross-platform file locking
1418if sys.platform == 'win32':
1419 import ctypes.wintypes
1420 import msvcrt
1421
1422 class OVERLAPPED(ctypes.Structure):
1423 _fields_ = [
1424 ('Internal', ctypes.wintypes.LPVOID),
1425 ('InternalHigh', ctypes.wintypes.LPVOID),
1426 ('Offset', ctypes.wintypes.DWORD),
1427 ('OffsetHigh', ctypes.wintypes.DWORD),
1428 ('hEvent', ctypes.wintypes.HANDLE),
1429 ]
1430
1431 kernel32 = ctypes.windll.kernel32
1432 LockFileEx = kernel32.LockFileEx
1433 LockFileEx.argtypes = [
1434 ctypes.wintypes.HANDLE, # hFile
1435 ctypes.wintypes.DWORD, # dwFlags
1436 ctypes.wintypes.DWORD, # dwReserved
1437 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
1438 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
1439 ctypes.POINTER(OVERLAPPED) # Overlapped
1440 ]
1441 LockFileEx.restype = ctypes.wintypes.BOOL
1442 UnlockFileEx = kernel32.UnlockFileEx
1443 UnlockFileEx.argtypes = [
1444 ctypes.wintypes.HANDLE, # hFile
1445 ctypes.wintypes.DWORD, # dwReserved
1446 ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
1447 ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
1448 ctypes.POINTER(OVERLAPPED) # Overlapped
1449 ]
1450 UnlockFileEx.restype = ctypes.wintypes.BOOL
1451 whole_low = 0xffffffff
1452 whole_high = 0x7fffffff
1453
1454 def _lock_file(f, exclusive):
1455 overlapped = OVERLAPPED()
1456 overlapped.Offset = 0
1457 overlapped.OffsetHigh = 0
1458 overlapped.hEvent = 0
1459 f._lock_file_overlapped_p = ctypes.pointer(overlapped)
1460 handle = msvcrt.get_osfhandle(f.fileno())
1461 if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
1462 whole_low, whole_high, f._lock_file_overlapped_p):
1463 raise OSError('Locking file failed: %r' % ctypes.FormatError())
1464
1465 def _unlock_file(f):
1466 assert f._lock_file_overlapped_p
1467 handle = msvcrt.get_osfhandle(f.fileno())
1468 if not UnlockFileEx(handle, 0,
1469 whole_low, whole_high, f._lock_file_overlapped_p):
1470 raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
1471
1472else:
399a76e6
YCH
1473 # Some platforms, such as Jython, are missing fcntl
1474 try:
1475 import fcntl
c1c9a79c 1476
399a76e6
YCH
1477 def _lock_file(f, exclusive):
1478 fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
c1c9a79c 1479
399a76e6
YCH
1480 def _unlock_file(f):
1481 fcntl.flock(f, fcntl.LOCK_UN)
1482 except ImportError:
1483 UNSUPPORTED_MSG = 'file locking is not supported on this platform'
1484
1485 def _lock_file(f, exclusive):
1486 raise IOError(UNSUPPORTED_MSG)
1487
1488 def _unlock_file(f):
1489 raise IOError(UNSUPPORTED_MSG)
c1c9a79c
PH
1490
1491
1492class locked_file(object):
1493 def __init__(self, filename, mode, encoding=None):
1494 assert mode in ['r', 'a', 'w']
1495 self.f = io.open(filename, mode, encoding=encoding)
1496 self.mode = mode
1497
1498 def __enter__(self):
1499 exclusive = self.mode != 'r'
1500 try:
1501 _lock_file(self.f, exclusive)
1502 except IOError:
1503 self.f.close()
1504 raise
1505 return self
1506
1507 def __exit__(self, etype, value, traceback):
1508 try:
1509 _unlock_file(self.f)
1510 finally:
1511 self.f.close()
1512
1513 def __iter__(self):
1514 return iter(self.f)
1515
1516 def write(self, *args):
1517 return self.f.write(*args)
1518
1519 def read(self, *args):
1520 return self.f.read(*args)
4eb7f1d1
JMF
1521
1522
4644ac55
S
1523def get_filesystem_encoding():
1524 encoding = sys.getfilesystemencoding()
1525 return encoding if encoding is not None else 'utf-8'
1526
1527
4eb7f1d1 1528def shell_quote(args):
a6a173c2 1529 quoted_args = []
4644ac55 1530 encoding = get_filesystem_encoding()
a6a173c2
JMF
1531 for a in args:
1532 if isinstance(a, bytes):
1533 # We may get a filename encoded with 'encodeFilename'
1534 a = a.decode(encoding)
1535 quoted_args.append(pipes.quote(a))
28e614de 1536 return ' '.join(quoted_args)
9d4660ca
PH
1537
1538
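# Illustrative example for shell_quote() (assumed arguments, not from the
# original source): only arguments that need quoting are wrapped:
#   >>> shell_quote(['ffmpeg', '-i', 'my video.mp4'])
#   "ffmpeg -i 'my video.mp4'"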
1539def smuggle_url(url, data):
1540 """ Pass additional data in a URL for internal use. """
1541
81953d1a
RA
1542 url, idata = unsmuggle_url(url, {})
1543 data.update(idata)
15707c7e 1544 sdata = compat_urllib_parse_urlencode(
28e614de
PH
1545 {'__youtubedl_smuggle': json.dumps(data)})
1546 return url + '#' + sdata
9d4660ca
PH
1547
1548
79f82953 1549def unsmuggle_url(smug_url, default=None):
83e865a3 1550 if '#__youtubedl_smuggle' not in smug_url:
79f82953 1551 return smug_url, default
28e614de
PH
1552 url, _, sdata = smug_url.rpartition('#')
1553 jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
9d4660ca
PH
1554 data = json.loads(jsond)
1555 return url, data
02dbf93f
PH
1556
1557
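# Illustrative round trip for smuggle_url()/unsmuggle_url() (assumed values,
# not from the original source):
#   >>> url = smuggle_url('http://example.com/video', {'referer': 'http://example.com/'})
#   >>> unsmuggle_url(url)
#   ('http://example.com/video', {'referer': 'http://example.com/'})
# The extra data rides along in the URL fragment as URL-encoded JSON.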
02dbf93f
PH
1558def format_bytes(bytes):
1559 if bytes is None:
28e614de 1560 return 'N/A'
02dbf93f
PH
1561 if type(bytes) is str:
1562 bytes = float(bytes)
1563 if bytes == 0.0:
1564 exponent = 0
1565 else:
1566 exponent = int(math.log(bytes, 1024.0))
28e614de 1567 suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
02dbf93f 1568 converted = float(bytes) / float(1024 ** exponent)
28e614de 1569 return '%.2f%s' % (converted, suffix)
f53c966a 1570
1c088fa8 1571
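# Illustrative example for format_bytes() (assumed values):
#   >>> format_bytes(1536)
#   '1.50KiB'
#   >>> format_bytes(None)
#   'N/A'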
fb47597b
S
1572def lookup_unit_table(unit_table, s):
1573 units_re = '|'.join(re.escape(u) for u in unit_table)
1574 m = re.match(
782b1b5b 1575 r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
fb47597b
S
1576 if not m:
1577 return None
1578 num_str = m.group('num').replace(',', '.')
1579 mult = unit_table[m.group('unit')]
1580 return int(float(num_str) * mult)
1581
1582
be64b5b0
PH
1583def parse_filesize(s):
1584 if s is None:
1585 return None
1586
dfb1b146 1587 # The lower-case forms are of course incorrect and unofficial,
be64b5b0
PH
1588 # but we support those too
1589 _UNIT_TABLE = {
1590 'B': 1,
1591 'b': 1,
70852b47 1592 'bytes': 1,
be64b5b0
PH
1593 'KiB': 1024,
1594 'KB': 1000,
1595 'kB': 1024,
1596 'Kb': 1000,
13585d76 1597 'kb': 1000,
70852b47
YCH
1598 'kilobytes': 1000,
1599 'kibibytes': 1024,
be64b5b0
PH
1600 'MiB': 1024 ** 2,
1601 'MB': 1000 ** 2,
1602 'mB': 1024 ** 2,
1603 'Mb': 1000 ** 2,
13585d76 1604 'mb': 1000 ** 2,
70852b47
YCH
1605 'megabytes': 1000 ** 2,
1606 'mebibytes': 1024 ** 2,
be64b5b0
PH
1607 'GiB': 1024 ** 3,
1608 'GB': 1000 ** 3,
1609 'gB': 1024 ** 3,
1610 'Gb': 1000 ** 3,
13585d76 1611 'gb': 1000 ** 3,
70852b47
YCH
1612 'gigabytes': 1000 ** 3,
1613 'gibibytes': 1024 ** 3,
be64b5b0
PH
1614 'TiB': 1024 ** 4,
1615 'TB': 1000 ** 4,
1616 'tB': 1024 ** 4,
1617 'Tb': 1000 ** 4,
13585d76 1618 'tb': 1000 ** 4,
70852b47
YCH
1619 'terabytes': 1000 ** 4,
1620 'tebibytes': 1024 ** 4,
be64b5b0
PH
1621 'PiB': 1024 ** 5,
1622 'PB': 1000 ** 5,
1623 'pB': 1024 ** 5,
1624 'Pb': 1000 ** 5,
13585d76 1625 'pb': 1000 ** 5,
70852b47
YCH
1626 'petabytes': 1000 ** 5,
1627 'pebibytes': 1024 ** 5,
1628 'EiB': 1024 ** 6,
1629 'EB': 1000 ** 6,
1630 'eB': 1024 ** 6,
1631 'Eb': 1000 ** 6,
13585d76 1632 'eb': 1000 ** 6,
1633 'exabytes': 1000 ** 6,
1634 'exbibytes': 1024 ** 6,
1635 'ZiB': 1024 ** 7,
1636 'ZB': 1000 ** 7,
1637 'zB': 1024 ** 7,
1638 'Zb': 1000 ** 7,
13585d76 1639 'zb': 1000 ** 7,
1640 'zettabytes': 1000 ** 7,
1641 'zebibytes': 1024 ** 7,
1642 'YiB': 1024 ** 8,
1643 'YB': 1000 ** 8,
1644 'yB': 1024 ** 8,
1645 'Yb': 1000 ** 8,
13585d76 1646 'yb': 1000 ** 8,
1647 'yottabytes': 1000 ** 8,
1648 'yobibytes': 1024 ** 8,
1649 }
1650
1651 return lookup_unit_table(_UNIT_TABLE, s)
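# Illustrative doctest-style sketch (added annotation, values made up):
# parse_filesize() maps a human-readable size onto bytes via the unit table above:
#
#   >>> parse_filesize('1.5 GiB')
#   1610612736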
1652
1653
1654def parse_count(s):
1655 if s is None:
1656 return None
1657
1658 s = s.strip()
1659
1660 if re.match(r'^[\d,.]+$', s):
1661 return str_to_int(s)
1662
1663 _UNIT_TABLE = {
1664 'k': 1000,
1665 'K': 1000,
1666 'm': 1000 ** 2,
1667 'M': 1000 ** 2,
1668 'kk': 1000 ** 2,
1669 'KK': 1000 ** 2,
1670 }
be64b5b0 1671
fb47597b 1672 return lookup_unit_table(_UNIT_TABLE, s)
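# Illustrative doctest-style sketch (added annotation, values made up):
# parse_count() handles both plain and suffixed view/like counts:
#
#   >>> parse_count('17,500')
#   17500
#   >>> parse_count('1.5k')
#   1500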
be64b5b0 1673
2f7ae819 1674
a942d6cb 1675def month_by_name(name, lang='en'):
1676 """ Return the number of a month by (locale-independently) English name """
1677
f6717dec 1678 month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
a942d6cb 1679
caefb1de 1680 try:
f6717dec 1681 return month_names.index(name) + 1
1682 except ValueError:
1683 return None
1684
1685
1686def month_by_abbreviation(abbrev):
1687 """ Return the number of a month by (locale-independently) English
1688 abbreviations """
1689
1690 try:
1691 return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
1692 except ValueError:
1693 return None
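# Illustrative doctest-style sketch (added annotation): month lookups are plain
# list searches; 'fr' is the only non-English table defined above:
#
#   >>> month_by_name('April')
#   4
#   >>> month_by_name('juillet', 'fr')
#   7
#   >>> month_by_abbreviation('Sep')
#   9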
1694
1695
5aafe895 1696def fix_xml_ampersands(xml_str):
18258362 1697 """Replace all the '&' by '&amp;' in XML"""
1698 return re.sub(
1699 r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
28e614de 1700 '&amp;',
5aafe895 1701 xml_str)
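# Illustrative doctest-style sketch (added annotation, input made up): only bare
# ampersands are escaped; existing entities are left alone:
#
#   >>> fix_xml_ampersands('<a href="?x=1&y=2&amp;z=3"/>')
#   '<a href="?x=1&amp;y=2&amp;z=3"/>'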
1702
1703
1704def setproctitle(title):
8bf48f23 1705 assert isinstance(title, compat_str)
1706
1707 # ctypes in Jython is not complete
1708 # http://bugs.jython.org/issue2148
1709 if sys.platform.startswith('java'):
1710 return
1711
e3946f98 1712 try:
611c1dd9 1713 libc = ctypes.cdll.LoadLibrary('libc.so.6')
1714 except OSError:
1715 return
1716 except TypeError:
1717 # LoadLibrary in Windows Python 2.7.13 only expects
1718 # a bytestring, but since unicode_literals turns
1719 # every string into a unicode string, it fails.
1720 return
1721 title_bytes = title.encode('utf-8')
1722 buf = ctypes.create_string_buffer(len(title_bytes))
1723 buf.value = title_bytes
e3946f98 1724 try:
6eefe533 1725 libc.prctl(15, buf, 0, 0, 0)
1726 except AttributeError:
1727 return # Strange libc, just skip this
1728
1729
1730def remove_start(s, start):
46bc9b7d 1731 return s[len(start):] if s is not None and s.startswith(start) else s
1732
1733
2b9faf55 1734def remove_end(s, end):
46bc9b7d 1735 return s[:-len(end)] if s is not None and s.endswith(end) else s
1736
1737
1738def remove_quotes(s):
1739 if s is None or len(s) < 2:
1740 return s
1741 for quote in ('"', "'", ):
1742 if s[0] == quote and s[-1] == quote:
1743 return s[1:-1]
1744 return s
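# Illustrative doctest-style sketch (added annotation, values made up): small
# string helpers that are safe to call with None or non-matching affixes:
#
#   >>> remove_start('yt:video', 'yt:')
#   'video'
#   >>> remove_end('movie.mp4', '.mp4')
#   'movie'
#   >>> remove_quotes('"quoted"')
#   'quoted'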
1745
1746
29eb5174 1747def url_basename(url):
9b8aaeed 1748 path = compat_urlparse.urlparse(url).path
28e614de 1749 return path.strip('/').split('/')[-1]
1750
1751
1752def base_url(url):
1753 return re.match(r'https?://[^?#&]+/', url).group()
1754
1755
e34c3361 1756def urljoin(base, path):
1757 if isinstance(path, bytes):
1758 path = path.decode('utf-8')
1759 if not isinstance(path, compat_str) or not path:
1760 return None
b0c65c67 1761 if re.match(r'^(?:https?:)?//', path):
e34c3361 1762 return path
1763 if isinstance(base, bytes):
1764 base = base.decode('utf-8')
1765 if not isinstance(base, compat_str) or not re.match(
1766 r'^(?:https?:)?//', base):
1767 return None
1768 return compat_urlparse.urljoin(base, path)
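# Illustrative doctest-style sketch (added annotation, URLs made up):
# basename/base/join helpers used when building absolute media URLs:
#
#   >>> url_basename('https://example.com/a/b.mp4?x=1')
#   'b.mp4'
#   >>> base_url('https://example.com/a/b.mp4')
#   'https://example.com/a/'
#   >>> urljoin('https://example.com/a/', 'c/d.m3u8')
#   'https://example.com/a/c/d.m3u8'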
1769
1770
1771class HEADRequest(compat_urllib_request.Request):
1772 def get_method(self):
611c1dd9 1773 return 'HEAD'
1774
1775
1776class PUTRequest(compat_urllib_request.Request):
1777 def get_method(self):
1778 return 'PUT'
1779
1780
9732d77e 1781def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
1782 if get_attr:
1783 if v is not None:
1784 v = getattr(v, get_attr, None)
1785 if v == '':
1786 v = None
1787 if v is None:
1788 return default
1789 try:
1790 return int(v) * invscale // scale
1791 except ValueError:
af98f8ff 1792 return default
9732d77e 1793
9572013d 1794
1795def str_or_none(v, default=None):
1796 return default if v is None else compat_str(v)
1797
1798
1799def str_to_int(int_str):
48d4681e 1800 """ A more relaxed version of int_or_none """
1801 if int_str is None:
1802 return None
28e614de 1803 int_str = re.sub(r'[,\.\+]', '', int_str)
9732d77e 1804 return int(int_str)
1805
1806
9732d77e 1807def float_or_none(v, scale=1, invscale=1, default=None):
1808 if v is None:
1809 return default
1810 try:
1811 return float(v) * invscale / scale
1812 except ValueError:
1813 return default
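# Illustrative doctest-style sketch (added annotation, values made up): tolerant
# numeric coercion helpers used throughout the extractors:
#
#   >>> int_or_none('42')
#   42
#   >>> int_or_none('n/a', default=0)
#   0
#   >>> str_to_int('1,234,567')
#   1234567
#   >>> float_or_none('1.5', invscale=1000)
#   1500.0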
1814
1815
1816def strip_or_none(v):
1817 return None if v is None else v.strip()
1818
1819
608d11f5 1820def parse_duration(s):
8f9312c3 1821 if not isinstance(s, compat_basestring):
1822 return None
1823
1824 s = s.strip()
1825
acaff495 1826 days, hours, mins, secs, ms = [None] * 5
15846398 1827 m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
acaff495 1828 if m:
1829 days, hours, mins, secs, ms = m.groups()
1830 else:
1831 m = re.match(
1832 r'''(?ix)(?:P?T)?
8f4b58d7 1833 (?:
acaff495 1834 (?P<days>[0-9]+)\s*d(?:ays?)?\s*
8f4b58d7 1835 )?
acaff495 1836 (?:
1837 (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
1838 )?
1839 (?:
1840 (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
1841 )?
1842 (?:
1843 (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
15846398 1844 )?Z?$''', s)
acaff495 1845 if m:
1846 days, hours, mins, secs, ms = m.groups()
1847 else:
15846398 1848 m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
acaff495 1849 if m:
1850 hours, mins = m.groups()
1851 else:
1852 return None
1853
1854 duration = 0
1855 if secs:
1856 duration += float(secs)
1857 if mins:
1858 duration += float(mins) * 60
1859 if hours:
1860 duration += float(hours) * 60 * 60
1861 if days:
1862 duration += float(days) * 24 * 60 * 60
1863 if ms:
1864 duration += float(ms)
1865 return duration
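# Illustrative doctest-style sketch (added annotation, values made up):
# parse_duration() accepts clock-style, ISO-8601-style and unit-suffixed strings,
# returning seconds:
#
#   >>> parse_duration('1:02:03')
#   3723.0
#   >>> parse_duration('PT1H30M')
#   5400.0
#   >>> parse_duration('3 min')
#   180.0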
1866
1867
e65e4c88 1868def prepend_extension(filename, ext, expected_real_ext=None):
5f6a1245 1869 name, real_ext = os.path.splitext(filename)
1870 return (
1871 '{0}.{1}{2}'.format(name, ext, real_ext)
1872 if not expected_real_ext or real_ext[1:] == expected_real_ext
1873 else '{0}.{1}'.format(filename, ext))
1874
1875
1876def replace_extension(filename, ext, expected_real_ext=None):
1877 name, real_ext = os.path.splitext(filename)
1878 return '{0}.{1}'.format(
1879 name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
1880 ext)
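# Illustrative doctest-style sketch (added annotation, filenames made up):
#
#   >>> prepend_extension('video.mp4', 'temp')
#   'video.temp.mp4'
#   >>> replace_extension('video.mp4', 'mkv')
#   'video.mkv'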
1881
1882
1883def check_executable(exe, args=[]):
1884 """ Checks if the given binary is installed somewhere in PATH, and returns its name.
1885 args can be a list of arguments for a short output (like -version) """
1886 try:
1887 subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
1888 except OSError:
1889 return False
1890 return exe
1891
1892
95807118 1893def get_exe_version(exe, args=['--version'],
cae97f65 1894 version_re=None, unrecognized='present'):
1895 """ Returns the version of the specified executable,
1896 or False if the executable is not present """
1897 try:
1898 # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
1899 # SIGTTOU if youtube-dl is run in the background.
1900 # See https://github.com/rg3/youtube-dl/issues/955#issuecomment-209789656
cae97f65 1901 out, _ = subprocess.Popen(
54116803 1902 [encodeArgument(exe)] + args,
00ca7552 1903 stdin=subprocess.PIPE,
1904 stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
1905 except OSError:
1906 return False
1907 if isinstance(out, bytes): # Python 2.x
1908 out = out.decode('ascii', 'ignore')
1909 return detect_exe_version(out, version_re, unrecognized)
1910
1911
1912def detect_exe_version(output, version_re=None, unrecognized='present'):
1913 assert isinstance(output, compat_str)
1914 if version_re is None:
1915 version_re = r'version\s+([-0-9._a-zA-Z]+)'
1916 m = re.search(version_re, output)
1917 if m:
1918 return m.group(1)
1919 else:
1920 return unrecognized
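# Illustrative doctest-style sketch (added annotation; the banner text is an
# invented example): detect_exe_version() pulls the version token out of a
# --version banner using the default regex above:
#
#   >>> detect_exe_version('ffmpeg version 3.4.1 Copyright (c) 2000-2017')
#   '3.4.1'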
1921
1922
b7ab0590 1923class PagedList(object):
1924 def __len__(self):
1925 # This is only useful for tests
1926 return len(self.getslice())
1927
1928
1929class OnDemandPagedList(PagedList):
b95dc034 1930 def __init__(self, pagefunc, pagesize, use_cache=False):
1931 self._pagefunc = pagefunc
1932 self._pagesize = pagesize
1933 self._use_cache = use_cache
1934 if use_cache:
1935 self._cache = {}
9c44d242 1936
1937 def getslice(self, start=0, end=None):
1938 res = []
1939 for pagenum in itertools.count(start // self._pagesize):
1940 firstid = pagenum * self._pagesize
1941 nextfirstid = pagenum * self._pagesize + self._pagesize
1942 if start >= nextfirstid:
1943 continue
1944
1945 page_results = None
1946 if self._use_cache:
1947 page_results = self._cache.get(pagenum)
1948 if page_results is None:
1949 page_results = list(self._pagefunc(pagenum))
1950 if self._use_cache:
1951 self._cache[pagenum] = page_results
1952
1953 startv = (
1954 start % self._pagesize
1955 if firstid <= start < nextfirstid
1956 else 0)
1957
1958 endv = (
1959 ((end - 1) % self._pagesize) + 1
1960 if (end is not None and firstid <= end <= nextfirstid)
1961 else None)
1962
1963 if startv != 0 or endv is not None:
1964 page_results = page_results[startv:endv]
1965 res.extend(page_results)
1966
1967 # A little optimization - if the current page is not "full", i.e. does
1968 # not contain page_size videos, then we can assume that this page
1969 # is the last one - there are no more ids on further pages -
1970 # i.e. no need to query again.
1971 if len(page_results) + startv < self._pagesize:
1972 break
1973
1974 # If we got the whole page, but the next page is not interesting,
1975 # break out early as well
1976 if end == nextfirstid:
1977 break
1978 return res
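# Illustrative doctest-style sketch (added annotation; the page function is an
# invented example): OnDemandPagedList fetches pages lazily and slices across
# page boundaries:
#
#   >>> pages = OnDemandPagedList(lambda n: list(range(n * 3, n * 3 + 3)), 3)
#   >>> pages.getslice(2, 7)
#   [2, 3, 4, 5, 6]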
1979
1980
1981class InAdvancePagedList(PagedList):
1982 def __init__(self, pagefunc, pagecount, pagesize):
1983 self._pagefunc = pagefunc
1984 self._pagecount = pagecount
1985 self._pagesize = pagesize
1986
1987 def getslice(self, start=0, end=None):
1988 res = []
1989 start_page = start // self._pagesize
1990 end_page = (
1991 self._pagecount if end is None else (end // self._pagesize + 1))
1992 skip_elems = start - start_page * self._pagesize
1993 only_more = None if end is None else end - start
1994 for pagenum in range(start_page, end_page):
1995 page = list(self._pagefunc(pagenum))
1996 if skip_elems:
1997 page = page[skip_elems:]
1998 skip_elems = None
1999 if only_more is not None:
2000 if len(page) < only_more:
2001 only_more -= len(page)
2002 else:
2003 page = page[:only_more]
2004 res.extend(page)
2005 break
2006 res.extend(page)
2007 return res
2008
2009
81c2f20b 2010def uppercase_escape(s):
676eb3f2 2011 unicode_escape = codecs.getdecoder('unicode_escape')
81c2f20b 2012 return re.sub(
a612753d 2013 r'\\U[0-9a-fA-F]{8}',
2014 lambda m: unicode_escape(m.group(0))[0],
2015 s)
2016
2017
2018def lowercase_escape(s):
2019 unicode_escape = codecs.getdecoder('unicode_escape')
2020 return re.sub(
2021 r'\\u[0-9a-fA-F]{4}',
2022 lambda m: unicode_escape(m.group(0))[0],
2023 s)
b53466e1 2024
2025
2026def escape_rfc3986(s):
2027 """Escape non-ASCII characters as suggested by RFC 3986"""
8f9312c3 2028 if sys.version_info < (3, 0) and isinstance(s, compat_str):
d05cfe06 2029 s = s.encode('utf-8')
ecc0c5ee 2030 return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
2031
2032
2033def escape_url(url):
2034 """Escape URL as suggested by RFC 3986"""
2035 url_parsed = compat_urllib_parse_urlparse(url)
2036 return url_parsed._replace(
efbed08d 2037 netloc=url_parsed.netloc.encode('idna').decode('ascii'),
2038 path=escape_rfc3986(url_parsed.path),
2039 params=escape_rfc3986(url_parsed.params),
2040 query=escape_rfc3986(url_parsed.query),
2041 fragment=escape_rfc3986(url_parsed.fragment)
2042 ).geturl()
2043
2044
2045def read_batch_urls(batch_fd):
2046 def fixup(url):
2047 if not isinstance(url, compat_str):
2048 url = url.decode('utf-8', 'replace')
28e614de 2049 BOM_UTF8 = '\xef\xbb\xbf'
2050 if url.startswith(BOM_UTF8):
2051 url = url[len(BOM_UTF8):]
2052 url = url.strip()
2053 if url.startswith(('#', ';', ']')):
2054 return False
2055 return url
2056
2057 with contextlib.closing(batch_fd) as fd:
2058 return [url for url in map(fixup, fd) if url]
2059
2060
2061def urlencode_postdata(*args, **kargs):
15707c7e 2062 return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
2063
2064
38f9ef31 2065def update_url_query(url, query):
2066 if not query:
2067 return url
38f9ef31 2068 parsed_url = compat_urlparse.urlparse(url)
2069 qs = compat_parse_qs(parsed_url.query)
2070 qs.update(query)
2071 return compat_urlparse.urlunparse(parsed_url._replace(
15707c7e 2072 query=compat_urllib_parse_urlencode(qs, True)))
16392824 2073
8e60dc75 2074
2075def update_Request(req, url=None, data=None, headers={}, query={}):
2076 req_headers = req.headers.copy()
2077 req_headers.update(headers)
2078 req_data = data or req.data
2079 req_url = update_url_query(url or req.get_full_url(), query)
2080 req_get_method = req.get_method()
2081 if req_get_method == 'HEAD':
2082 req_type = HEADRequest
2083 elif req_get_method == 'PUT':
2084 req_type = PUTRequest
2085 else:
2086 req_type = compat_urllib_request.Request
2087 new_req = req_type(
2088 req_url, data=req_data, headers=req_headers,
2089 origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
2090 if hasattr(req, 'timeout'):
2091 new_req.timeout = req.timeout
2092 return new_req
2093
2094
86296ad2 2095def dict_get(d, key_or_keys, default=None, skip_false_values=True):
2096 if isinstance(key_or_keys, (list, tuple)):
2097 for key in key_or_keys:
2098 if key not in d or d[key] is None or skip_false_values and not d[key]:
2099 continue
2100 return d[key]
2101 return default
2102 return d.get(key_or_keys, default)
2103
2104
329ca3be 2105def try_get(src, getter, expected_type=None):
2106 if not isinstance(getter, (list, tuple)):
2107 getter = [getter]
2108 for get in getter:
2109 try:
2110 v = get(src)
2111 except (AttributeError, KeyError, TypeError, IndexError):
2112 pass
2113 else:
2114 if expected_type is None or isinstance(v, expected_type):
2115 return v
2116
2117
2118def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
2119 return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
2120
16392824 2121
2122US_RATINGS = {
2123 'G': 0,
2124 'PG': 10,
2125 'PG-13': 13,
2126 'R': 16,
2127 'NC': 18,
2128}
2129
2130
2131TV_PARENTAL_GUIDELINES = {
2132 'TV-Y': 0,
2133 'TV-Y7': 7,
2134 'TV-G': 0,
2135 'TV-PG': 0,
2136 'TV-14': 14,
2137 'TV-MA': 17,
2138}
2139
2140
146c80e2 2141def parse_age_limit(s):
2142 if type(s) == int:
2143 return s if 0 <= s <= 21 else None
2144 if not isinstance(s, compat_basestring):
d838b1bd 2145 return None
146c80e2 2146 m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
2147 if m:
2148 return int(m.group('age'))
2149 if s in US_RATINGS:
2150 return US_RATINGS[s]
2151 return TV_PARENTAL_GUIDELINES.get(s)
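# Illustrative doctest-style sketch (added annotation, values made up):
# parse_age_limit() accepts bare ages, MPAA ratings and TV parental guidelines:
#
#   >>> parse_age_limit('18+')
#   18
#   >>> parse_age_limit('PG-13')
#   13
#   >>> parse_age_limit('TV-MA')
#   17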
2152
2153
fac55558 2154def strip_jsonp(code):
609a61e3 2155 return re.sub(
5950cb1d 2156 r'(?s)^[a-zA-Z0-9_.$]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)
2157
2158
e05f6939 2159def js_to_json(code):
2160 COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*'
2161 SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
2162 INTEGER_TABLE = (
2163 (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
2164 (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
2165 )
2166
e05f6939 2167 def fix_kv(m):
2168 v = m.group(0)
2169 if v in ('true', 'false', 'null'):
2170 return v
b3ee552e 2171 elif v.startswith('/*') or v.startswith('//') or v == ',':
bd1e4844 2172 return ""
2173
2174 if v[0] in ("'", '"'):
2175 v = re.sub(r'(?s)\\.|"', lambda m: {
e7b6d122 2176 '"': '\\"',
bd1e4844 2177 "\\'": "'",
2178 '\\\n': '',
2179 '\\x': '\\u00',
2180 }.get(m.group(0), m.group(0)), v[1:-1])
2181
2182 for regex, base in INTEGER_TABLE:
2183 im = re.match(regex, v)
2184 if im:
e4659b45 2185 i = int(im.group(1), base)
2186 return '"%d":' % i if v.endswith(':') else '%d' % i
2187
e7b6d122 2188 return '"%s"' % v
e05f6939 2189
bd1e4844 2190 return re.sub(r'''(?sx)
2191 "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
2192 '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
4195096e 2193 {comment}|,(?={skip}[\]}}])|
bd1e4844 2194 [a-zA-Z_][.a-zA-Z_0-9]*|
2195 \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
2196 [0-9]+(?={skip}:)
2197 '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
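# Illustrative doctest-style sketch (added annotation, inputs made up):
# js_to_json() turns loose JavaScript object literals into strict JSON, quoting
# bare keys and decoding hex integers; comments and trailing commas are dropped:
#
#   >>> js_to_json("{abc: 'def'}")
#   '{"abc": "def"}'
#   >>> js_to_json('{duration: 0x3c}')
#   '{"duration": 60}'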
2198
2199
2200def qualities(quality_ids):
2201 """ Get a numeric quality value out of a list of possible values """
2202 def q(qid):
2203 try:
2204 return quality_ids.index(qid)
2205 except ValueError:
2206 return -1
2207 return q
2208
2209
2210DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
0a871f68 2211
2212
2213def limit_length(s, length):
2214 """ Add ellipses to overly long strings """
2215 if s is None:
2216 return None
2217 ELLIPSES = '...'
2218 if len(s) > length:
2219 return s[:length - len(ELLIPSES)] + ELLIPSES
2220 return s
2221
2222
2223def version_tuple(v):
5f9b8394 2224 return tuple(int(e) for e in re.split(r'[-.]', v))
2225
2226
2227def is_outdated_version(version, limit, assume_new=True):
2228 if not version:
2229 return not assume_new
2230 try:
2231 return version_tuple(version) < version_tuple(limit)
2232 except ValueError:
2233 return not assume_new
2234
2235
2236def ytdl_is_updateable():
2237 """ Returns if youtube-dl can be updated with -U """
2238 from zipimport import zipimporter
2239
2240 return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')
2241
2242
2243def args_to_str(args):
2244 # Get a short string representation for a subprocess command
702ccf2d 2245 return ' '.join(compat_shlex_quote(a) for a in args)
2246
2247
9b9c5355 2248def error_to_compat_str(err):
2249 err_str = str(err)
2250 # On python 2 error byte string must be decoded with proper
2251 # encoding rather than ascii
2252 if sys.version_info[0] < 3:
2253 err_str = err_str.decode(preferredencoding())
2254 return err_str
2255
2256
c460bdd5 2257def mimetype2ext(mt):
2258 if mt is None:
2259 return None
2260
2261 ext = {
2262 'audio/mp4': 'm4a',
2263 # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
2264 # it's the most popular one
2265 'audio/mpeg': 'mp3',
2266 }.get(mt)
2267 if ext is not None:
2268 return ext
2269
c460bdd5 2270 _, _, res = mt.rpartition('/')
6562d34a 2271 res = res.split(';')[0].strip().lower()
2272
2273 return {
f6861ec9 2274 '3gpp': '3gp',
cafcf657 2275 'smptett+xml': 'tt',
cafcf657 2276 'ttaf+xml': 'dfxp',
a0d8d704 2277 'ttml+xml': 'ttml',
f6861ec9 2278 'x-flv': 'flv',
2279 'x-mp4-fragmented': 'mp4',
2280 'x-ms-wmv': 'wmv',
2281 'mpegurl': 'm3u8',
2282 'x-mpegurl': 'm3u8',
2283 'vnd.apple.mpegurl': 'm3u8',
2284 'dash+xml': 'mpd',
b4173f15 2285 'f4m+xml': 'f4m',
f164b971 2286 'hds+xml': 'f4m',
e910fe2f 2287 'vnd.ms-sstr+xml': 'ism',
c2b2c7e1 2288 'quicktime': 'mov',
98ce1a3f 2289 'mp2t': 'ts',
2290 }.get(res, res)
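# Illustrative doctest-style sketch (added annotation, values made up):
#
#   >>> mimetype2ext('audio/mp4')
#   'm4a'
#   >>> mimetype2ext('application/x-mpegURL')
#   'm3u8'
#   >>> mimetype2ext('text/html; charset=utf-8')
#   'html'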
2291
2292
4f3c5e06 2293def parse_codecs(codecs_str):
2294 # http://tools.ietf.org/html/rfc6381
2295 if not codecs_str:
2296 return {}
2297 splited_codecs = list(filter(None, map(
2298 lambda str: str.strip(), codecs_str.strip().strip(',').split(','))))
2299 vcodec, acodec = None, None
2300 for full_codec in splited_codecs:
2301 codec = full_codec.split('.')[0]
2302 if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v'):
2303 if not vcodec:
2304 vcodec = full_codec
073ac122 2305 elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3'):
4f3c5e06 2306 if not acodec:
2307 acodec = full_codec
2308 else:
2309 write_string('WARNING: Unknown codec %s' % full_codec, sys.stderr)
2310 if not vcodec and not acodec:
2311 if len(splited_codecs) == 2:
2312 return {
2313 'vcodec': vcodec,
2314 'acodec': acodec,
2315 }
2316 elif len(splited_codecs) == 1:
2317 return {
2318 'vcodec': 'none',
2319 'acodec': vcodec,
2320 }
2321 else:
2322 return {
2323 'vcodec': vcodec or 'none',
2324 'acodec': acodec or 'none',
2325 }
2326 return {}
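# Illustrative doctest-style sketch (added annotation; the codecs string is an
# invented example): parse_codecs() splits an RFC 6381 codecs attribute into
# video and audio codecs:
#
#   >>> parse_codecs('avc1.64001f, mp4a.40.2') == {
#   ...     'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2'}
#   True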
2327
2328
2ccd1b10 2329def urlhandle_detect_ext(url_handle):
79298173 2330 getheader = url_handle.headers.get
2ccd1b10 2331
2332 cd = getheader('Content-Disposition')
2333 if cd:
2334 m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
2335 if m:
2336 e = determine_ext(m.group('filename'), default_ext=None)
2337 if e:
2338 return e
2339
c460bdd5 2340 return mimetype2ext(getheader('Content-Type'))
2341
2342
2343def encode_data_uri(data, mime_type):
2344 return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
2345
2346
05900629 2347def age_restricted(content_limit, age_limit):
6ec6cb4e 2348 """ Returns True iff the content should be blocked """
2349
2350 if age_limit is None: # No limit set
2351 return False
2352 if content_limit is None:
2353 return False # Content available for everyone
2354 return age_limit < content_limit
2355
2356
2357def is_html(first_bytes):
2358 """ Detect whether a file contains HTML by examining its first bytes. """
2359
2360 BOMS = [
2361 (b'\xef\xbb\xbf', 'utf-8'),
2362 (b'\x00\x00\xfe\xff', 'utf-32-be'),
2363 (b'\xff\xfe\x00\x00', 'utf-32-le'),
2364 (b'\xff\xfe', 'utf-16-le'),
2365 (b'\xfe\xff', 'utf-16-be'),
2366 ]
2367 for bom, enc in BOMS:
2368 if first_bytes.startswith(bom):
2369 s = first_bytes[len(bom):].decode(enc, 'replace')
2370 break
2371 else:
2372 s = first_bytes.decode('utf-8', 'replace')
2373
2374 return re.match(r'^\s*<', s)
2375
2376
2377def determine_protocol(info_dict):
2378 protocol = info_dict.get('protocol')
2379 if protocol is not None:
2380 return protocol
2381
2382 url = info_dict['url']
2383 if url.startswith('rtmp'):
2384 return 'rtmp'
2385 elif url.startswith('mms'):
2386 return 'mms'
2387 elif url.startswith('rtsp'):
2388 return 'rtsp'
2389
2390 ext = determine_ext(url)
2391 if ext == 'm3u8':
2392 return 'm3u8'
2393 elif ext == 'f4m':
2394 return 'f4m'
2395
2396 return compat_urllib_parse_urlparse(url).scheme
2397
2398
2399def render_table(header_row, data):
2400 """ Render a list of rows, each as a list of values """
2401 table = [header_row] + data
2402 max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)]
2403 format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s'
2404 return '\n'.join(format_str % tuple(row) for row in table)
2405
2406
2407def _match_one(filter_part, dct):
2408 COMPARISON_OPERATORS = {
2409 '<': operator.lt,
2410 '<=': operator.le,
2411 '>': operator.gt,
2412 '>=': operator.ge,
2413 '=': operator.eq,
2414 '!=': operator.ne,
2415 }
2416 operator_rex = re.compile(r'''(?x)\s*
2417 (?P<key>[a-z_]+)
2418 \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
2419 (?:
2420 (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
db13c16e 2421 (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)|
2422 (?P<strval>(?![0-9.])[a-z0-9A-Z]*)
2423 )
2424 \s*$
2425 ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
2426 m = operator_rex.search(filter_part)
2427 if m:
2428 op = COMPARISON_OPERATORS[m.group('op')]
e5a088dc 2429 actual_value = dct.get(m.group('key'))
2430 if (m.group('quotedstrval') is not None or
2431 m.group('strval') is not None or
2432 # If the original field is a string and the matching comparison value is
2433 # a number, we should respect the origin of the original field
2434 # and process comparison value as a string (see
2435 # https://github.com/rg3/youtube-dl/issues/11082).
2436 actual_value is not None and m.group('intval') is not None and
2437 isinstance(actual_value, compat_str)):
2438 if m.group('op') not in ('=', '!='):
2439 raise ValueError(
2440 'Operator %s does not support string values!' % m.group('op'))
2441 comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval')
2442 quote = m.group('quote')
2443 if quote is not None:
2444 comparison_value = comparison_value.replace(r'\%s' % quote, quote)
2445 else:
2446 try:
2447 comparison_value = int(m.group('intval'))
2448 except ValueError:
2449 comparison_value = parse_filesize(m.group('intval'))
2450 if comparison_value is None:
2451 comparison_value = parse_filesize(m.group('intval') + 'B')
2452 if comparison_value is None:
2453 raise ValueError(
2454 'Invalid integer value %r in filter part %r' % (
2455 m.group('intval'), filter_part))
2456 if actual_value is None:
2457 return m.group('none_inclusive')
2458 return op(actual_value, comparison_value)
2459
2460 UNARY_OPERATORS = {
2461 '': lambda v: v is not None,
2462 '!': lambda v: v is None,
2463 }
2464 operator_rex = re.compile(r'''(?x)\s*
2465 (?P<op>%s)\s*(?P<key>[a-z_]+)
2466 \s*$
2467 ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
2468 m = operator_rex.search(filter_part)
2469 if m:
2470 op = UNARY_OPERATORS[m.group('op')]
2471 actual_value = dct.get(m.group('key'))
2472 return op(actual_value)
2473
2474 raise ValueError('Invalid filter part %r' % filter_part)
2475
2476
2477def match_str(filter_str, dct):
2478 """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """
2479
2480 return all(
2481 _match_one(filter_part, dct) for filter_part in filter_str.split('&'))
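# Illustrative doctest-style sketch (added annotation; field names and values are
# invented): the filter mini-language joins comparisons with '&', and a trailing
# '?' on an operator lets a missing field pass:
#
#   >>> match_str('like_count > 100 & dislike_count <? 50',
#   ...           {'like_count': 190, 'dislike_count': 10})
#   True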
2482
2483
2484def match_filter_func(filter_str):
2485 def _match_func(info_dict):
2486 if match_str(filter_str, info_dict):
2487 return None
2488 else:
2489 video_title = info_dict.get('title', info_dict.get('id', 'video'))
2490 return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
2491 return _match_func
2492
2493
2494def parse_dfxp_time_expr(time_expr):
2495 if not time_expr:
d631d5f9 2496 return
2497
2498 mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
2499 if mobj:
2500 return float(mobj.group('time_offset'))
2501
db2fe38b 2502 mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
bf6427d2 2503 if mobj:
db2fe38b 2504 return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
2505
2506
2507def srt_subtitles_timecode(seconds):
2508 return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)
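# Illustrative doctest-style sketch (added annotation, value made up):
#
#   >>> srt_subtitles_timecode(3661.5)
#   '01:01:01,500'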
2509
2510
2511def dfxp2srt(dfxp_data):
2512 LEGACY_NAMESPACES = (
2513 ('http://www.w3.org/ns/ttml', [
2514 'http://www.w3.org/2004/11/ttaf1',
2515 'http://www.w3.org/2006/04/ttaf1',
2516 'http://www.w3.org/2006/10/ttaf1',
2517 ]),
2518 ('http://www.w3.org/ns/ttml#styling', [
2519 'http://www.w3.org/ns/ttml#style',
2520 ]),
2521 )
2522
2523 SUPPORTED_STYLING = [
2524 'color',
2525 'fontFamily',
2526 'fontSize',
2527 'fontStyle',
2528 'fontWeight',
2529 'textDecoration'
2530 ]
2531
2532 _x = functools.partial(xpath_with_ns, ns_map={
2533 'ttml': 'http://www.w3.org/ns/ttml',
5b995f71 2534 'tts': 'http://www.w3.org/ns/ttml#styling',
4e335771 2535 })
bf6427d2 2536
2537 styles = {}
2538 default_style = {}
2539
87de7069 2540 class TTMLPElementParser(object):
2541 _out = ''
2542 _unclosed_elements = []
2543 _applied_styles = []
bf6427d2 2544
2b14cb56 2545 def start(self, tag, attrib):
2546 if tag in (_x('ttml:br'), 'br'):
2547 self._out += '\n'
2548 else:
2549 unclosed_elements = []
2550 style = {}
2551 element_style_id = attrib.get('style')
2552 if default_style:
2553 style.update(default_style)
2554 if element_style_id:
2555 style.update(styles.get(element_style_id, {}))
2556 for prop in SUPPORTED_STYLING:
2557 prop_val = attrib.get(_x('tts:' + prop))
2558 if prop_val:
2559 style[prop] = prop_val
2560 if style:
2561 font = ''
2562 for k, v in sorted(style.items()):
2563 if self._applied_styles and self._applied_styles[-1].get(k) == v:
2564 continue
2565 if k == 'color':
2566 font += ' color="%s"' % v
2567 elif k == 'fontSize':
2568 font += ' size="%s"' % v
2569 elif k == 'fontFamily':
2570 font += ' face="%s"' % v
2571 elif k == 'fontWeight' and v == 'bold':
2572 self._out += '<b>'
2573 unclosed_elements.append('b')
2574 elif k == 'fontStyle' and v == 'italic':
2575 self._out += '<i>'
2576 unclosed_elements.append('i')
2577 elif k == 'textDecoration' and v == 'underline':
2578 self._out += '<u>'
2579 unclosed_elements.append('u')
2580 if font:
2581 self._out += '<font' + font + '>'
2582 unclosed_elements.append('font')
2583 applied_style = {}
2584 if self._applied_styles:
2585 applied_style.update(self._applied_styles[-1])
2586 applied_style.update(style)
2587 self._applied_styles.append(applied_style)
2588 self._unclosed_elements.append(unclosed_elements)
bf6427d2 2589
2b14cb56 2590 def end(self, tag):
2591 if tag not in (_x('ttml:br'), 'br'):
2592 unclosed_elements = self._unclosed_elements.pop()
2593 for element in reversed(unclosed_elements):
2594 self._out += '</%s>' % element
2595 if unclosed_elements and self._applied_styles:
2596 self._applied_styles.pop()
bf6427d2 2597
2b14cb56 2598 def data(self, data):
5b995f71 2599 self._out += data
2b14cb56 2600
2601 def close(self):
5b995f71 2602 return self._out.strip()
2b14cb56 2603
2604 def parse_node(node):
2605 target = TTMLPElementParser()
2606 parser = xml.etree.ElementTree.XMLParser(target=target)
2607 parser.feed(xml.etree.ElementTree.tostring(node))
2608 return parser.close()
bf6427d2 2609
2610 for k, v in LEGACY_NAMESPACES:
2611 for ns in v:
2612 dfxp_data = dfxp_data.replace(ns, k)
2613
36e6f62c 2614 dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
bf6427d2 2615 out = []
5b995f71 2616 paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
2617
2618 if not paras:
2619 raise ValueError('Invalid dfxp/TTML subtitle')
bf6427d2 2620
2621 repeat = False
2622 while True:
2623 for style in dfxp.findall(_x('.//ttml:style')):
2624 style_id = style.get('id')
2625 parent_style_id = style.get('style')
2626 if parent_style_id:
2627 if parent_style_id not in styles:
2628 repeat = True
2629 continue
2630 styles[style_id] = styles[parent_style_id].copy()
2631 for prop in SUPPORTED_STYLING:
2632 prop_val = style.get(_x('tts:' + prop))
2633 if prop_val:
2634 styles.setdefault(style_id, {})[prop] = prop_val
2635 if repeat:
2636 repeat = False
2637 else:
2638 break
2639
2640 for p in ('body', 'div'):
2641 ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
2642 if ele is None:
2643 continue
2644 style = styles.get(ele.get('style'))
2645 if not style:
2646 continue
2647 default_style.update(style)
2648
bf6427d2 2649 for para, index in zip(paras, itertools.count(1)):
d631d5f9 2650 begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
7dff0363 2651 end_time = parse_dfxp_time_expr(para.attrib.get('end'))
2652 dur = parse_dfxp_time_expr(para.attrib.get('dur'))
2653 if begin_time is None:
2654 continue
7dff0363 2655 if not end_time:
2656 if not dur:
2657 continue
2658 end_time = begin_time + dur
2659 out.append('%d\n%s --> %s\n%s\n\n' % (
2660 index,
2661 srt_subtitles_timecode(begin_time),
2662 srt_subtitles_timecode(end_time),
2663 parse_node(para)))
2664
2665 return ''.join(out)
2666
2667
2668def cli_option(params, command_option, param):
2669 param = params.get(param)
2670 if param:
2671 param = compat_str(param)
2672 return [command_option, param] if param is not None else []
2673
2674
2675def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
2676 param = params.get(param)
2677 assert isinstance(param, bool)
2678 if separator:
2679 return [command_option + separator + (true_value if param else false_value)]
2680 return [command_option, true_value if param else false_value]
2681
2682
2683def cli_valueless_option(params, command_option, param, expected_value=True):
2684 param = params.get(param)
2685 return [command_option] if param == expected_value else []
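# Illustrative doctest-style sketch (added annotation; the option names and
# params are invented examples): these helpers translate youtube-dl params into
# external-tool command-line flags:
#
#   >>> cli_option({'proxy': 'http://127.0.0.1:3128'}, '--proxy', 'proxy')
#   ['--proxy', 'http://127.0.0.1:3128']
#   >>> cli_bool_option({'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate')
#   ['--no-check-certificate', 'true']
#   >>> cli_valueless_option({}, '--quiet', 'quiet')
#   []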
2686
2687
2688def cli_configuration_args(params, param, default=[]):
2689 ex_args = params.get(param)
2690 if ex_args is None:
2691 return default
2692 assert isinstance(ex_args, list)
2693 return ex_args
2694
2695
2696class ISO639Utils(object):
2697 # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
2698 _lang_map = {
2699 'aa': 'aar',
2700 'ab': 'abk',
2701 'ae': 'ave',
2702 'af': 'afr',
2703 'ak': 'aka',
2704 'am': 'amh',
2705 'an': 'arg',
2706 'ar': 'ara',
2707 'as': 'asm',
2708 'av': 'ava',
2709 'ay': 'aym',
2710 'az': 'aze',
2711 'ba': 'bak',
2712 'be': 'bel',
2713 'bg': 'bul',
2714 'bh': 'bih',
2715 'bi': 'bis',
2716 'bm': 'bam',
2717 'bn': 'ben',
2718 'bo': 'bod',
2719 'br': 'bre',
2720 'bs': 'bos',
2721 'ca': 'cat',
2722 'ce': 'che',
2723 'ch': 'cha',
2724 'co': 'cos',
2725 'cr': 'cre',
2726 'cs': 'ces',
2727 'cu': 'chu',
2728 'cv': 'chv',
2729 'cy': 'cym',
2730 'da': 'dan',
2731 'de': 'deu',
2732 'dv': 'div',
2733 'dz': 'dzo',
2734 'ee': 'ewe',
2735 'el': 'ell',
2736 'en': 'eng',
2737 'eo': 'epo',
2738 'es': 'spa',
2739 'et': 'est',
2740 'eu': 'eus',
2741 'fa': 'fas',
2742 'ff': 'ful',
2743 'fi': 'fin',
2744 'fj': 'fij',
2745 'fo': 'fao',
2746 'fr': 'fra',
2747 'fy': 'fry',
2748 'ga': 'gle',
2749 'gd': 'gla',
2750 'gl': 'glg',
2751 'gn': 'grn',
2752 'gu': 'guj',
2753 'gv': 'glv',
2754 'ha': 'hau',
2755 'he': 'heb',
2756 'hi': 'hin',
2757 'ho': 'hmo',
2758 'hr': 'hrv',
2759 'ht': 'hat',
2760 'hu': 'hun',
2761 'hy': 'hye',
2762 'hz': 'her',
2763 'ia': 'ina',
2764 'id': 'ind',
2765 'ie': 'ile',
2766 'ig': 'ibo',
2767 'ii': 'iii',
2768 'ik': 'ipk',
2769 'io': 'ido',
2770 'is': 'isl',
2771 'it': 'ita',
2772 'iu': 'iku',
2773 'ja': 'jpn',
2774 'jv': 'jav',
2775 'ka': 'kat',
2776 'kg': 'kon',
2777 'ki': 'kik',
2778 'kj': 'kua',
2779 'kk': 'kaz',
2780 'kl': 'kal',
2781 'km': 'khm',
2782 'kn': 'kan',
2783 'ko': 'kor',
2784 'kr': 'kau',
2785 'ks': 'kas',
2786 'ku': 'kur',
2787 'kv': 'kom',
2788 'kw': 'cor',
2789 'ky': 'kir',
2790 'la': 'lat',
2791 'lb': 'ltz',
2792 'lg': 'lug',
2793 'li': 'lim',
2794 'ln': 'lin',
2795 'lo': 'lao',
2796 'lt': 'lit',
2797 'lu': 'lub',
2798 'lv': 'lav',
2799 'mg': 'mlg',
2800 'mh': 'mah',
2801 'mi': 'mri',
2802 'mk': 'mkd',
2803 'ml': 'mal',
2804 'mn': 'mon',
2805 'mr': 'mar',
2806 'ms': 'msa',
2807 'mt': 'mlt',
2808 'my': 'mya',
2809 'na': 'nau',
2810 'nb': 'nob',
2811 'nd': 'nde',
2812 'ne': 'nep',
2813 'ng': 'ndo',
2814 'nl': 'nld',
2815 'nn': 'nno',
2816 'no': 'nor',
2817 'nr': 'nbl',
2818 'nv': 'nav',
2819 'ny': 'nya',
2820 'oc': 'oci',
2821 'oj': 'oji',
2822 'om': 'orm',
2823 'or': 'ori',
2824 'os': 'oss',
2825 'pa': 'pan',
2826 'pi': 'pli',
2827 'pl': 'pol',
2828 'ps': 'pus',
2829 'pt': 'por',
2830 'qu': 'que',
2831 'rm': 'roh',
2832 'rn': 'run',
2833 'ro': 'ron',
2834 'ru': 'rus',
2835 'rw': 'kin',
2836 'sa': 'san',
2837 'sc': 'srd',
2838 'sd': 'snd',
2839 'se': 'sme',
2840 'sg': 'sag',
2841 'si': 'sin',
2842 'sk': 'slk',
2843 'sl': 'slv',
2844 'sm': 'smo',
2845 'sn': 'sna',
2846 'so': 'som',
2847 'sq': 'sqi',
2848 'sr': 'srp',
2849 'ss': 'ssw',
2850 'st': 'sot',
2851 'su': 'sun',
2852 'sv': 'swe',
2853 'sw': 'swa',
2854 'ta': 'tam',
2855 'te': 'tel',
2856 'tg': 'tgk',
2857 'th': 'tha',
2858 'ti': 'tir',
2859 'tk': 'tuk',
2860 'tl': 'tgl',
2861 'tn': 'tsn',
2862 'to': 'ton',
2863 'tr': 'tur',
2864 'ts': 'tso',
2865 'tt': 'tat',
2866 'tw': 'twi',
2867 'ty': 'tah',
2868 'ug': 'uig',
2869 'uk': 'ukr',
2870 'ur': 'urd',
2871 'uz': 'uzb',
2872 've': 'ven',
2873 'vi': 'vie',
2874 'vo': 'vol',
2875 'wa': 'wln',
2876 'wo': 'wol',
2877 'xh': 'xho',
2878 'yi': 'yid',
2879 'yo': 'yor',
2880 'za': 'zha',
2881 'zh': 'zho',
2882 'zu': 'zul',
2883 }
2884
2885 @classmethod
2886 def short2long(cls, code):
2887 """Convert language code from ISO 639-1 to ISO 639-2/T"""
2888 return cls._lang_map.get(code[:2])
2889
2890 @classmethod
2891 def long2short(cls, code):
2892 """Convert language code from ISO 639-2/T to ISO 639-1"""
2893 for short_name, long_name in cls._lang_map.items():
2894 if long_name == code:
2895 return short_name
2896
2897
2898class ISO3166Utils(object):
2899 # From http://data.okfn.org/data/core/country-list
2900 _country_map = {
2901 'AF': 'Afghanistan',
2902 'AX': 'Åland Islands',
2903 'AL': 'Albania',
2904 'DZ': 'Algeria',
2905 'AS': 'American Samoa',
2906 'AD': 'Andorra',
2907 'AO': 'Angola',
2908 'AI': 'Anguilla',
2909 'AQ': 'Antarctica',
2910 'AG': 'Antigua and Barbuda',
2911 'AR': 'Argentina',
2912 'AM': 'Armenia',
2913 'AW': 'Aruba',
2914 'AU': 'Australia',
2915 'AT': 'Austria',
2916 'AZ': 'Azerbaijan',
2917 'BS': 'Bahamas',
2918 'BH': 'Bahrain',
2919 'BD': 'Bangladesh',
2920 'BB': 'Barbados',
2921 'BY': 'Belarus',
2922 'BE': 'Belgium',
2923 'BZ': 'Belize',
2924 'BJ': 'Benin',
2925 'BM': 'Bermuda',
2926 'BT': 'Bhutan',
2927 'BO': 'Bolivia, Plurinational State of',
2928 'BQ': 'Bonaire, Sint Eustatius and Saba',
2929 'BA': 'Bosnia and Herzegovina',
2930 'BW': 'Botswana',
2931 'BV': 'Bouvet Island',
2932 'BR': 'Brazil',
2933 'IO': 'British Indian Ocean Territory',
2934 'BN': 'Brunei Darussalam',
2935 'BG': 'Bulgaria',
2936 'BF': 'Burkina Faso',
2937 'BI': 'Burundi',
2938 'KH': 'Cambodia',
2939 'CM': 'Cameroon',
2940 'CA': 'Canada',
2941 'CV': 'Cape Verde',
2942 'KY': 'Cayman Islands',
2943 'CF': 'Central African Republic',
2944 'TD': 'Chad',
2945 'CL': 'Chile',
2946 'CN': 'China',
2947 'CX': 'Christmas Island',
2948 'CC': 'Cocos (Keeling) Islands',
2949 'CO': 'Colombia',
2950 'KM': 'Comoros',
2951 'CG': 'Congo',
2952 'CD': 'Congo, the Democratic Republic of the',
2953 'CK': 'Cook Islands',
2954 'CR': 'Costa Rica',
2955 'CI': 'Côte d\'Ivoire',
2956 'HR': 'Croatia',
2957 'CU': 'Cuba',
2958 'CW': 'Curaçao',
2959 'CY': 'Cyprus',
2960 'CZ': 'Czech Republic',
2961 'DK': 'Denmark',
2962 'DJ': 'Djibouti',
2963 'DM': 'Dominica',
2964 'DO': 'Dominican Republic',
2965 'EC': 'Ecuador',
2966 'EG': 'Egypt',
2967 'SV': 'El Salvador',
2968 'GQ': 'Equatorial Guinea',
2969 'ER': 'Eritrea',
2970 'EE': 'Estonia',
2971 'ET': 'Ethiopia',
2972 'FK': 'Falkland Islands (Malvinas)',
2973 'FO': 'Faroe Islands',
2974 'FJ': 'Fiji',
2975 'FI': 'Finland',
2976 'FR': 'France',
2977 'GF': 'French Guiana',
2978 'PF': 'French Polynesia',
2979 'TF': 'French Southern Territories',
2980 'GA': 'Gabon',
2981 'GM': 'Gambia',
2982 'GE': 'Georgia',
2983 'DE': 'Germany',
2984 'GH': 'Ghana',
2985 'GI': 'Gibraltar',
2986 'GR': 'Greece',
2987 'GL': 'Greenland',
2988 'GD': 'Grenada',
2989 'GP': 'Guadeloupe',
2990 'GU': 'Guam',
2991 'GT': 'Guatemala',
2992 'GG': 'Guernsey',
2993 'GN': 'Guinea',
2994 'GW': 'Guinea-Bissau',
2995 'GY': 'Guyana',
2996 'HT': 'Haiti',
2997 'HM': 'Heard Island and McDonald Islands',
2998 'VA': 'Holy See (Vatican City State)',
2999 'HN': 'Honduras',
3000 'HK': 'Hong Kong',
3001 'HU': 'Hungary',
3002 'IS': 'Iceland',
3003 'IN': 'India',
3004 'ID': 'Indonesia',
3005 'IR': 'Iran, Islamic Republic of',
3006 'IQ': 'Iraq',
3007 'IE': 'Ireland',
3008 'IM': 'Isle of Man',
3009 'IL': 'Israel',
3010 'IT': 'Italy',
3011 'JM': 'Jamaica',
3012 'JP': 'Japan',
3013 'JE': 'Jersey',
3014 'JO': 'Jordan',
3015 'KZ': 'Kazakhstan',
3016 'KE': 'Kenya',
3017 'KI': 'Kiribati',
3018 'KP': 'Korea, Democratic People\'s Republic of',
3019 'KR': 'Korea, Republic of',
3020 'KW': 'Kuwait',
3021 'KG': 'Kyrgyzstan',
3022 'LA': 'Lao People\'s Democratic Republic',
3023 'LV': 'Latvia',
3024 'LB': 'Lebanon',
3025 'LS': 'Lesotho',
3026 'LR': 'Liberia',
3027 'LY': 'Libya',
3028 'LI': 'Liechtenstein',
3029 'LT': 'Lithuania',
3030 'LU': 'Luxembourg',
3031 'MO': 'Macao',
3032 'MK': 'Macedonia, the Former Yugoslav Republic of',
3033 'MG': 'Madagascar',
3034 'MW': 'Malawi',
3035 'MY': 'Malaysia',
3036 'MV': 'Maldives',
3037 'ML': 'Mali',
3038 'MT': 'Malta',
3039 'MH': 'Marshall Islands',
3040 'MQ': 'Martinique',
3041 'MR': 'Mauritania',
3042 'MU': 'Mauritius',
3043 'YT': 'Mayotte',
3044 'MX': 'Mexico',
3045 'FM': 'Micronesia, Federated States of',
3046 'MD': 'Moldova, Republic of',
3047 'MC': 'Monaco',
3048 'MN': 'Mongolia',
3049 'ME': 'Montenegro',
3050 'MS': 'Montserrat',
3051 'MA': 'Morocco',
3052 'MZ': 'Mozambique',
3053 'MM': 'Myanmar',
3054 'NA': 'Namibia',
3055 'NR': 'Nauru',
3056 'NP': 'Nepal',
3057 'NL': 'Netherlands',
3058 'NC': 'New Caledonia',
3059 'NZ': 'New Zealand',
3060 'NI': 'Nicaragua',
3061 'NE': 'Niger',
3062 'NG': 'Nigeria',
3063 'NU': 'Niue',
3064 'NF': 'Norfolk Island',
3065 'MP': 'Northern Mariana Islands',
3066 'NO': 'Norway',
3067 'OM': 'Oman',
3068 'PK': 'Pakistan',
3069 'PW': 'Palau',
3070 'PS': 'Palestine, State of',
3071 'PA': 'Panama',
3072 'PG': 'Papua New Guinea',
3073 'PY': 'Paraguay',
3074 'PE': 'Peru',
3075 'PH': 'Philippines',
3076 'PN': 'Pitcairn',
3077 'PL': 'Poland',
3078 'PT': 'Portugal',
3079 'PR': 'Puerto Rico',
3080 'QA': 'Qatar',
3081 'RE': 'Réunion',
3082 'RO': 'Romania',
3083 'RU': 'Russian Federation',
3084 'RW': 'Rwanda',
3085 'BL': 'Saint Barthélemy',
3086 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
3087 'KN': 'Saint Kitts and Nevis',
3088 'LC': 'Saint Lucia',
3089 'MF': 'Saint Martin (French part)',
3090 'PM': 'Saint Pierre and Miquelon',
3091 'VC': 'Saint Vincent and the Grenadines',
3092 'WS': 'Samoa',
3093 'SM': 'San Marino',
3094 'ST': 'Sao Tome and Principe',
3095 'SA': 'Saudi Arabia',
3096 'SN': 'Senegal',
3097 'RS': 'Serbia',
3098 'SC': 'Seychelles',
3099 'SL': 'Sierra Leone',
3100 'SG': 'Singapore',
3101 'SX': 'Sint Maarten (Dutch part)',
3102 'SK': 'Slovakia',
3103 'SI': 'Slovenia',
3104 'SB': 'Solomon Islands',
3105 'SO': 'Somalia',
3106 'ZA': 'South Africa',
3107 'GS': 'South Georgia and the South Sandwich Islands',
3108 'SS': 'South Sudan',
3109 'ES': 'Spain',
3110 'LK': 'Sri Lanka',
3111 'SD': 'Sudan',
3112 'SR': 'Suriname',
3113 'SJ': 'Svalbard and Jan Mayen',
3114 'SZ': 'Swaziland',
3115 'SE': 'Sweden',
3116 'CH': 'Switzerland',
3117 'SY': 'Syrian Arab Republic',
3118 'TW': 'Taiwan, Province of China',
3119 'TJ': 'Tajikistan',
3120 'TZ': 'Tanzania, United Republic of',
3121 'TH': 'Thailand',
3122 'TL': 'Timor-Leste',
3123 'TG': 'Togo',
3124 'TK': 'Tokelau',
3125 'TO': 'Tonga',
3126 'TT': 'Trinidad and Tobago',
3127 'TN': 'Tunisia',
3128 'TR': 'Turkey',
3129 'TM': 'Turkmenistan',
3130 'TC': 'Turks and Caicos Islands',
3131 'TV': 'Tuvalu',
3132 'UG': 'Uganda',
3133 'UA': 'Ukraine',
3134 'AE': 'United Arab Emirates',
3135 'GB': 'United Kingdom',
3136 'US': 'United States',
3137 'UM': 'United States Minor Outlying Islands',
3138 'UY': 'Uruguay',
3139 'UZ': 'Uzbekistan',
3140 'VU': 'Vanuatu',
3141 'VE': 'Venezuela, Bolivarian Republic of',
3142 'VN': 'Viet Nam',
3143 'VG': 'Virgin Islands, British',
3144 'VI': 'Virgin Islands, U.S.',
3145 'WF': 'Wallis and Futuna',
3146 'EH': 'Western Sahara',
3147 'YE': 'Yemen',
3148 'ZM': 'Zambia',
3149 'ZW': 'Zimbabwe',
3150 }
3151
3152 @classmethod
3153 def short2full(cls, code):
3154 """Convert an ISO 3166-2 country code to the corresponding full name"""
3155 return cls._country_map.get(code.upper())
3156
3157
3158class GeoUtils(object):
3159 # Major IPv4 address blocks per country
3160 _country_ip_map = {
3161 'AD': '85.94.160.0/19',
3162 'AE': '94.200.0.0/13',
3163 'AF': '149.54.0.0/17',
3164 'AG': '209.59.64.0/18',
3165 'AI': '204.14.248.0/21',
3166 'AL': '46.99.0.0/16',
3167 'AM': '46.70.0.0/15',
3168 'AO': '105.168.0.0/13',
3169 'AP': '159.117.192.0/21',
3170 'AR': '181.0.0.0/12',
3171 'AS': '202.70.112.0/20',
3172 'AT': '84.112.0.0/13',
3173 'AU': '1.128.0.0/11',
3174 'AW': '181.41.0.0/18',
3175 'AZ': '5.191.0.0/16',
3176 'BA': '31.176.128.0/17',
3177 'BB': '65.48.128.0/17',
3178 'BD': '114.130.0.0/16',
3179 'BE': '57.0.0.0/8',
3180 'BF': '129.45.128.0/17',
3181 'BG': '95.42.0.0/15',
3182 'BH': '37.131.0.0/17',
3183 'BI': '154.117.192.0/18',
3184 'BJ': '137.255.0.0/16',
3185 'BL': '192.131.134.0/24',
3186 'BM': '196.12.64.0/18',
3187 'BN': '156.31.0.0/16',
3188 'BO': '161.56.0.0/16',
3189 'BQ': '161.0.80.0/20',
3190 'BR': '152.240.0.0/12',
3191 'BS': '24.51.64.0/18',
3192 'BT': '119.2.96.0/19',
3193 'BW': '168.167.0.0/16',
3194 'BY': '178.120.0.0/13',
3195 'BZ': '179.42.192.0/18',
3196 'CA': '99.224.0.0/11',
3197 'CD': '41.243.0.0/16',
3198 'CF': '196.32.200.0/21',
3199 'CG': '197.214.128.0/17',
3200 'CH': '85.0.0.0/13',
3201 'CI': '154.232.0.0/14',
3202 'CK': '202.65.32.0/19',
3203 'CL': '152.172.0.0/14',
3204 'CM': '165.210.0.0/15',
3205 'CN': '36.128.0.0/10',
3206 'CO': '181.240.0.0/12',
3207 'CR': '201.192.0.0/12',
3208 'CU': '152.206.0.0/15',
3209 'CV': '165.90.96.0/19',
3210 'CW': '190.88.128.0/17',
3211 'CY': '46.198.0.0/15',
3212 'CZ': '88.100.0.0/14',
3213 'DE': '53.0.0.0/8',
3214 'DJ': '197.241.0.0/17',
3215 'DK': '87.48.0.0/12',
3216 'DM': '192.243.48.0/20',
3217 'DO': '152.166.0.0/15',
3218 'DZ': '41.96.0.0/12',
3219 'EC': '186.68.0.0/15',
3220 'EE': '90.190.0.0/15',
3221 'EG': '156.160.0.0/11',
3222 'ER': '196.200.96.0/20',
3223 'ES': '88.0.0.0/11',
3224 'ET': '196.188.0.0/14',
3225 'EU': '2.16.0.0/13',
3226 'FI': '91.152.0.0/13',
3227 'FJ': '144.120.0.0/16',
3228 'FM': '119.252.112.0/20',
3229 'FO': '88.85.32.0/19',
3230 'FR': '90.0.0.0/9',
3231 'GA': '41.158.0.0/15',
3232 'GB': '25.0.0.0/8',
3233 'GD': '74.122.88.0/21',
3234 'GE': '31.146.0.0/16',
3235 'GF': '161.22.64.0/18',
3236 'GG': '62.68.160.0/19',
3237 'GH': '45.208.0.0/14',
3238 'GI': '85.115.128.0/19',
3239 'GL': '88.83.0.0/19',
3240 'GM': '160.182.0.0/15',
3241 'GN': '197.149.192.0/18',
3242 'GP': '104.250.0.0/19',
3243 'GQ': '105.235.224.0/20',
3244 'GR': '94.64.0.0/13',
3245 'GT': '168.234.0.0/16',
3246 'GU': '168.123.0.0/16',
3247 'GW': '197.214.80.0/20',
3248 'GY': '181.41.64.0/18',
3249 'HK': '113.252.0.0/14',
3250 'HN': '181.210.0.0/16',
3251 'HR': '93.136.0.0/13',
3252 'HT': '148.102.128.0/17',
3253 'HU': '84.0.0.0/14',
3254 'ID': '39.192.0.0/10',
3255 'IE': '87.32.0.0/12',
3256 'IL': '79.176.0.0/13',
3257 'IM': '5.62.80.0/20',
3258 'IN': '117.192.0.0/10',
3259 'IO': '203.83.48.0/21',
3260 'IQ': '37.236.0.0/14',
3261 'IR': '2.176.0.0/12',
3262 'IS': '82.221.0.0/16',
3263 'IT': '79.0.0.0/10',
3264 'JE': '87.244.64.0/18',
3265 'JM': '72.27.0.0/17',
3266 'JO': '176.29.0.0/16',
3267 'JP': '126.0.0.0/8',
3268 'KE': '105.48.0.0/12',
3269 'KG': '158.181.128.0/17',
3270 'KH': '36.37.128.0/17',
3271 'KI': '103.25.140.0/22',
3272 'KM': '197.255.224.0/20',
3273 'KN': '198.32.32.0/19',
3274 'KP': '175.45.176.0/22',
3275 'KR': '175.192.0.0/10',
3276 'KW': '37.36.0.0/14',
3277 'KY': '64.96.0.0/15',
3278 'KZ': '2.72.0.0/13',
3279 'LA': '115.84.64.0/18',
3280 'LB': '178.135.0.0/16',
3281 'LC': '192.147.231.0/24',
3282 'LI': '82.117.0.0/19',
3283 'LK': '112.134.0.0/15',
3284 'LR': '41.86.0.0/19',
3285 'LS': '129.232.0.0/17',
3286 'LT': '78.56.0.0/13',
3287 'LU': '188.42.0.0/16',
3288 'LV': '46.109.0.0/16',
3289 'LY': '41.252.0.0/14',
3290 'MA': '105.128.0.0/11',
3291 'MC': '88.209.64.0/18',
3292 'MD': '37.246.0.0/16',
3293 'ME': '178.175.0.0/17',
3294 'MF': '74.112.232.0/21',
3295 'MG': '154.126.0.0/17',
3296 'MH': '117.103.88.0/21',
3297 'MK': '77.28.0.0/15',
3298 'ML': '154.118.128.0/18',
3299 'MM': '37.111.0.0/17',
3300 'MN': '49.0.128.0/17',
3301 'MO': '60.246.0.0/16',
3302 'MP': '202.88.64.0/20',
3303 'MQ': '109.203.224.0/19',
3304 'MR': '41.188.64.0/18',
3305 'MS': '208.90.112.0/22',
3306 'MT': '46.11.0.0/16',
3307 'MU': '105.16.0.0/12',
3308 'MV': '27.114.128.0/18',
3309 'MW': '105.234.0.0/16',
3310 'MX': '187.192.0.0/11',
3311 'MY': '175.136.0.0/13',
3312 'MZ': '197.218.0.0/15',
3313 'NA': '41.182.0.0/16',
3314 'NC': '101.101.0.0/18',
3315 'NE': '197.214.0.0/18',
3316 'NF': '203.17.240.0/22',
3317 'NG': '105.112.0.0/12',
3318 'NI': '186.76.0.0/15',
3319 'NL': '145.96.0.0/11',
3320 'NO': '84.208.0.0/13',
3321 'NP': '36.252.0.0/15',
3322 'NR': '203.98.224.0/19',
3323 'NU': '49.156.48.0/22',
3324 'NZ': '49.224.0.0/14',
3325 'OM': '5.36.0.0/15',
3326 'PA': '186.72.0.0/15',
3327 'PE': '186.160.0.0/14',
3328 'PF': '123.50.64.0/18',
3329 'PG': '124.240.192.0/19',
3330 'PH': '49.144.0.0/13',
3331 'PK': '39.32.0.0/11',
3332 'PL': '83.0.0.0/11',
3333 'PM': '70.36.0.0/20',
3334 'PR': '66.50.0.0/16',
3335 'PS': '188.161.0.0/16',
3336 'PT': '85.240.0.0/13',
3337 'PW': '202.124.224.0/20',
3338 'PY': '181.120.0.0/14',
3339 'QA': '37.210.0.0/15',
3340 'RE': '139.26.0.0/16',
3341 'RO': '79.112.0.0/13',
3342 'RS': '178.220.0.0/14',
3343 'RU': '5.136.0.0/13',
3344 'RW': '105.178.0.0/15',
3345 'SA': '188.48.0.0/13',
3346 'SB': '202.1.160.0/19',
3347 'SC': '154.192.0.0/11',
3348 'SD': '154.96.0.0/13',
3349 'SE': '78.64.0.0/12',
3350 'SG': '152.56.0.0/14',
3351 'SI': '188.196.0.0/14',
3352 'SK': '78.98.0.0/15',
3353 'SL': '197.215.0.0/17',
3354 'SM': '89.186.32.0/19',
3355 'SN': '41.82.0.0/15',
3356 'SO': '197.220.64.0/19',
3357 'SR': '186.179.128.0/17',
3358 'SS': '105.235.208.0/21',
3359 'ST': '197.159.160.0/19',
3360 'SV': '168.243.0.0/16',
3361 'SX': '190.102.0.0/20',
3362 'SY': '5.0.0.0/16',
3363 'SZ': '41.84.224.0/19',
3364 'TC': '65.255.48.0/20',
3365 'TD': '154.68.128.0/19',
3366 'TG': '196.168.0.0/14',
3367 'TH': '171.96.0.0/13',
3368 'TJ': '85.9.128.0/18',
3369 'TK': '27.96.24.0/21',
3370 'TL': '180.189.160.0/20',
3371 'TM': '95.85.96.0/19',
3372 'TN': '197.0.0.0/11',
3373 'TO': '175.176.144.0/21',
3374 'TR': '78.160.0.0/11',
3375 'TT': '186.44.0.0/15',
3376 'TV': '202.2.96.0/19',
3377 'TW': '120.96.0.0/11',
3378 'TZ': '156.156.0.0/14',
3379 'UA': '93.72.0.0/13',
3380 'UG': '154.224.0.0/13',
3381 'US': '3.0.0.0/8',
3382 'UY': '167.56.0.0/13',
3383 'UZ': '82.215.64.0/18',
3384 'VA': '212.77.0.0/19',
3385 'VC': '24.92.144.0/20',
3386 'VE': '186.88.0.0/13',
3387 'VG': '172.103.64.0/18',
3388 'VI': '146.226.0.0/16',
3389 'VN': '14.160.0.0/11',
3390 'VU': '202.80.32.0/20',
3391 'WF': '117.20.32.0/21',
3392 'WS': '202.4.32.0/19',
3393 'YE': '134.35.0.0/16',
3394 'YT': '41.242.116.0/22',
3395 'ZA': '41.0.0.0/11',
3396 'ZM': '165.56.0.0/13',
3397 'ZW': '41.85.192.0/19',
3398 }
3399
3400 @classmethod
3401 def random_ipv4(cls, code):
3402 block = cls._country_ip_map.get(code.upper())
3403 if not block:
3404 return None
3405 addr, preflen = block.split('/')
3406 addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
3407 addr_max = addr_min | (0xffffffff >> int(preflen))
18a0defa 3408 return compat_str(socket.inet_ntoa(
4248dad9 3409 compat_struct_pack('!L', random.randint(addr_min, addr_max))))
3410
3411
91410c9b 3412class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
3413 def __init__(self, proxies=None):
3414 # Set default handlers
3415 for type in ('http', 'https'):
3416 setattr(self, '%s_open' % type,
3417 lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
3418 meth(r, proxy, type))
3419 return compat_urllib_request.ProxyHandler.__init__(self, proxies)
3420
91410c9b 3421 def proxy_open(self, req, proxy, type):
2461f79d 3422 req_proxy = req.headers.get('Ytdl-request-proxy')
3423 if req_proxy is not None:
3424 proxy = req_proxy
3425 del req.headers['Ytdl-request-proxy']
3426
3427 if proxy == '__noproxy__':
3428 return None # No Proxy
51fb4995 3429 if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
3430 req.add_header('Ytdl-socks-proxy', proxy)
3431 # youtube-dl's http/https handlers take care of wrapping the socket with SOCKS
3432 return None
3433 return compat_urllib_request.ProxyHandler.proxy_open(
3434 self, req, proxy, type)
3435
3436
3437# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
3438# released into Public Domain
3439# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
3440
3441def long_to_bytes(n, blocksize=0):
3442 """long_to_bytes(n:long, blocksize:int) : string
3443 Convert a long integer to a byte string.
3444
3445 If optional blocksize is given and greater than zero, pad the front of the
3446 byte string with binary zeros so that the length is a multiple of
3447 blocksize.
3448 """
3449 # after much testing, this algorithm was deemed to be the fastest
3450 s = b''
3451 n = int(n)
3452 while n > 0:
3453 s = compat_struct_pack('>I', n & 0xffffffff) + s
3454 n = n >> 32
3455 # strip off leading zeros
3456 for i in range(len(s)):
3457 if s[i] != b'\000'[0]:
3458 break
3459 else:
3460 # only happens when n == 0
3461 s = b'\000'
3462 i = 0
3463 s = s[i:]
3464 # add back some pad bytes. this could be done more efficiently w.r.t. the
3465 # de-padding being done above, but sigh...
3466 if blocksize > 0 and len(s) % blocksize:
3467 s = (blocksize - len(s) % blocksize) * b'\000' + s
3468 return s
3469
3470
3471def bytes_to_long(s):
3472 """bytes_to_long(string) : long
3473 Convert a byte string to a long integer.
3474
3475 This is (essentially) the inverse of long_to_bytes().
3476 """
3477 acc = 0
3478 length = len(s)
3479 if length % 4:
3480 extra = (4 - length % 4)
3481 s = b'\000' * extra + s
3482 length = length + extra
3483 for i in range(0, length, 4):
3484 acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
3485 return acc
3486
3487
3488def ohdave_rsa_encrypt(data, exponent, modulus):
3489 '''
3490 Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
3491
3492 Input:
3493 data: data to encrypt, bytes-like object
3494 exponent, modulus: parameter e and N of RSA algorithm, both integer
3495 Output: hex string of encrypted data
3496
3497 Limitation: supports one block encryption only
3498 '''
3499
3500 payload = int(binascii.hexlify(data[::-1]), 16)
3501 encrypted = pow(payload, exponent, modulus)
3502 return '%x' % encrypted
3503
3504
3505def pkcs1pad(data, length):
3506 """
3507 Padding input data with PKCS#1 scheme
3508
3509 @param {int[]} data input data
3510 @param {int} length target length
3511 @returns {int[]} padded data
3512 """
3513 if len(data) > length - 11:
3514 raise ValueError('Input data too long for PKCS#1 padding')
3515
3516 pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
3517 return [0, 2] + pseudo_random + [0] + data
3518
3519
5eb6bdce 3520def encode_base_n(num, n, table=None):
59f898b7 3521 FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
3522 if not table:
3523 table = FULL_TABLE[:n]
3524
3525 if n > len(table):
3526 raise ValueError('base %d exceeds table length %d' % (n, len(table)))
3527
3528 if num == 0:
3529 return table[0]
3530
3531 ret = ''
3532 while num:
3533 ret = table[num % n] + ret
3534 num = num // n
3535 return ret
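# Illustrative doctest-style sketch (added annotation, values made up):
# encode_base_n() is the building block used by decode_packed_codes() below:
#
#   >>> encode_base_n(42, 16)
#   '2a'
#   >>> encode_base_n(255, 2)
#   '11111111'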
3536
3537
3538def decode_packed_codes(code):
06b3fe29 3539 mobj = re.search(PACKED_CODES_RE, code)
3540 obfuscated_code, base, count, symbols = mobj.groups()
3541 base = int(base)
3542 count = int(count)
3543 symbols = symbols.split('|')
3544 symbol_table = {}
3545
3546 while count:
3547 count -= 1
5eb6bdce 3548 base_n_count = encode_base_n(count, base)
3549 symbol_table[base_n_count] = symbols[count] or base_n_count
3550
3551 return re.sub(
3552 r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
3553 obfuscated_code)
def parse_m3u8_attributes(attrib):
    info = {}
    for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
        if val.startswith('"'):
            val = val[1:-1]
        info[key] = val
    return info
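
# Illustrative sketch (not part of the original module): quoted values may
# contain commas and are returned without their quotes. The attribute line
# below is a made-up example in the EXT-X-STREAM-INF style.
def _example_parse_m3u8_attributes():
    attrib = 'BANDWIDTH=1280000,CODECS="avc1.4d401f,mp4a.40.2",RESOLUTION=1280x720'
    assert parse_m3u8_attributes(attrib) == {
        'BANDWIDTH': '1280000',
        'CODECS': 'avc1.4d401f,mp4a.40.2',
        'RESOLUTION': '1280x720',
    }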
def urshift(val, n):
    return val >> n if val >= 0 else (val + 0x100000000) >> n
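
# Illustrative sketch (not part of the original module): this mimics
# JavaScript's unsigned right shift (>>>) for 32-bit values, where negative
# numbers are first reinterpreted as unsigned.
def _example_urshift():
    assert urshift(16, 2) == 4
    assert urshift(-1, 0) == 0xFFFFFFFF
    assert urshift(-8, 2) == 0x3FFFFFFE  # same as -8 >>> 2 in JavaScript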
# Based on png2str() written by @gdkchan and improved by @yokrysty
# Originally posted at https://github.com/rg3/youtube-dl/issues/9706
def decode_png(png_data):
    # Reference: https://www.w3.org/TR/PNG/
    header = png_data[8:]

    if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
        raise IOError('Not a valid PNG file.')

    int_map = {1: '>B', 2: '>H', 4: '>I'}
    unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]

    chunks = []

    while header:
        length = unpack_integer(header[:4])
        header = header[4:]

        chunk_type = header[:4]
        header = header[4:]

        chunk_data = header[:length]
        header = header[length:]

        header = header[4:]  # Skip CRC

        chunks.append({
            'type': chunk_type,
            'length': length,
            'data': chunk_data
        })

    ihdr = chunks[0]['data']

    width = unpack_integer(ihdr[:4])
    height = unpack_integer(ihdr[4:8])

    idat = b''

    for chunk in chunks:
        if chunk['type'] == b'IDAT':
            idat += chunk['data']

    if not idat:
        raise IOError('Unable to read PNG data.')

    decompressed_data = bytearray(zlib.decompress(idat))

    # The decoding below assumes 8-bit truecolour (RGB) data without
    # interlacing, i.e. 3 bytes per pixel plus one filter-type byte per scanline.
    stride = width * 3
    pixels = []

    def _get_pixel(idx):
        x = idx % stride
        y = idx // stride
        return pixels[y][x]

    for y in range(height):
        basePos = y * (1 + stride)
        filter_type = decompressed_data[basePos]

        current_row = []

        pixels.append(current_row)

        for x in range(stride):
            color = decompressed_data[1 + basePos + x]
            basex = y * stride + x
            left = 0
            up = 0

            if x > 2:
                left = _get_pixel(basex - 3)
            if y > 0:
                up = _get_pixel(basex - stride)

            if filter_type == 1:  # Sub
                color = (color + left) & 0xff
            elif filter_type == 2:  # Up
                color = (color + up) & 0xff
            elif filter_type == 3:  # Average
                color = (color + ((left + up) >> 1)) & 0xff
            elif filter_type == 4:  # Paeth
                a = left
                b = up
                c = 0

                if x > 2 and y > 0:
                    c = _get_pixel(basex - stride - 3)

                p = a + b - c

                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)

                if pa <= pb and pa <= pc:
                    color = (color + a) & 0xff
                elif pb <= pc:
                    color = (color + b) & 0xff
                else:
                    color = (color + c) & 0xff

            current_row.append(color)

    return width, height, pixels
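
# Illustrative sketch (not part of the original module): build a minimal 2x1
# 8-bit RGB PNG in memory and decode it. The chunk() helper and the pixel
# values are made up for this example; note that the returned "pixels" rows
# are flat per-channel byte lists, not (R, G, B) tuples.
def _example_decode_png():
    def chunk(chunk_type, data):
        body = chunk_type + data
        return (compat_struct_pack('>I', len(data)) + body
                + compat_struct_pack('>I', zlib.crc32(body) & 0xffffffff))

    # IHDR: width=2, height=1, bit depth 8, colour type 2 (RGB), no interlace
    ihdr = compat_struct_pack('>IIBBBBB', 2, 1, 8, 2, 0, 0, 0)
    # one scanline: filter type 0 followed by a red and a green pixel
    raw = b'\x00' + b'\xff\x00\x00' + b'\x00\xff\x00'
    png_data = (b'\x89PNG\r\n\x1a\n'
                + chunk(b'IHDR', ihdr)
                + chunk(b'IDAT', zlib.compress(raw))
                + chunk(b'IEND', b''))

    width, height, pixels = decode_png(png_data)
    assert (width, height) == (2, 1)
    assert pixels == [[0xff, 0x00, 0x00, 0x00, 0xff, 0x00]]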
def write_xattr(path, key, value):
    # This mess below finds the best xattr tool for the job
    try:
        # try the pyxattr module...
        import xattr

        if hasattr(xattr, 'set'):  # pyxattr
            # Unicode arguments are not supported in python-pyxattr until
            # version 0.5.0
            # See https://github.com/rg3/youtube-dl/issues/5498
            pyxattr_required_version = '0.5.0'
            if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
                # TODO: fallback to CLI tools
                raise XAttrUnavailableError(
                    'python-pyxattr is detected but is too old. '
                    'youtube-dl requires %s or above while your version is %s. '
                    'Falling back to other xattr implementations' % (
                        pyxattr_required_version, xattr.__version__))

            setxattr = xattr.set
        else:  # xattr
            setxattr = xattr.setxattr

        try:
            setxattr(path, key, value)
        except EnvironmentError as e:
            raise XAttrMetadataError(e.errno, e.strerror)

    except ImportError:
        if compat_os_name == 'nt':
            # Write xattrs to NTFS Alternate Data Streams:
            # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
            assert ':' not in key
            assert os.path.exists(path)

            ads_fn = path + ':' + key
            try:
                with open(ads_fn, 'wb') as f:
                    f.write(value)
            except EnvironmentError as e:
                raise XAttrMetadataError(e.errno, e.strerror)
        else:
            user_has_setfattr = check_executable('setfattr', ['--version'])
            user_has_xattr = check_executable('xattr', ['-h'])

            if user_has_setfattr or user_has_xattr:

                value = value.decode('utf-8')
                if user_has_setfattr:
                    executable = 'setfattr'
                    opts = ['-n', key, '-v', value]
                elif user_has_xattr:
                    executable = 'xattr'
                    opts = ['-w', key, value]

                cmd = ([encodeFilename(executable, True)] +
                       [encodeArgument(o) for o in opts] +
                       [encodeFilename(path, True)])

                try:
                    p = subprocess.Popen(
                        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
                except EnvironmentError as e:
                    raise XAttrMetadataError(e.errno, e.strerror)
                stdout, stderr = p.communicate()
                stderr = stderr.decode('utf-8', 'replace')
                if p.returncode != 0:
                    raise XAttrMetadataError(p.returncode, stderr)

            else:
                # On Unix, but none of pyxattr, setfattr or xattr could be found.
                if sys.platform.startswith('linux'):
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'pyxattr' or 'xattr' "
                        "modules, or the GNU 'attr' package "
                        "(which contains the 'setfattr' tool).")
                else:
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'xattr' module, "
                        "or the 'xattr' binary.")
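
# Illustrative usage sketch (not part of the original module): tag a file with
# its origin URL. The temporary file and the attribute name are made-up
# examples; depending on the platform and the available backends this may
# raise XAttrUnavailableError or XAttrMetadataError (both defined elsewhere in
# this module).
def _example_write_xattr():
    with tempfile.NamedTemporaryFile(delete=False) as f:
        path = f.name
    write_xattr(path, 'user.xdg.origin.url', b'https://example.com/video')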