#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import unicode_literals

import base64
import binascii
import calendar
import codecs
import contextlib
import ctypes
import datetime
import email.utils
import errno
import functools
import gzip
import io
import itertools
import json
import locale
import math
import operator
import os
import pipes
import platform
import re
import socket
import ssl
import subprocess
import sys
import tempfile
import traceback
import xml.etree.ElementTree
import zlib

from .compat import (
    compat_HTMLParser,
    compat_basestring,
    compat_chr,
    compat_etree_fromstring,
    compat_html_entities,
    compat_html_entities_html5,
    compat_http_client,
    compat_kwargs,
    compat_parse_qs,
    compat_shlex_quote,
    compat_socket_create_connection,
    compat_str,
    compat_struct_pack,
    compat_struct_unpack,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_parse_urlencode,
    compat_urllib_parse_urlparse,
    compat_urllib_parse_unquote_plus,
    compat_urllib_request,
    compat_urlparse,
    compat_xpath,
)

from .socks import (
    ProxyType,
    sockssocket,
)


def register_socks_protocols():
    # "Register" SOCKS protocols
    # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
    # URLs with protocols not in urlparse.uses_netloc are not handled correctly
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in compat_urlparse.uses_netloc:
            compat_urlparse.uses_netloc.append(scheme)


# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))

std_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
}


NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'avi', 'divx',
    'mov',
    'asf', 'wmv', 'wma',
    '3gp', '3g2',
    'mp3',
    'flac',
    'ape',
    'wav',
    'f4f', 'f4m', 'm3u8', 'smil')

# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUYP', ['ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuypy')))

DATE_FORMATS = (
    '%d %B %Y',
    '%d %b %Y',
    '%B %d %Y',
    '%b %d %Y',
    '%b %dst %Y %I:%M',
    '%b %dnd %Y %I:%M',
    '%b %dth %Y %I:%M',
    '%Y %m %d',
    '%Y-%m-%d',
    '%Y/%m/%d',
    '%Y/%m/%d %H:%M',
    '%Y/%m/%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S.%f',
    '%d.%m.%Y %H:%M',
    '%d.%m.%Y %H.%M',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%Y-%m-%dT%H:%M',
)

DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
])

DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])


def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        'TEST'.encode(pref)
    except Exception:
        pref = 'UTF-8'

    return pref


def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non ascii characters unless we
        # use a unicode object
        path_basename = lambda f: os.path.basename(fn).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    args = {
        'suffix': '.tmp',
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args.update({
            'mode': 'w',
            'encoding': 'utf-8',
        })

    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))

    try:
        with tf:
            json.dump(obj, tf)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        os.rename(tf.name, fn)
    except Exception:
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise


if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] """
        assert re.match(r'^[a-zA-Z_-]+$', key)
        expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
        return node.find(expr)
else:
    def find_xpath_attr(node, xpath, key, val=None):
        for f in node.findall(compat_xpath(xpath)):
            if key not in f.attrib:
                continue
            if val is None or f.attrib.get(key) == val:
                return f
        return None

# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter


def xpath_with_ns(path, ns_map):
    components = [c.split(':') for c in path.split('/')]
    replaced = []
    for c in components:
        if len(c) == 1:
            replaced.append(c[0])
        else:
            ns, tag = c
            replaced.append('{%s}%s' % (ns_map[ns], tag))
    return '/'.join(replaced)

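# Illustrative sketch (added, not part of the original module; the namespace
# URI below is made up): prefixed path components are expanded via ns_map:
#   xpath_with_ns('media:song/url', {'media': 'http://example.com/ns'})
#   # -> '{http://example.com/ns}song/url'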

def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    def _find_xpath(xpath):
        return node.find(compat_xpath(xpath))

    if isinstance(xpath, (str, compat_str)):
        n = _find_xpath(xpath)
    else:
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n


def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element\'s text %s' % name)
        else:
            return None
    return n.text


def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    n = find_xpath_attr(node, xpath, key)
    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = '%s[@%s]' % (xpath, key) if name is None else name
            raise ExtractorError('Could not find XML attribute %s' % name)
        else:
            return None
    return n.attrib[key]


def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute('id', id, html)


def get_element_by_class(class_name, html):
    return get_element_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)


def get_element_by_attribute(attribute, value, html, escape_value=True):
    """Return the content of the tag with the specified attribute in the passed HTML document"""

    value = re.escape(value) if escape_value else value

    m = re.search(r'''(?xs)
        <([a-zA-Z0-9:._-]+)
        (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
        \s+%s=['"]?%s['"]?
        (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'))*?
        \s*>
        (?P<content>.*?)
        </\1>
    ''' % (re.escape(attribute), value), html)

    if not m:
        return None
    res = m.group('content')

    if res.startswith('"') or res.startswith("'"):
        res = res[1:-1]

    return unescapeHTML(res)


class HTMLAttributeParser(compat_HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""
    def __init__(self):
        self.attrs = {}
        compat_HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        self.attrs = dict(attrs)


def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
    but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
    """
    parser = HTMLAttributeParser()
    parser.feed(html_element)
    parser.close()
    return parser.attrs


def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    # Newline vs <br />
    html = html.replace('\n', ' ')
    html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
    html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
    # Strip html tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities
    html = unescapeHTML(html)
    return html.strip()


def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars
        alt_filename = sanitize_path(filename)
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller
            stream = open(encodeFilename(alt_filename), open_mode)
            return (stream, alt_filename)


def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp


def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept if possible
    """
    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        if char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '_-' if restricted else ' -'
        elif char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and ord(char) > 127:
            return '_'
        return char

    # Handle timestamps
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
    result = ''.join(map(replace_insane, s))
    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result

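# Rough usage sketch (added for illustration; exact output follows the rules
# above): sanitize_filename('AC/DC: Back in Black') would be expected to give
# 'AC_DC - Back in Black', and with restricted=True something like
# 'AC_DC_-_Back_in_Black'.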

def sanitize_path(s):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform != 'win32':
        return s
    drive_or_unc, _ = os.path.splitdrive(s)
    if sys.version_info < (2, 7) and not drive_or_unc:
        drive_or_unc, _ = os.path.splitunc(s)
    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    return os.path.join(*sanitized_path)


# Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of
# unwanted failures due to missing protocol
def sanitize_url(url):
    return 'http:%s' % url if url.startswith('//') else url


def sanitized_Request(url, *args, **kwargs):
    return compat_urllib_request.Request(sanitize_url(url), *args, **kwargs)


def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    res = []
    for el in iterable:
        if el not in res:
            res.append(el)
    return res


def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in compat_html_entities_html5:
        return compat_html_entities_html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/rg3/youtube-dl/issues/7518
        try:
            return compat_chr(int(numstr, base))
        except ValueError:
            pass

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity


def unescapeHTML(s):
    if s is None:
        return None
    assert type(s) == compat_str

    return re.sub(
        r'&([^;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)

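# Illustrative example (added): unescapeHTML('Bj&#xF6;rk &amp; friends')
# would be expected to return 'Björk & friends' -- named and numeric entities
# are both resolved by _htmlentity_transform above.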

def get_subprocess_encoding():
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding


def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """

    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    # Pass '' directly to use Unicode APIs on Windows 2000 and up
    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        return s

    # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
    if sys.platform.startswith('java'):
        return s

    return s.encode(get_subprocess_encoding(), 'ignore')


def decodeFilename(b, for_subprocess=False):

    if sys.version_info >= (3, 0):
        return b

    if not isinstance(b, bytes):
        return b

    return b.decode(get_subprocess_encoding(), 'ignore')


def encodeArgument(s):
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)


def decodeArgument(b):
    return decodeFilename(b, True)


def decodeOption(optval):
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval


def formatSeconds(secs):
    if secs > 3600:
        return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
    elif secs > 60:
        return '%d:%02d' % (secs // 60, secs % 60)
    else:
        return '%d' % secs


def make_HTTPS_handler(params, **kwargs):
    opts_no_check_certificate = params.get('nocheckcertificate', False)
    if hasattr(ssl, 'create_default_context'):  # Python >= 3.4 or 2.7.9
        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
        if opts_no_check_certificate:
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        try:
            return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
        except TypeError:
            # Python 2.7.8
            # (create_default_context present but HTTPSHandler has no context=)
            pass

    if sys.version_info < (3, 2):
        return YoutubeDLHTTPSHandler(params, **kwargs)
    else:  # Python < 3.4
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = (ssl.CERT_NONE
                               if opts_no_check_certificate
                               else ssl.CERT_REQUIRED)
        context.set_default_verify_paths()
        return YoutubeDLHTTPSHandler(params, context=context, **kwargs)


def bug_reports_message():
    if ytdl_is_updateable():
        update_cmd = 'type youtube-dl -U to update'
    else:
        update_cmd = 'see https://yt-dl.org/update on how to update'
    msg = '; please report this issue on https://yt-dl.org/bug .'
    msg += ' Make sure you are using the latest version; %s.' % update_cmd
    msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
    return msg


class ExtractorError(Exception):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
        """

        if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
            expected = True
        if video_id is not None:
            msg = video_id + ': ' + msg
        if cause:
            msg += ' (caused by %r)' % cause
        if not expected:
            msg += bug_reports_message()
        super(ExtractorError, self).__init__(msg)

        self.traceback = tb
        self.exc_info = sys.exc_info()  # preserve original exception
        self.cause = cause
        self.video_id = video_id

    def format_traceback(self):
        if self.traceback is None:
            return None
        return ''.join(traceback.format_tb(self.traceback))


class UnsupportedError(ExtractorError):
    def __init__(self, url):
        super(UnsupportedError, self).__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url


class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass


class DownloadError(Exception):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info


class SameFileError(Exception):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    pass


class PostProcessingError(Exception):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """

    def __init__(self, msg):
        self.msg = msg


class MaxDownloadsReached(Exception):
    """ --max-downloads limit has been reached. """
    pass


class UnavailableVideoError(Exception):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    pass


class ContentTooShortError(Exception):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected


def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
    # expected HTTP responses to meet HTTP/1.0 or later (see also
    # https://github.com/rg3/youtube-dl/issues/6727)
    if sys.version_info < (3, 0):
        kwargs[b'strict'] = True
    hc = http_class(*args, **kwargs)
    source_address = ydl_handler._params.get('source_address')
    if source_address is not None:
        sa = (source_address, 0)
        if hasattr(hc, 'source_address'):  # Python 2.7+
            hc.source_address = sa
        else:  # Python 2.6
            def _hc_connect(self, *args, **kwargs):
                sock = compat_socket_create_connection(
                    (self.host, self.port), self.timeout, sa)
                if is_https:
                    self.sock = ssl.wrap_socket(
                        sock, self.key_file, self.cert_file,
                        ssl_version=ssl.PROTOCOL_TLSv1)
                else:
                    self.sock = sock
            hc.connect = functools.partial(_hc_connect, hc)

    return hc


def handle_youtubedl_headers(headers):
    filtered_headers = headers

    if 'Youtubedl-no-compression' in filtered_headers:
        filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
        del filtered_headers['Youtubedl-no-compression']

    return filtered_headers


class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        conn_class = compat_http_client.HTTPConnection

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def addinfourl_wrapper(stream, headers, url, code):
        if hasattr(compat_urllib_request.addinfourl, 'getcode'):
            return compat_urllib_request.addinfourl(stream, headers, url, code)
        ret = compat_urllib_request.addinfourl(stream, headers, url)
        ret.code = code
        return ret

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in std_headers.items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/rg3/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                if sys.version_info >= (3, 0):
                    location = location.encode('iso-8859-1').decode('utf-8')
                else:
                    location = location.decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    if sys.version_info < (3, 0):
                        location_escaped = location_escaped.encode('utf-8')
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response


def make_socks_conn_class(base_class, socks_proxy):
    assert issubclass(base_class, (
        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))

    url_components = compat_urlparse.urlparse(socks_proxy)
    if url_components.scheme.lower() == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif url_components.scheme.lower() in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif url_components.scheme.lower() == 'socks4a':
        socks_type = ProxyType.SOCKS4A

    def unquote_if_non_empty(s):
        if not s:
            return s
        return compat_urllib_parse_unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if type(self.timeout) in (int, float):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            if isinstance(self, compat_http_client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection


class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        conn_class = self._https_conn_class

        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, True),
            req, **kwargs)


class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        # Python 2 will choke on next HTTP request in row if there are non-ASCII
        # characters in Set-Cookie HTTP header of last response (see
        # https://github.com/rg3/youtube-dl/issues/6769).
        # In order to at least prevent crashing we will percent encode Set-Cookie
        # header before HTTPCookieProcessor starts processing it.
        # if sys.version_info < (3, 0) and response.headers:
        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
        #         set_cookie = response.headers.get(set_cookie_header)
        #         if set_cookie:
        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
        #             if set_cookie != set_cookie_escaped:
        #                 del response.headers[set_cookie_header]
        #                 response.headers[set_cookie_header] = set_cookie_escaped
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response


def extract_timezone(date_str):
    m = re.search(
        r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
        date_str)
    if not m:
        timezone = datetime.timedelta()
    else:
        date_str = date_str[:-len(m.group('tz'))]
        if not m.group('sign'):
            timezone = datetime.timedelta()
        else:
            sign = 1 if m.group('sign') == '+' else -1
            timezone = datetime.timedelta(
                hours=sign * int(m.group('hours')),
                minutes=sign * int(m.group('minutes')))
    return timezone, date_str


def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        timezone, date_str = extract_timezone(date_str)

    try:
        date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
        dt = datetime.datetime.strptime(date_str, date_format) - timezone
        return calendar.timegm(dt.timetuple())
    except ValueError:
        pass

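# Illustrative examples (added, assuming the usual ISO 8601 forms):
#   parse_iso8601('2014-12-01T12:00:00Z')       # -> 1417435200
#   parse_iso8601('2014-12-01T13:00:00+01:00')  # -> 1417435200 (offset applied)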

def date_formats(day_first=True):
    return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST


def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None
    upload_date = None
    # Replace commas
    date_str = date_str.replace(',', ' ')
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
    _, date_str = extract_timezone(date_str)

    for expression in date_formats(day_first):
        try:
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
        except ValueError:
            pass
    if upload_date is None:
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            try:
                upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
            except ValueError:
                pass
    if upload_date is not None:
        return compat_str(upload_date)

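# Illustrative examples (added): different input styles would be expected to
# normalize to the same YYYYMMDD string, e.g.
#   unified_strdate('1968-12-10')         # -> '19681210'
#   unified_strdate('December 21, 2010')  # -> '20101221'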

def unified_timestamp(date_str, day_first=True):
    if date_str is None:
        return None

    date_str = date_str.replace(',', ' ')

    pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
    timezone, date_str = extract_timezone(date_str)

    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    for expression in date_formats(day_first):
        try:
            dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
            return calendar.timegm(dt.timetuple())
        except ValueError:
            pass
    timetuple = email.utils.parsedate_tz(date_str)
    if timetuple:
        return calendar.timegm(timetuple) + pm_delta * 3600


def determine_ext(url, default_ext='unknown_video'):
    if url is None:
        return default_ext
    guess = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', guess):
        return guess
    # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
    elif guess.rstrip('/') in KNOWN_EXTENSIONS:
        return guess.rstrip('/')
    else:
        return default_ext


def subtitles_filename(filename, sub_lang, sub_format):
    return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format


def date_from_str(date_str):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today)[+-][0-9](day|week|month|year)(s)?"""
    today = datetime.date.today()
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
    if match is not None:
        sign = match.group('sign')
        time = int(match.group('time'))
        if sign == '-':
            time = -time
        unit = match.group('unit')
        # A bad approximation?
        if unit == 'month':
            unit = 'day'
            time *= 30
        elif unit == 'year':
            unit = 'day'
            time *= 365
        unit += 's'
        delta = datetime.timedelta(**{unit: time})
        return today + delta
    return datetime.datetime.strptime(date_str, '%Y%m%d').date()


def hyphenate_date(date_str):
    """
    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is not None:
        return '-'.join(match.groups())
    else:
        return date_str


class DateRange(object):
    """Represents a time interval between two dates"""

    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        if start is not None:
            self.start = date_from_str(start)
        else:
            self.start = datetime.datetime.min.date()
        if end is not None:
            self.end = date_from_str(end)
        else:
            self.end = datetime.datetime.max.date()
        if self.start > self.end:
            raise ValueError('Date range: "%s" , the start date must be before the end date' % self)

    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)

    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end

    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())


def platform_name():
    """ Returns the platform name as a compat_str """
    res = platform.platform()
    if isinstance(res, bytes):
        res = res.decode(preferredencoding())

    assert isinstance(res, compat_str)
    return res


def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes
    import ctypes.wintypes

    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False

    GetStdHandle = ctypes.WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        (b'GetStdHandle', ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    WriteConsoleW = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        (b'GetConsoleMode', ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
                GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):
        return False

    def next_nonbmp_pos(s):
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)

    while s:
        count = min(next_nonbmp_pos(s), 1024)

        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True


def write_string(s, out=None, encoding=None):
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        if _windows_write_string(s, out):
            return

    if ('b' in getattr(out, 'mode', '') or
            sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        byt = s.encode(encoding or preferredencoding(), 'ignore')
        out.write(byt)
    elif hasattr(out, 'buffer'):
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
        byt = s.encode(enc, 'ignore')
        out.buffer.write(byt)
    else:
        out.write(s)
    out.flush()


def bytes_to_intlist(bs):
    if not bs:
        return []
    if isinstance(bs[0], int):  # Python 3
        return list(bs)
    else:
        return [ord(c) for c in bs]


def intlist_to_bytes(xs):
    if not xs:
        return b''
    return compat_struct_pack('%dB' % len(xs), *xs)


# Cross-platform file locking
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())

else:
    # Some platforms, such as Jython, are missing fcntl
    try:
        import fcntl

        def _lock_file(f, exclusive):
            fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

        def _unlock_file(f):
            fcntl.flock(f, fcntl.LOCK_UN)
    except ImportError:
        UNSUPPORTED_MSG = 'file locking is not supported on this platform'

        def _lock_file(f, exclusive):
            raise IOError(UNSUPPORTED_MSG)

        def _unlock_file(f):
            raise IOError(UNSUPPORTED_MSG)


class locked_file(object):
    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        exclusive = self.mode != 'r'
        try:
            _lock_file(self.f, exclusive)
        except IOError:
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)


def get_filesystem_encoding():
    encoding = sys.getfilesystemencoding()
    return encoding if encoding is not None else 'utf-8'


def shell_quote(args):
    quoted_args = []
    encoding = get_filesystem_encoding()
    for a in args:
        if isinstance(a, bytes):
            # We may get a filename encoded with 'encodeFilename'
            a = a.decode(encoding)
        quoted_args.append(pipes.quote(a))
    return ' '.join(quoted_args)

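# Rough usage sketch (added): arguments containing shell metacharacters get
# quoted by pipes.quote, e.g.
#   shell_quote(['ffmpeg', '-i', 'my file.mp4'])  # -> "ffmpeg -i 'my file.mp4'"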

def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """

    url, idata = unsmuggle_url(url, {})
    data.update(idata)
    sdata = compat_urllib_parse_urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return url + '#' + sdata


def unsmuggle_url(smug_url, default=None):
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, sdata = smug_url.rpartition('#')
    jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
    data = json.loads(jsond)
    return url, data

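# Illustrative round trip (added): the data is carried in the URL fragment and
# recovered unchanged, e.g.
#   url = smuggle_url('http://example.com/video', {'force_generic': True})
#   unsmuggle_url(url)  # -> ('http://example.com/video', {'force_generic': True})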

def format_bytes(bytes):
    if bytes is None:
        return 'N/A'
    if type(bytes) is str:
        bytes = float(bytes)
    if bytes == 0.0:
        exponent = 0
    else:
        exponent = int(math.log(bytes, 1024.0))
    suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
    converted = float(bytes) / float(1024 ** exponent)
    return '%.2f%s' % (converted, suffix)

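# Illustrative examples (added): format_bytes(1536) -> '1.50KiB',
# format_bytes(None) -> 'N/A'.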

def lookup_unit_table(unit_table, s):
    units_re = '|'.join(re.escape(u) for u in unit_table)
    m = re.match(
        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
    if not m:
        return None
    num_str = m.group('num').replace(',', '.')
    mult = unit_table[m.group('unit')]
    return int(float(num_str) * mult)


def parse_filesize(s):
    if s is None:
        return None

    # The lower-case forms are of course incorrect and unofficial,
    # but we support those too
    _UNIT_TABLE = {
        'B': 1,
        'b': 1,
        'bytes': 1,
        'KiB': 1024,
        'KB': 1000,
        'kB': 1024,
        'Kb': 1000,
        'kb': 1000,
        'kilobytes': 1000,
        'kibibytes': 1024,
        'MiB': 1024 ** 2,
        'MB': 1000 ** 2,
        'mB': 1024 ** 2,
        'Mb': 1000 ** 2,
        'mb': 1000 ** 2,
        'megabytes': 1000 ** 2,
        'mebibytes': 1024 ** 2,
        'GiB': 1024 ** 3,
        'GB': 1000 ** 3,
        'gB': 1024 ** 3,
        'Gb': 1000 ** 3,
        'gb': 1000 ** 3,
        'gigabytes': 1000 ** 3,
        'gibibytes': 1024 ** 3,
        'TiB': 1024 ** 4,
        'TB': 1000 ** 4,
        'tB': 1024 ** 4,
        'Tb': 1000 ** 4,
        'tb': 1000 ** 4,
        'terabytes': 1000 ** 4,
        'tebibytes': 1024 ** 4,
        'PiB': 1024 ** 5,
        'PB': 1000 ** 5,
        'pB': 1024 ** 5,
        'Pb': 1000 ** 5,
        'pb': 1000 ** 5,
        'petabytes': 1000 ** 5,
        'pebibytes': 1024 ** 5,
        'EiB': 1024 ** 6,
        'EB': 1000 ** 6,
        'eB': 1024 ** 6,
        'Eb': 1000 ** 6,
        'eb': 1000 ** 6,
        'exabytes': 1000 ** 6,
        'exbibytes': 1024 ** 6,
        'ZiB': 1024 ** 7,
        'ZB': 1000 ** 7,
        'zB': 1024 ** 7,
        'Zb': 1000 ** 7,
        'zb': 1000 ** 7,
        'zettabytes': 1000 ** 7,
        'zebibytes': 1024 ** 7,
        'YiB': 1024 ** 8,
        'YB': 1000 ** 8,
        'yB': 1024 ** 8,
        'Yb': 1000 ** 8,
        'yb': 1000 ** 8,
        'yottabytes': 1000 ** 8,
        'yobibytes': 1024 ** 8,
    }

    return lookup_unit_table(_UNIT_TABLE, s)


def parse_count(s):
    if s is None:
        return None

    s = s.strip()

    if re.match(r'^[\d,.]+$', s):
        return str_to_int(s)

    _UNIT_TABLE = {
        'k': 1000,
        'K': 1000,
        'm': 1000 ** 2,
        'M': 1000 ** 2,
        'kk': 1000 ** 2,
        'KK': 1000 ** 2,
    }

    return lookup_unit_table(_UNIT_TABLE, s)

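# Illustrative examples (added): parse_filesize('2.5 MiB') -> 2621440 and
# parse_count('1.2M') -> 1200000, both resolved through lookup_unit_table above.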
2f7ae819 1589
caefb1de
PH
1590def month_by_name(name):
1591 """ Return the number of a month by (locale-independently) English name """
1592
caefb1de 1593 try:
7105440c
YCH
1594 return ENGLISH_MONTH_NAMES.index(name) + 1
1595 except ValueError:
1596 return None
1597
1598
1599def month_by_abbreviation(abbrev):
1600 """ Return the number of a month by (locale-independently) English
1601 abbreviations """
1602
1603 try:
1604 return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
caefb1de
PH
1605 except ValueError:
1606 return None
18258362
JMF
1607
1608
5aafe895 1609def fix_xml_ampersands(xml_str):
18258362 1610 """Replace all the '&' by '&amp;' in XML"""
5aafe895
PH
1611 return re.sub(
1612 r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
28e614de 1613 '&amp;',
5aafe895 1614 xml_str)
e3946f98
PH
1615
1616
1617def setproctitle(title):
8bf48f23 1618 assert isinstance(title, compat_str)
c1c05c67
YCH
1619
1620 # ctypes in Jython is not complete
1621 # http://bugs.jython.org/issue2148
1622 if sys.platform.startswith('java'):
1623 return
1624
e3946f98 1625 try:
611c1dd9 1626 libc = ctypes.cdll.LoadLibrary('libc.so.6')
e3946f98
PH
1627 except OSError:
1628 return
6eefe533
PH
1629 title_bytes = title.encode('utf-8')
1630 buf = ctypes.create_string_buffer(len(title_bytes))
1631 buf.value = title_bytes
e3946f98 1632 try:
6eefe533 1633 libc.prctl(15, buf, 0, 0, 0)
e3946f98
PH
1634 except AttributeError:
1635 return # Strange libc, just skip this
d7dda168
PH
1636
1637
1638def remove_start(s, start):
46bc9b7d 1639 return s[len(start):] if s is not None and s.startswith(start) else s
29eb5174
PH
1640
1641
2b9faf55 1642def remove_end(s, end):
46bc9b7d 1643 return s[:-len(end)] if s is not None and s.endswith(end) else s
2b9faf55
PH
1644
1645
31b2051e
S
1646def remove_quotes(s):
1647 if s is None or len(s) < 2:
1648 return s
1649 for quote in ('"', "'", ):
1650 if s[0] == quote and s[-1] == quote:
1651 return s[1:-1]
1652 return s
1653
1654
29eb5174 1655def url_basename(url):
9b8aaeed 1656 path = compat_urlparse.urlparse(url).path
28e614de 1657 return path.strip('/').split('/')[-1]
aa94a6d3
PH
1658
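# Usage sketch (editor's illustration, not part of the upstream source):
#   url_basename('http://example.com/a/b/video.mp4?x=1') == 'video.mp4'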
1659
1660class HEADRequest(compat_urllib_request.Request):
1661 def get_method(self):
611c1dd9 1662 return 'HEAD'
7217e148
PH
1663
1664
95cf60e8
S
1665class PUTRequest(compat_urllib_request.Request):
1666 def get_method(self):
1667 return 'PUT'
1668
1669
9732d77e 1670def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
28746fbd
PH
1671 if get_attr:
1672 if v is not None:
1673 v = getattr(v, get_attr, None)
9572013d
PH
1674 if v == '':
1675 v = None
1812afb7
S
1676 if v is None:
1677 return default
1678 try:
1679 return int(v) * invscale // scale
1680 except ValueError:
af98f8ff 1681 return default
9732d77e 1682
9572013d 1683
40a90862
JMF
1684def str_or_none(v, default=None):
1685 return default if v is None else compat_str(v)
1686
9732d77e
PH
1687
1688def str_to_int(int_str):
48d4681e 1689 """ A more relaxed version of int_or_none """
9732d77e
PH
1690 if int_str is None:
1691 return None
28e614de 1692 int_str = re.sub(r'[,\.\+]', '', int_str)
9732d77e 1693 return int(int_str)
608d11f5
PH
1694
1695
9732d77e 1696def float_or_none(v, scale=1, invscale=1, default=None):
caf80631
S
1697 if v is None:
1698 return default
1699 try:
1700 return float(v) * invscale / scale
1701 except ValueError:
1702 return default
43f775e4
PH
1703
1704
b72b4431
S
1705def strip_or_none(v):
1706 return None if v is None else v.strip()
1707
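# Illustrative examples (added by the editor; not in the original file):
#   int_or_none('42') == 42 and int_or_none('') is None
#   int_or_none(3000, scale=1000) == 3
#   str_to_int('1,000,000') == 1000000
#   float_or_none('2.5') == 2.5 and strip_or_none(' x ') == 'x'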
1708
608d11f5 1709def parse_duration(s):
8f9312c3 1710 if not isinstance(s, compat_basestring):
608d11f5
PH
1711 return None
1712
ca7b3246
S
1713 s = s.strip()
1714
acaff495 1715 days, hours, mins, secs, ms = [None] * 5
1716 m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?$', s)
1717 if m:
1718 days, hours, mins, secs, ms = m.groups()
1719 else:
1720 m = re.match(
1721 r'''(?ix)(?:P?T)?
8f4b58d7 1722 (?:
acaff495 1723 (?P<days>[0-9]+)\s*d(?:ays?)?\s*
8f4b58d7 1724 )?
acaff495 1725 (?:
1726 (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
1727 )?
1728 (?:
1729 (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
1730 )?
1731 (?:
1732 (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
1733 )?$''', s)
1734 if m:
1735 days, hours, mins, secs, ms = m.groups()
1736 else:
1737 m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)$', s)
1738 if m:
1739 hours, mins = m.groups()
1740 else:
1741 return None
1742
1743 duration = 0
1744 if secs:
1745 duration += float(secs)
1746 if mins:
1747 duration += float(mins) * 60
1748 if hours:
1749 duration += float(hours) * 60 * 60
1750 if days:
1751 duration += float(days) * 24 * 60 * 60
1752 if ms:
1753 duration += float(ms)
1754 return duration
91d7d0b3
JMF
1755
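# Usage sketch (editor's illustration, not part of the upstream source):
#   parse_duration('1:23:45') == 5025.0
#   parse_duration('3 min') == 180.0
#   parse_duration('PT1H30M') == 5400.0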
1756
e65e4c88 1757def prepend_extension(filename, ext, expected_real_ext=None):
5f6a1245 1758 name, real_ext = os.path.splitext(filename)
e65e4c88
S
1759 return (
1760 '{0}.{1}{2}'.format(name, ext, real_ext)
1761 if not expected_real_ext or real_ext[1:] == expected_real_ext
1762 else '{0}.{1}'.format(filename, ext))
d70ad093
PH
1763
1764
b3ed15b7
S
1765def replace_extension(filename, ext, expected_real_ext=None):
1766 name, real_ext = os.path.splitext(filename)
1767 return '{0}.{1}'.format(
1768 name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
1769 ext)
1770
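# Example values (editor's sketch, not upstream code):
#   prepend_extension('video.mp4', 'temp') == 'video.temp.mp4'
#   replace_extension('video.mp4', 'mkv') == 'video.mkv'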
1771
d70ad093
PH
1772def check_executable(exe, args=[]):
1773 """ Checks if the given binary is installed somewhere in PATH, and returns its name.
1774 args can be a list of arguments for a short output (like -version) """
1775 try:
1776 subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
1777 except OSError:
1778 return False
1779 return exe
b7ab0590
PH
1780
1781
95807118 1782def get_exe_version(exe, args=['--version'],
cae97f65 1783 version_re=None, unrecognized='present'):
95807118
PH
1784 """ Returns the version of the specified executable,
1785 or False if the executable is not present """
1786 try:
cae97f65 1787 out, _ = subprocess.Popen(
54116803 1788 [encodeArgument(exe)] + args,
95807118
PH
1789 stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
1790 except OSError:
1791 return False
cae97f65
PH
1792 if isinstance(out, bytes): # Python 2.x
1793 out = out.decode('ascii', 'ignore')
1794 return detect_exe_version(out, version_re, unrecognized)
1795
1796
1797def detect_exe_version(output, version_re=None, unrecognized='present'):
1798 assert isinstance(output, compat_str)
1799 if version_re is None:
1800 version_re = r'version\s+([-0-9._a-zA-Z]+)'
1801 m = re.search(version_re, output)
95807118
PH
1802 if m:
1803 return m.group(1)
1804 else:
1805 return unrecognized
1806
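# Usage sketch (editor's illustration, not part of the upstream source):
#   detect_exe_version('ffmpeg version 2.8.4 Copyright (c) 2000-2015') == '2.8.4'
#   detect_exe_version('unparsable output') == 'present'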
1807
b7ab0590 1808class PagedList(object):
dd26ced1
PH
1809 def __len__(self):
1810 # This is only useful for tests
1811 return len(self.getslice())
1812
9c44d242
PH
1813
1814class OnDemandPagedList(PagedList):
b95dc034 1815 def __init__(self, pagefunc, pagesize, use_cache=False):
9c44d242
PH
1816 self._pagefunc = pagefunc
1817 self._pagesize = pagesize
b95dc034
YCH
1818 self._use_cache = use_cache
1819 if use_cache:
1820 self._cache = {}
9c44d242 1821
b7ab0590
PH
1822 def getslice(self, start=0, end=None):
1823 res = []
1824 for pagenum in itertools.count(start // self._pagesize):
1825 firstid = pagenum * self._pagesize
1826 nextfirstid = pagenum * self._pagesize + self._pagesize
1827 if start >= nextfirstid:
1828 continue
1829
b95dc034
YCH
1830 page_results = None
1831 if self._use_cache:
1832 page_results = self._cache.get(pagenum)
1833 if page_results is None:
1834 page_results = list(self._pagefunc(pagenum))
1835 if self._use_cache:
1836 self._cache[pagenum] = page_results
b7ab0590
PH
1837
1838 startv = (
1839 start % self._pagesize
1840 if firstid <= start < nextfirstid
1841 else 0)
1842
1843 endv = (
1844 ((end - 1) % self._pagesize) + 1
1845 if (end is not None and firstid <= end <= nextfirstid)
1846 else None)
1847
1848 if startv != 0 or endv is not None:
1849 page_results = page_results[startv:endv]
1850 res.extend(page_results)
1851
1852 # A little optimization - if the current page is not "full", i.e. does
1853 # not contain page_size videos, then we can assume that this page
1854 # is the last one - there are no more ids on further pages -
1855 # so there is no need to query again.
1856 if len(page_results) + startv < self._pagesize:
1857 break
1858
1859 # If we got the whole page, but the next page is not interesting,
1860 # break out early as well
1861 if end == nextfirstid:
1862 break
1863 return res
81c2f20b
PH
1864
1865
9c44d242
PH
1866class InAdvancePagedList(PagedList):
1867 def __init__(self, pagefunc, pagecount, pagesize):
1868 self._pagefunc = pagefunc
1869 self._pagecount = pagecount
1870 self._pagesize = pagesize
1871
1872 def getslice(self, start=0, end=None):
1873 res = []
1874 start_page = start // self._pagesize
1875 end_page = (
1876 self._pagecount if end is None else (end // self._pagesize + 1))
1877 skip_elems = start - start_page * self._pagesize
1878 only_more = None if end is None else end - start
1879 for pagenum in range(start_page, end_page):
1880 page = list(self._pagefunc(pagenum))
1881 if skip_elems:
1882 page = page[skip_elems:]
1883 skip_elems = None
1884 if only_more is not None:
1885 if len(page) < only_more:
1886 only_more -= len(page)
1887 else:
1888 page = page[:only_more]
1889 res.extend(page)
1890 break
1891 res.extend(page)
1892 return res
1893
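# Usage sketch (editor's illustration, not part of the upstream source):
#   pages = OnDemandPagedList(lambda n: range(n * 10, (n + 1) * 10), 10)
#   pages.getslice(5, 15) == [5, 6, 7, 8, 9, 10, 11, 12, 13, 14]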
1894
81c2f20b 1895def uppercase_escape(s):
676eb3f2 1896 unicode_escape = codecs.getdecoder('unicode_escape')
81c2f20b 1897 return re.sub(
a612753d 1898 r'\\U[0-9a-fA-F]{8}',
676eb3f2
PH
1899 lambda m: unicode_escape(m.group(0))[0],
1900 s)
0fe2ff78
YCH
1901
1902
1903def lowercase_escape(s):
1904 unicode_escape = codecs.getdecoder('unicode_escape')
1905 return re.sub(
1906 r'\\u[0-9a-fA-F]{4}',
1907 lambda m: unicode_escape(m.group(0))[0],
1908 s)
b53466e1 1909
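# Illustrative examples (added by the editor; not in the original file):
#   lowercase_escape('caf\\u00e9') == 'café'
#   uppercase_escape('\\U0001F600') == '😀'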
d05cfe06
S
1910
1911def escape_rfc3986(s):
1912 """Escape non-ASCII characters as suggested by RFC 3986"""
8f9312c3 1913 if sys.version_info < (3, 0) and isinstance(s, compat_str):
d05cfe06 1914 s = s.encode('utf-8')
ecc0c5ee 1915 return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
d05cfe06
S
1916
1917
1918def escape_url(url):
1919 """Escape URL as suggested by RFC 3986"""
1920 url_parsed = compat_urllib_parse_urlparse(url)
1921 return url_parsed._replace(
efbed08d 1922 netloc=url_parsed.netloc.encode('idna').decode('ascii'),
d05cfe06
S
1923 path=escape_rfc3986(url_parsed.path),
1924 params=escape_rfc3986(url_parsed.params),
1925 query=escape_rfc3986(url_parsed.query),
1926 fragment=escape_rfc3986(url_parsed.fragment)
1927 ).geturl()
1928
62e609ab
PH
1929
1930def read_batch_urls(batch_fd):
1931 def fixup(url):
1932 if not isinstance(url, compat_str):
1933 url = url.decode('utf-8', 'replace')
28e614de 1934 BOM_UTF8 = '\xef\xbb\xbf'
62e609ab
PH
1935 if url.startswith(BOM_UTF8):
1936 url = url[len(BOM_UTF8):]
1937 url = url.strip()
1938 if url.startswith(('#', ';', ']')):
1939 return False
1940 return url
1941
1942 with contextlib.closing(batch_fd) as fd:
1943 return [url for url in map(fixup, fd) if url]
b74fa8cd
JMF
1944
1945
1946def urlencode_postdata(*args, **kargs):
15707c7e 1947 return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
bcf89ce6
PH
1948
1949
38f9ef31 1950def update_url_query(url, query):
cacd9966
YCH
1951 if not query:
1952 return url
38f9ef31 1953 parsed_url = compat_urlparse.urlparse(url)
1954 qs = compat_parse_qs(parsed_url.query)
1955 qs.update(query)
1956 return compat_urlparse.urlunparse(parsed_url._replace(
15707c7e 1957 query=compat_urllib_parse_urlencode(qs, True)))
16392824 1958
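# Usage sketch (editor's illustration, not part of the upstream source); the
# exact parameter order in the result may vary:
#   update_url_query('http://example.com/path?a=1', {'b': '2'})
#       == 'http://example.com/path?a=1&b=2'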
8e60dc75 1959
ed0291d1
S
1960def update_Request(req, url=None, data=None, headers={}, query={}):
1961 req_headers = req.headers.copy()
1962 req_headers.update(headers)
1963 req_data = data or req.data
1964 req_url = update_url_query(url or req.get_full_url(), query)
95cf60e8
S
1965 req_get_method = req.get_method()
1966 if req_get_method == 'HEAD':
1967 req_type = HEADRequest
1968 elif req_get_method == 'PUT':
1969 req_type = PUTRequest
1970 else:
1971 req_type = compat_urllib_request.Request
ed0291d1
S
1972 new_req = req_type(
1973 req_url, data=req_data, headers=req_headers,
1974 origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
1975 if hasattr(req, 'timeout'):
1976 new_req.timeout = req.timeout
1977 return new_req
1978
1979
86296ad2 1980def dict_get(d, key_or_keys, default=None, skip_false_values=True):
cbecc9b9
S
1981 if isinstance(key_or_keys, (list, tuple)):
1982 for key in key_or_keys:
86296ad2
S
1983 if key not in d or d[key] is None or skip_false_values and not d[key]:
1984 continue
1985 return d[key]
cbecc9b9
S
1986 return default
1987 return d.get(key_or_keys, default)
1988
1989
329ca3be
S
1990def try_get(src, getter, expected_type=None):
1991 try:
1992 v = getter(src)
1993 except (AttributeError, KeyError, TypeError, IndexError):
1994 pass
1995 else:
1996 if expected_type is None or isinstance(v, expected_type):
1997 return v
1998
1999
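# Example values (editor's sketch, not upstream code):
#   dict_get({'a': None, 'b': 2}, ('a', 'b')) == 2
#   try_get({'x': [1]}, lambda d: d['x'][0], int) == 1
#   try_get({}, lambda d: d['x']) is None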
8e60dc75
S
2000def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
2001 return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
2002
16392824 2003
a1a530b0
PH
2004US_RATINGS = {
2005 'G': 0,
2006 'PG': 10,
2007 'PG-13': 13,
2008 'R': 16,
2009 'NC': 18,
2010}
fac55558
PH
2011
2012
a8795327
S
2013TV_PARENTAL_GUIDELINES = {
2014 'TV-Y': 0,
2015 'TV-Y7': 7,
2016 'TV-G': 0,
2017 'TV-PG': 0,
2018 'TV-14': 14,
2019 'TV-MA': 17,
2020}
2021
2022
146c80e2 2023def parse_age_limit(s):
a8795327
S
2024 if type(s) == int:
2025 return s if 0 <= s <= 21 else None
2026 if not isinstance(s, compat_basestring):
d838b1bd 2027 return None
146c80e2 2028 m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
a8795327
S
2029 if m:
2030 return int(m.group('age'))
2031 if s in US_RATINGS:
2032 return US_RATINGS[s]
2033 return TV_PARENTAL_GUIDELINES.get(s)
146c80e2
S
2034
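# Usage sketch (editor's illustration, not part of the upstream source):
#   parse_age_limit('PG-13') == 13
#   parse_age_limit('18+') == 18
#   parse_age_limit('TV-MA') == 17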
2035
fac55558 2036def strip_jsonp(code):
609a61e3 2037 return re.sub(
5950cb1d 2038 r'(?s)^[a-zA-Z0-9_.$]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)
478c2c61
PH
2039
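# Usage sketch (editor's illustration, not part of the upstream source):
#   strip_jsonp('callback({"status": "ok"});') == '{"status": "ok"}'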
2040
e05f6939
PH
2041def js_to_json(code):
2042 def fix_kv(m):
e7b6d122
PH
2043 v = m.group(0)
2044 if v in ('true', 'false', 'null'):
2045 return v
bd1e4844 2046 elif v.startswith('/*') or v == ',':
2047 return ""
2048
2049 if v[0] in ("'", '"'):
2050 v = re.sub(r'(?s)\\.|"', lambda m: {
e7b6d122 2051 '"': '\\"',
bd1e4844 2052 "\\'": "'",
2053 '\\\n': '',
2054 '\\x': '\\u00',
2055 }.get(m.group(0), m.group(0)), v[1:-1])
2056
89ac4a19 2057 INTEGER_TABLE = (
e4659b45
YCH
2058 (r'^(0[xX][0-9a-fA-F]+)\s*:?$', 16),
2059 (r'^(0+[0-7]+)\s*:?$', 8),
89ac4a19
S
2060 )
2061
2062 for regex, base in INTEGER_TABLE:
2063 im = re.match(regex, v)
2064 if im:
e4659b45 2065 i = int(im.group(1), base)
89ac4a19
S
2066 return '"%d":' % i if v.endswith(':') else '%d' % i
2067
e7b6d122 2068 return '"%s"' % v
e05f6939 2069
bd1e4844 2070 return re.sub(r'''(?sx)
2071 "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
2072 '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
2073 /\*.*?\*/|,(?=\s*[\]}])|
2074 [a-zA-Z_][.a-zA-Z_0-9]*|
47212f7b 2075 \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:\s*:)?|
bd1e4844 2076 [0-9]+(?=\s*:)
e05f6939 2077 ''', fix_kv, code)
e05f6939
PH
2078
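# Illustrative example (added by the editor; not in the original file); the
# result can be fed to json.loads():
#   js_to_json("{abc: 'def', ghi: 0x1F}") == '{"abc": "def", "ghi": 31}'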
2079
478c2c61
PH
2080def qualities(quality_ids):
2081 """ Get a numeric quality value out of a list of possible values """
2082 def q(qid):
2083 try:
2084 return quality_ids.index(qid)
2085 except ValueError:
2086 return -1
2087 return q
2088
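# Usage sketch (editor's illustration, not part of the upstream source):
#   q = qualities(['240p', '480p', '720p'])
#   q('480p') == 1 and q('1080p') == -1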
acd69589
PH
2089
2090DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
0a871f68 2091
a020a0dc
PH
2092
2093def limit_length(s, length):
2094 """ Add ellipses to overly long strings """
2095 if s is None:
2096 return None
2097 ELLIPSES = '...'
2098 if len(s) > length:
2099 return s[:length - len(ELLIPSES)] + ELLIPSES
2100 return s
48844745
PH
2101
2102
2103def version_tuple(v):
5f9b8394 2104 return tuple(int(e) for e in re.split(r'[-.]', v))
48844745
PH
2105
2106
2107def is_outdated_version(version, limit, assume_new=True):
2108 if not version:
2109 return not assume_new
2110 try:
2111 return version_tuple(version) < version_tuple(limit)
2112 except ValueError:
2113 return not assume_new
732ea2f0
PH
2114
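# Example values (editor's sketch, not upstream code):
#   version_tuple('2016.06.19.1') == (2016, 6, 19, 1)
#   is_outdated_version('1.0', '1.2') is True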
2115
2116def ytdl_is_updateable():
2117 """ Returns if youtube-dl can be updated with -U """
2118 from zipimport import zipimporter
2119
2120 return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')
7d4111ed
PH
2121
2122
2123def args_to_str(args):
2124 # Get a short string representation for a subprocess command
702ccf2d 2125 return ' '.join(compat_shlex_quote(a) for a in args)
2ccd1b10
PH
2126
2127
9b9c5355 2128def error_to_compat_str(err):
fdae2358
S
2129 err_str = str(err)
2130 # On Python 2 the error byte string must be decoded with the proper
2131 # encoding rather than ascii
2132 if sys.version_info[0] < 3:
2133 err_str = err_str.decode(preferredencoding())
2134 return err_str
2135
2136
c460bdd5 2137def mimetype2ext(mt):
eb9ee194
S
2138 if mt is None:
2139 return None
2140
765ac263
JMF
2141 ext = {
2142 'audio/mp4': 'm4a',
6c33d24b
YCH
2143 # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
2144 # it's the most popular one
2145 'audio/mpeg': 'mp3',
765ac263
JMF
2146 }.get(mt)
2147 if ext is not None:
2148 return ext
2149
c460bdd5 2150 _, _, res = mt.rpartition('/')
6562d34a 2151 res = res.split(';')[0].strip().lower()
c460bdd5
PH
2152
2153 return {
f6861ec9 2154 '3gpp': '3gp',
cafcf657 2155 'smptett+xml': 'tt',
2156 'srt': 'srt',
2157 'ttaf+xml': 'dfxp',
a0d8d704 2158 'ttml+xml': 'ttml',
cafcf657 2159 'vtt': 'vtt',
f6861ec9 2160 'x-flv': 'flv',
a0d8d704
YCH
2161 'x-mp4-fragmented': 'mp4',
2162 'x-ms-wmv': 'wmv',
b4173f15
RA
2163 'mpegurl': 'm3u8',
2164 'x-mpegurl': 'm3u8',
2165 'vnd.apple.mpegurl': 'm3u8',
2166 'dash+xml': 'mpd',
2167 'f4m': 'f4m',
2168 'f4m+xml': 'f4m',
f164b971 2169 'hds+xml': 'f4m',
e910fe2f 2170 'vnd.ms-sstr+xml': 'ism',
c2b2c7e1 2171 'quicktime': 'mov',
c460bdd5
PH
2172 }.get(res, res)
2173
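# Usage sketch (editor's illustration, not part of the upstream source):
#   mimetype2ext('audio/mp4') == 'm4a'
#   mimetype2ext('application/x-mpegURL') == 'm3u8'
#   mimetype2ext('video/x-flv') == 'flv'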
2174
4f3c5e06 2175def parse_codecs(codecs_str):
2176 # http://tools.ietf.org/html/rfc6381
2177 if not codecs_str:
2178 return {}
2179 splited_codecs = list(filter(None, map(
2180 lambda str: str.strip(), codecs_str.strip().strip(',').split(','))))
2181 vcodec, acodec = None, None
2182 for full_codec in splited_codecs:
2183 codec = full_codec.split('.')[0]
2184 if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v'):
2185 if not vcodec:
2186 vcodec = full_codec
073ac122 2187 elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3'):
4f3c5e06 2188 if not acodec:
2189 acodec = full_codec
2190 else:
2191 write_string('WARNING: Unknown codec %s' % full_codec, sys.stderr)
2192 if not vcodec and not acodec:
2193 if len(splited_codecs) == 2:
2194 return {
2195 'vcodec': vcodec,
2196 'acodec': acodec,
2197 }
2198 elif len(splited_codecs) == 1:
2199 return {
2200 'vcodec': 'none',
2201 'acodec': vcodec,
2202 }
2203 else:
2204 return {
2205 'vcodec': vcodec or 'none',
2206 'acodec': acodec or 'none',
2207 }
2208 return {}
2209
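# Illustrative examples (added by the editor; not in the original file):
#   parse_codecs('avc1.64001f, mp4a.40.2') == {'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2'}
#   parse_codecs('opus') == {'vcodec': 'none', 'acodec': 'opus'}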
2210
2ccd1b10 2211def urlhandle_detect_ext(url_handle):
79298173 2212 getheader = url_handle.headers.get
2ccd1b10 2213
b55ee18f
PH
2214 cd = getheader('Content-Disposition')
2215 if cd:
2216 m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
2217 if m:
2218 e = determine_ext(m.group('filename'), default_ext=None)
2219 if e:
2220 return e
2221
c460bdd5 2222 return mimetype2ext(getheader('Content-Type'))
05900629
PH
2223
2224
1e399778
YCH
2225def encode_data_uri(data, mime_type):
2226 return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
2227
2228
05900629 2229def age_restricted(content_limit, age_limit):
6ec6cb4e 2230 """ Returns True iff the content should be blocked """
05900629
PH
2231
2232 if age_limit is None: # No limit set
2233 return False
2234 if content_limit is None:
2235 return False # Content available for everyone
2236 return age_limit < content_limit
61ca9a80
PH
2237
2238
2239def is_html(first_bytes):
2240 """ Detect whether a file contains HTML by examining its first bytes. """
2241
2242 BOMS = [
2243 (b'\xef\xbb\xbf', 'utf-8'),
2244 (b'\x00\x00\xfe\xff', 'utf-32-be'),
2245 (b'\xff\xfe\x00\x00', 'utf-32-le'),
2246 (b'\xff\xfe', 'utf-16-le'),
2247 (b'\xfe\xff', 'utf-16-be'),
2248 ]
2249 for bom, enc in BOMS:
2250 if first_bytes.startswith(bom):
2251 s = first_bytes[len(bom):].decode(enc, 'replace')
2252 break
2253 else:
2254 s = first_bytes.decode('utf-8', 'replace')
2255
2256 return re.match(r'^\s*<', s)
a055469f
PH
2257
2258
2259def determine_protocol(info_dict):
2260 protocol = info_dict.get('protocol')
2261 if protocol is not None:
2262 return protocol
2263
2264 url = info_dict['url']
2265 if url.startswith('rtmp'):
2266 return 'rtmp'
2267 elif url.startswith('mms'):
2268 return 'mms'
2269 elif url.startswith('rtsp'):
2270 return 'rtsp'
2271
2272 ext = determine_ext(url)
2273 if ext == 'm3u8':
2274 return 'm3u8'
2275 elif ext == 'f4m':
2276 return 'f4m'
2277
2278 return compat_urllib_parse_urlparse(url).scheme
cfb56d1a
PH
2279
2280
2281def render_table(header_row, data):
2282 """ Render a list of rows, each as a list of values """
2283 table = [header_row] + data
2284 max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)]
2285 format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s'
2286 return '\n'.join(format_str % tuple(row) for row in table)
347de493
PH
2287
2288
2289def _match_one(filter_part, dct):
2290 COMPARISON_OPERATORS = {
2291 '<': operator.lt,
2292 '<=': operator.le,
2293 '>': operator.gt,
2294 '>=': operator.ge,
2295 '=': operator.eq,
2296 '!=': operator.ne,
2297 }
2298 operator_rex = re.compile(r'''(?x)\s*
2299 (?P<key>[a-z_]+)
2300 \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
2301 (?:
2302 (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
2303 (?P<strval>(?![0-9.])[a-z0-9A-Z]*)
2304 )
2305 \s*$
2306 ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
2307 m = operator_rex.search(filter_part)
2308 if m:
2309 op = COMPARISON_OPERATORS[m.group('op')]
2310 if m.group('strval') is not None:
2311 if m.group('op') not in ('=', '!='):
2312 raise ValueError(
2313 'Operator %s does not support string values!' % m.group('op'))
2314 comparison_value = m.group('strval')
2315 else:
2316 try:
2317 comparison_value = int(m.group('intval'))
2318 except ValueError:
2319 comparison_value = parse_filesize(m.group('intval'))
2320 if comparison_value is None:
2321 comparison_value = parse_filesize(m.group('intval') + 'B')
2322 if comparison_value is None:
2323 raise ValueError(
2324 'Invalid integer value %r in filter part %r' % (
2325 m.group('intval'), filter_part))
2326 actual_value = dct.get(m.group('key'))
2327 if actual_value is None:
2328 return m.group('none_inclusive')
2329 return op(actual_value, comparison_value)
2330
2331 UNARY_OPERATORS = {
2332 '': lambda v: v is not None,
2333 '!': lambda v: v is None,
2334 }
2335 operator_rex = re.compile(r'''(?x)\s*
2336 (?P<op>%s)\s*(?P<key>[a-z_]+)
2337 \s*$
2338 ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
2339 m = operator_rex.search(filter_part)
2340 if m:
2341 op = UNARY_OPERATORS[m.group('op')]
2342 actual_value = dct.get(m.group('key'))
2343 return op(actual_value)
2344
2345 raise ValueError('Invalid filter part %r' % filter_part)
2346
2347
2348def match_str(filter_str, dct):
2349 """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """
2350
2351 return all(
2352 _match_one(filter_part, dct) for filter_part in filter_str.split('&'))
2353
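# Usage sketch (editor's illustration, not part of the upstream source):
#   match_str('duration > 60 & view_count', {'duration': 90, 'view_count': 5}) is True
#   match_str('!is_live', {'is_live': True}) is False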
2354
2355def match_filter_func(filter_str):
2356 def _match_func(info_dict):
2357 if match_str(filter_str, info_dict):
2358 return None
2359 else:
2360 video_title = info_dict.get('title', info_dict.get('id', 'video'))
2361 return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
2362 return _match_func
91410c9b
PH
2363
2364
bf6427d2
YCH
2365def parse_dfxp_time_expr(time_expr):
2366 if not time_expr:
d631d5f9 2367 return
bf6427d2
YCH
2368
2369 mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
2370 if mobj:
2371 return float(mobj.group('time_offset'))
2372
db2fe38b 2373 mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
bf6427d2 2374 if mobj:
db2fe38b 2375 return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
bf6427d2
YCH
2376
2377
c1c924ab
YCH
2378def srt_subtitles_timecode(seconds):
2379 return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)
bf6427d2
YCH
2380
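# Example values (editor's sketch, not upstream code):
#   parse_dfxp_time_expr('00:01:30.5') == 90.5
#   parse_dfxp_time_expr('12.3s') == 12.3
#   srt_subtitles_timecode(90.5) == '00:01:30,500'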
2381
2382def dfxp2srt(dfxp_data):
4e335771
YCH
2383 _x = functools.partial(xpath_with_ns, ns_map={
2384 'ttml': 'http://www.w3.org/ns/ttml',
2385 'ttaf1': 'http://www.w3.org/2006/10/ttaf1',
5bf28d78 2386 'ttaf1_0604': 'http://www.w3.org/2006/04/ttaf1',
4e335771 2387 })
bf6427d2 2388
87de7069 2389 class TTMLPElementParser(object):
2b14cb56 2390 out = ''
bf6427d2 2391
2b14cb56 2392 def start(self, tag, attrib):
2393 if tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'):
2394 self.out += '\n'
bf6427d2 2395
2b14cb56 2396 def end(self, tag):
2397 pass
bf6427d2 2398
2b14cb56 2399 def data(self, data):
2400 self.out += data
2401
2402 def close(self):
2403 return self.out.strip()
2404
2405 def parse_node(node):
2406 target = TTMLPElementParser()
2407 parser = xml.etree.ElementTree.XMLParser(target=target)
2408 parser.feed(xml.etree.ElementTree.tostring(node))
2409 return parser.close()
bf6427d2 2410
36e6f62c 2411 dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
bf6427d2 2412 out = []
5bf28d78 2413 paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall(_x('.//ttaf1_0604:p')) or dfxp.findall('.//p')
1b0427e6
YCH
2414
2415 if not paras:
2416 raise ValueError('Invalid dfxp/TTML subtitle')
bf6427d2
YCH
2417
2418 for para, index in zip(paras, itertools.count(1)):
d631d5f9 2419 begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
7dff0363 2420 end_time = parse_dfxp_time_expr(para.attrib.get('end'))
d631d5f9
YCH
2421 dur = parse_dfxp_time_expr(para.attrib.get('dur'))
2422 if begin_time is None:
2423 continue
7dff0363 2424 if not end_time:
d631d5f9
YCH
2425 if not dur:
2426 continue
2427 end_time = begin_time + dur
bf6427d2
YCH
2428 out.append('%d\n%s --> %s\n%s\n\n' % (
2429 index,
c1c924ab
YCH
2430 srt_subtitles_timecode(begin_time),
2431 srt_subtitles_timecode(end_time),
bf6427d2
YCH
2432 parse_node(para)))
2433
2434 return ''.join(out)
2435
2436
66e289ba
S
2437def cli_option(params, command_option, param):
2438 param = params.get(param)
98e698f1
RA
2439 if param:
2440 param = compat_str(param)
66e289ba
S
2441 return [command_option, param] if param is not None else []
2442
2443
2444def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
2445 param = params.get(param)
2446 assert isinstance(param, bool)
2447 if separator:
2448 return [command_option + separator + (true_value if param else false_value)]
2449 return [command_option, true_value if param else false_value]
2450
2451
2452def cli_valueless_option(params, command_option, param, expected_value=True):
2453 param = params.get(param)
2454 return [command_option] if param == expected_value else []
2455
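# Usage sketch (editor's illustration, not part of the upstream source):
#   cli_option({'proxy': 'http://127.0.0.1:3128'}, '--proxy', 'proxy')
#       == ['--proxy', 'http://127.0.0.1:3128']
#   cli_bool_option({'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate')
#       == ['--no-check-certificate', 'true']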
2456
2457def cli_configuration_args(params, param, default=[]):
2458 ex_args = params.get(param)
2459 if ex_args is None:
2460 return default
2461 assert isinstance(ex_args, list)
2462 return ex_args
2463
2464
39672624
YCH
2465class ISO639Utils(object):
2466 # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
2467 _lang_map = {
2468 'aa': 'aar',
2469 'ab': 'abk',
2470 'ae': 'ave',
2471 'af': 'afr',
2472 'ak': 'aka',
2473 'am': 'amh',
2474 'an': 'arg',
2475 'ar': 'ara',
2476 'as': 'asm',
2477 'av': 'ava',
2478 'ay': 'aym',
2479 'az': 'aze',
2480 'ba': 'bak',
2481 'be': 'bel',
2482 'bg': 'bul',
2483 'bh': 'bih',
2484 'bi': 'bis',
2485 'bm': 'bam',
2486 'bn': 'ben',
2487 'bo': 'bod',
2488 'br': 'bre',
2489 'bs': 'bos',
2490 'ca': 'cat',
2491 'ce': 'che',
2492 'ch': 'cha',
2493 'co': 'cos',
2494 'cr': 'cre',
2495 'cs': 'ces',
2496 'cu': 'chu',
2497 'cv': 'chv',
2498 'cy': 'cym',
2499 'da': 'dan',
2500 'de': 'deu',
2501 'dv': 'div',
2502 'dz': 'dzo',
2503 'ee': 'ewe',
2504 'el': 'ell',
2505 'en': 'eng',
2506 'eo': 'epo',
2507 'es': 'spa',
2508 'et': 'est',
2509 'eu': 'eus',
2510 'fa': 'fas',
2511 'ff': 'ful',
2512 'fi': 'fin',
2513 'fj': 'fij',
2514 'fo': 'fao',
2515 'fr': 'fra',
2516 'fy': 'fry',
2517 'ga': 'gle',
2518 'gd': 'gla',
2519 'gl': 'glg',
2520 'gn': 'grn',
2521 'gu': 'guj',
2522 'gv': 'glv',
2523 'ha': 'hau',
2524 'he': 'heb',
2525 'hi': 'hin',
2526 'ho': 'hmo',
2527 'hr': 'hrv',
2528 'ht': 'hat',
2529 'hu': 'hun',
2530 'hy': 'hye',
2531 'hz': 'her',
2532 'ia': 'ina',
2533 'id': 'ind',
2534 'ie': 'ile',
2535 'ig': 'ibo',
2536 'ii': 'iii',
2537 'ik': 'ipk',
2538 'io': 'ido',
2539 'is': 'isl',
2540 'it': 'ita',
2541 'iu': 'iku',
2542 'ja': 'jpn',
2543 'jv': 'jav',
2544 'ka': 'kat',
2545 'kg': 'kon',
2546 'ki': 'kik',
2547 'kj': 'kua',
2548 'kk': 'kaz',
2549 'kl': 'kal',
2550 'km': 'khm',
2551 'kn': 'kan',
2552 'ko': 'kor',
2553 'kr': 'kau',
2554 'ks': 'kas',
2555 'ku': 'kur',
2556 'kv': 'kom',
2557 'kw': 'cor',
2558 'ky': 'kir',
2559 'la': 'lat',
2560 'lb': 'ltz',
2561 'lg': 'lug',
2562 'li': 'lim',
2563 'ln': 'lin',
2564 'lo': 'lao',
2565 'lt': 'lit',
2566 'lu': 'lub',
2567 'lv': 'lav',
2568 'mg': 'mlg',
2569 'mh': 'mah',
2570 'mi': 'mri',
2571 'mk': 'mkd',
2572 'ml': 'mal',
2573 'mn': 'mon',
2574 'mr': 'mar',
2575 'ms': 'msa',
2576 'mt': 'mlt',
2577 'my': 'mya',
2578 'na': 'nau',
2579 'nb': 'nob',
2580 'nd': 'nde',
2581 'ne': 'nep',
2582 'ng': 'ndo',
2583 'nl': 'nld',
2584 'nn': 'nno',
2585 'no': 'nor',
2586 'nr': 'nbl',
2587 'nv': 'nav',
2588 'ny': 'nya',
2589 'oc': 'oci',
2590 'oj': 'oji',
2591 'om': 'orm',
2592 'or': 'ori',
2593 'os': 'oss',
2594 'pa': 'pan',
2595 'pi': 'pli',
2596 'pl': 'pol',
2597 'ps': 'pus',
2598 'pt': 'por',
2599 'qu': 'que',
2600 'rm': 'roh',
2601 'rn': 'run',
2602 'ro': 'ron',
2603 'ru': 'rus',
2604 'rw': 'kin',
2605 'sa': 'san',
2606 'sc': 'srd',
2607 'sd': 'snd',
2608 'se': 'sme',
2609 'sg': 'sag',
2610 'si': 'sin',
2611 'sk': 'slk',
2612 'sl': 'slv',
2613 'sm': 'smo',
2614 'sn': 'sna',
2615 'so': 'som',
2616 'sq': 'sqi',
2617 'sr': 'srp',
2618 'ss': 'ssw',
2619 'st': 'sot',
2620 'su': 'sun',
2621 'sv': 'swe',
2622 'sw': 'swa',
2623 'ta': 'tam',
2624 'te': 'tel',
2625 'tg': 'tgk',
2626 'th': 'tha',
2627 'ti': 'tir',
2628 'tk': 'tuk',
2629 'tl': 'tgl',
2630 'tn': 'tsn',
2631 'to': 'ton',
2632 'tr': 'tur',
2633 'ts': 'tso',
2634 'tt': 'tat',
2635 'tw': 'twi',
2636 'ty': 'tah',
2637 'ug': 'uig',
2638 'uk': 'ukr',
2639 'ur': 'urd',
2640 'uz': 'uzb',
2641 've': 'ven',
2642 'vi': 'vie',
2643 'vo': 'vol',
2644 'wa': 'wln',
2645 'wo': 'wol',
2646 'xh': 'xho',
2647 'yi': 'yid',
2648 'yo': 'yor',
2649 'za': 'zha',
2650 'zh': 'zho',
2651 'zu': 'zul',
2652 }
2653
2654 @classmethod
2655 def short2long(cls, code):
2656 """Convert language code from ISO 639-1 to ISO 639-2/T"""
2657 return cls._lang_map.get(code[:2])
2658
2659 @classmethod
2660 def long2short(cls, code):
2661 """Convert language code from ISO 639-2/T to ISO 639-1"""
2662 for short_name, long_name in cls._lang_map.items():
2663 if long_name == code:
2664 return short_name
2665
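# Usage sketch (editor's illustration, not part of the upstream source):
#   ISO639Utils.short2long('en') == 'eng'
#   ISO639Utils.long2short('fra') == 'fr'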
2666
4eb10f66
YCH
2667class ISO3166Utils(object):
2668 # From http://data.okfn.org/data/core/country-list
2669 _country_map = {
2670 'AF': 'Afghanistan',
2671 'AX': 'Åland Islands',
2672 'AL': 'Albania',
2673 'DZ': 'Algeria',
2674 'AS': 'American Samoa',
2675 'AD': 'Andorra',
2676 'AO': 'Angola',
2677 'AI': 'Anguilla',
2678 'AQ': 'Antarctica',
2679 'AG': 'Antigua and Barbuda',
2680 'AR': 'Argentina',
2681 'AM': 'Armenia',
2682 'AW': 'Aruba',
2683 'AU': 'Australia',
2684 'AT': 'Austria',
2685 'AZ': 'Azerbaijan',
2686 'BS': 'Bahamas',
2687 'BH': 'Bahrain',
2688 'BD': 'Bangladesh',
2689 'BB': 'Barbados',
2690 'BY': 'Belarus',
2691 'BE': 'Belgium',
2692 'BZ': 'Belize',
2693 'BJ': 'Benin',
2694 'BM': 'Bermuda',
2695 'BT': 'Bhutan',
2696 'BO': 'Bolivia, Plurinational State of',
2697 'BQ': 'Bonaire, Sint Eustatius and Saba',
2698 'BA': 'Bosnia and Herzegovina',
2699 'BW': 'Botswana',
2700 'BV': 'Bouvet Island',
2701 'BR': 'Brazil',
2702 'IO': 'British Indian Ocean Territory',
2703 'BN': 'Brunei Darussalam',
2704 'BG': 'Bulgaria',
2705 'BF': 'Burkina Faso',
2706 'BI': 'Burundi',
2707 'KH': 'Cambodia',
2708 'CM': 'Cameroon',
2709 'CA': 'Canada',
2710 'CV': 'Cape Verde',
2711 'KY': 'Cayman Islands',
2712 'CF': 'Central African Republic',
2713 'TD': 'Chad',
2714 'CL': 'Chile',
2715 'CN': 'China',
2716 'CX': 'Christmas Island',
2717 'CC': 'Cocos (Keeling) Islands',
2718 'CO': 'Colombia',
2719 'KM': 'Comoros',
2720 'CG': 'Congo',
2721 'CD': 'Congo, the Democratic Republic of the',
2722 'CK': 'Cook Islands',
2723 'CR': 'Costa Rica',
2724 'CI': 'Côte d\'Ivoire',
2725 'HR': 'Croatia',
2726 'CU': 'Cuba',
2727 'CW': 'Curaçao',
2728 'CY': 'Cyprus',
2729 'CZ': 'Czech Republic',
2730 'DK': 'Denmark',
2731 'DJ': 'Djibouti',
2732 'DM': 'Dominica',
2733 'DO': 'Dominican Republic',
2734 'EC': 'Ecuador',
2735 'EG': 'Egypt',
2736 'SV': 'El Salvador',
2737 'GQ': 'Equatorial Guinea',
2738 'ER': 'Eritrea',
2739 'EE': 'Estonia',
2740 'ET': 'Ethiopia',
2741 'FK': 'Falkland Islands (Malvinas)',
2742 'FO': 'Faroe Islands',
2743 'FJ': 'Fiji',
2744 'FI': 'Finland',
2745 'FR': 'France',
2746 'GF': 'French Guiana',
2747 'PF': 'French Polynesia',
2748 'TF': 'French Southern Territories',
2749 'GA': 'Gabon',
2750 'GM': 'Gambia',
2751 'GE': 'Georgia',
2752 'DE': 'Germany',
2753 'GH': 'Ghana',
2754 'GI': 'Gibraltar',
2755 'GR': 'Greece',
2756 'GL': 'Greenland',
2757 'GD': 'Grenada',
2758 'GP': 'Guadeloupe',
2759 'GU': 'Guam',
2760 'GT': 'Guatemala',
2761 'GG': 'Guernsey',
2762 'GN': 'Guinea',
2763 'GW': 'Guinea-Bissau',
2764 'GY': 'Guyana',
2765 'HT': 'Haiti',
2766 'HM': 'Heard Island and McDonald Islands',
2767 'VA': 'Holy See (Vatican City State)',
2768 'HN': 'Honduras',
2769 'HK': 'Hong Kong',
2770 'HU': 'Hungary',
2771 'IS': 'Iceland',
2772 'IN': 'India',
2773 'ID': 'Indonesia',
2774 'IR': 'Iran, Islamic Republic of',
2775 'IQ': 'Iraq',
2776 'IE': 'Ireland',
2777 'IM': 'Isle of Man',
2778 'IL': 'Israel',
2779 'IT': 'Italy',
2780 'JM': 'Jamaica',
2781 'JP': 'Japan',
2782 'JE': 'Jersey',
2783 'JO': 'Jordan',
2784 'KZ': 'Kazakhstan',
2785 'KE': 'Kenya',
2786 'KI': 'Kiribati',
2787 'KP': 'Korea, Democratic People\'s Republic of',
2788 'KR': 'Korea, Republic of',
2789 'KW': 'Kuwait',
2790 'KG': 'Kyrgyzstan',
2791 'LA': 'Lao People\'s Democratic Republic',
2792 'LV': 'Latvia',
2793 'LB': 'Lebanon',
2794 'LS': 'Lesotho',
2795 'LR': 'Liberia',
2796 'LY': 'Libya',
2797 'LI': 'Liechtenstein',
2798 'LT': 'Lithuania',
2799 'LU': 'Luxembourg',
2800 'MO': 'Macao',
2801 'MK': 'Macedonia, the Former Yugoslav Republic of',
2802 'MG': 'Madagascar',
2803 'MW': 'Malawi',
2804 'MY': 'Malaysia',
2805 'MV': 'Maldives',
2806 'ML': 'Mali',
2807 'MT': 'Malta',
2808 'MH': 'Marshall Islands',
2809 'MQ': 'Martinique',
2810 'MR': 'Mauritania',
2811 'MU': 'Mauritius',
2812 'YT': 'Mayotte',
2813 'MX': 'Mexico',
2814 'FM': 'Micronesia, Federated States of',
2815 'MD': 'Moldova, Republic of',
2816 'MC': 'Monaco',
2817 'MN': 'Mongolia',
2818 'ME': 'Montenegro',
2819 'MS': 'Montserrat',
2820 'MA': 'Morocco',
2821 'MZ': 'Mozambique',
2822 'MM': 'Myanmar',
2823 'NA': 'Namibia',
2824 'NR': 'Nauru',
2825 'NP': 'Nepal',
2826 'NL': 'Netherlands',
2827 'NC': 'New Caledonia',
2828 'NZ': 'New Zealand',
2829 'NI': 'Nicaragua',
2830 'NE': 'Niger',
2831 'NG': 'Nigeria',
2832 'NU': 'Niue',
2833 'NF': 'Norfolk Island',
2834 'MP': 'Northern Mariana Islands',
2835 'NO': 'Norway',
2836 'OM': 'Oman',
2837 'PK': 'Pakistan',
2838 'PW': 'Palau',
2839 'PS': 'Palestine, State of',
2840 'PA': 'Panama',
2841 'PG': 'Papua New Guinea',
2842 'PY': 'Paraguay',
2843 'PE': 'Peru',
2844 'PH': 'Philippines',
2845 'PN': 'Pitcairn',
2846 'PL': 'Poland',
2847 'PT': 'Portugal',
2848 'PR': 'Puerto Rico',
2849 'QA': 'Qatar',
2850 'RE': 'Réunion',
2851 'RO': 'Romania',
2852 'RU': 'Russian Federation',
2853 'RW': 'Rwanda',
2854 'BL': 'Saint Barthélemy',
2855 'SH': 'Saint Helena, Ascension and Tristan da Cunha',
2856 'KN': 'Saint Kitts and Nevis',
2857 'LC': 'Saint Lucia',
2858 'MF': 'Saint Martin (French part)',
2859 'PM': 'Saint Pierre and Miquelon',
2860 'VC': 'Saint Vincent and the Grenadines',
2861 'WS': 'Samoa',
2862 'SM': 'San Marino',
2863 'ST': 'Sao Tome and Principe',
2864 'SA': 'Saudi Arabia',
2865 'SN': 'Senegal',
2866 'RS': 'Serbia',
2867 'SC': 'Seychelles',
2868 'SL': 'Sierra Leone',
2869 'SG': 'Singapore',
2870 'SX': 'Sint Maarten (Dutch part)',
2871 'SK': 'Slovakia',
2872 'SI': 'Slovenia',
2873 'SB': 'Solomon Islands',
2874 'SO': 'Somalia',
2875 'ZA': 'South Africa',
2876 'GS': 'South Georgia and the South Sandwich Islands',
2877 'SS': 'South Sudan',
2878 'ES': 'Spain',
2879 'LK': 'Sri Lanka',
2880 'SD': 'Sudan',
2881 'SR': 'Suriname',
2882 'SJ': 'Svalbard and Jan Mayen',
2883 'SZ': 'Swaziland',
2884 'SE': 'Sweden',
2885 'CH': 'Switzerland',
2886 'SY': 'Syrian Arab Republic',
2887 'TW': 'Taiwan, Province of China',
2888 'TJ': 'Tajikistan',
2889 'TZ': 'Tanzania, United Republic of',
2890 'TH': 'Thailand',
2891 'TL': 'Timor-Leste',
2892 'TG': 'Togo',
2893 'TK': 'Tokelau',
2894 'TO': 'Tonga',
2895 'TT': 'Trinidad and Tobago',
2896 'TN': 'Tunisia',
2897 'TR': 'Turkey',
2898 'TM': 'Turkmenistan',
2899 'TC': 'Turks and Caicos Islands',
2900 'TV': 'Tuvalu',
2901 'UG': 'Uganda',
2902 'UA': 'Ukraine',
2903 'AE': 'United Arab Emirates',
2904 'GB': 'United Kingdom',
2905 'US': 'United States',
2906 'UM': 'United States Minor Outlying Islands',
2907 'UY': 'Uruguay',
2908 'UZ': 'Uzbekistan',
2909 'VU': 'Vanuatu',
2910 'VE': 'Venezuela, Bolivarian Republic of',
2911 'VN': 'Viet Nam',
2912 'VG': 'Virgin Islands, British',
2913 'VI': 'Virgin Islands, U.S.',
2914 'WF': 'Wallis and Futuna',
2915 'EH': 'Western Sahara',
2916 'YE': 'Yemen',
2917 'ZM': 'Zambia',
2918 'ZW': 'Zimbabwe',
2919 }
2920
2921 @classmethod
2922 def short2full(cls, code):
2923 """Convert an ISO 3166-2 country code to the corresponding full name"""
2924 return cls._country_map.get(code.upper())
2925
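# Illustrative example (added by the editor; not in the original file):
#   ISO3166Utils.short2full('DE') == 'Germany'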
2926
91410c9b 2927class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
2461f79d
PH
2928 def __init__(self, proxies=None):
2929 # Set default handlers
2930 for type in ('http', 'https'):
2931 setattr(self, '%s_open' % type,
2932 lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
2933 meth(r, proxy, type))
2934 return compat_urllib_request.ProxyHandler.__init__(self, proxies)
2935
91410c9b 2936 def proxy_open(self, req, proxy, type):
2461f79d 2937 req_proxy = req.headers.get('Ytdl-request-proxy')
91410c9b
PH
2938 if req_proxy is not None:
2939 proxy = req_proxy
2461f79d
PH
2940 del req.headers['Ytdl-request-proxy']
2941
2942 if proxy == '__noproxy__':
2943 return None # No Proxy
51fb4995 2944 if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
71aff188
YCH
2945 req.add_header('Ytdl-socks-proxy', proxy)
2946 # youtube-dl's http/https handlers take care of wrapping the socket with SOCKS
2947 return None
91410c9b
PH
2948 return compat_urllib_request.ProxyHandler.proxy_open(
2949 self, req, proxy, type)
5bc880b9
YCH
2950
2951
2952def ohdave_rsa_encrypt(data, exponent, modulus):
2953 '''
2954 Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
2955
2956 Input:
2957 data: data to encrypt, bytes-like object
2958 exponent, modulus: parameter e and N of RSA algorithm, both integer
2959 Output: hex string of encrypted data
2960
2961 Limitation: supports one block encryption only
2962 '''
2963
2964 payload = int(binascii.hexlify(data[::-1]), 16)
2965 encrypted = pow(payload, exponent, modulus)
2966 return '%x' % encrypted
81bdc8fd
YCH
2967
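# Usage sketch (editor's illustration, not part of the upstream source), with
# deliberately tiny toy parameters:
#   ohdave_rsa_encrypt(b'\x02', 3, 33) == '8'   # pow(2, 3, 33) == 8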
2968
5eb6bdce 2969def encode_base_n(num, n, table=None):
59f898b7 2970 FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
59f898b7
YCH
2971 if not table:
2972 table = FULL_TABLE[:n]
2973
5eb6bdce
YCH
2974 if n > len(table):
2975 raise ValueError('base %d exceeds table length %d' % (n, len(table)))
2976
2977 if num == 0:
2978 return table[0]
2979
81bdc8fd
YCH
2980 ret = ''
2981 while num:
2982 ret = table[num % n] + ret
2983 num = num // n
2984 return ret
f52354a8
YCH
2985
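# Example values (editor's sketch, not upstream code):
#   encode_base_n(62, 16) == '3e'
#   encode_base_n(255, 2) == '11111111'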
2986
2987def decode_packed_codes(code):
2988 mobj = re.search(
680079be 2989 r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)",
f52354a8
YCH
2990 code)
2991 obfuscated_code, base, count, symbols = mobj.groups()
2992 base = int(base)
2993 count = int(count)
2994 symbols = symbols.split('|')
2995 symbol_table = {}
2996
2997 while count:
2998 count -= 1
5eb6bdce 2999 base_n_count = encode_base_n(count, base)
f52354a8
YCH
3000 symbol_table[base_n_count] = symbols[count] or base_n_count
3001
3002 return re.sub(
3003 r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
3004 obfuscated_code)
e154c651 3005
3006
3007def parse_m3u8_attributes(attrib):
3008 info = {}
3009 for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
3010 if val.startswith('"'):
3011 val = val[1:-1]
3012 info[key] = val
3013 return info
1143535d
YCH
3014
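# Usage sketch (editor's illustration, not part of the upstream source):
#   parse_m3u8_attributes('BANDWIDTH=1280000,CODECS="mp4a.40.2,avc1.4d401e"')
#       == {'BANDWIDTH': '1280000', 'CODECS': 'mp4a.40.2,avc1.4d401e'}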
3015
3016def urshift(val, n):
3017 return val >> n if val >= 0 else (val + 0x100000000) >> n
d3f8e038
YCH
3018
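# Illustrative examples (added by the editor; not in the original file):
#   urshift(16, 2) == 4
#   urshift(-1, 4) == 0x0fffffff  # logical (unsigned) right shift on 32 bits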
3019
3020# Based on png2str() written by @gdkchan and improved by @yokrysty
3021# Originally posted at https://github.com/rg3/youtube-dl/issues/9706
3022def decode_png(png_data):
3023 # Reference: https://www.w3.org/TR/PNG/
3024 header = png_data[8:]
3025
3026 if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
3027 raise IOError('Not a valid PNG file.')
3028
3029 int_map = {1: '>B', 2: '>H', 4: '>I'}
3030 unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]
3031
3032 chunks = []
3033
3034 while header:
3035 length = unpack_integer(header[:4])
3036 header = header[4:]
3037
3038 chunk_type = header[:4]
3039 header = header[4:]
3040
3041 chunk_data = header[:length]
3042 header = header[length:]
3043
3044 header = header[4:] # Skip CRC
3045
3046 chunks.append({
3047 'type': chunk_type,
3048 'length': length,
3049 'data': chunk_data
3050 })
3051
3052 ihdr = chunks[0]['data']
3053
3054 width = unpack_integer(ihdr[:4])
3055 height = unpack_integer(ihdr[4:8])
3056
3057 idat = b''
3058
3059 for chunk in chunks:
3060 if chunk['type'] == b'IDAT':
3061 idat += chunk['data']
3062
3063 if not idat:
3064 raise IOError('Unable to read PNG data.')
3065
3066 decompressed_data = bytearray(zlib.decompress(idat))
3067
3068 stride = width * 3
3069 pixels = []
3070
3071 def _get_pixel(idx):
3072 x = idx % stride
3073 y = idx // stride
3074 return pixels[y][x]
3075
3076 for y in range(height):
3077 basePos = y * (1 + stride)
3078 filter_type = decompressed_data[basePos]
3079
3080 current_row = []
3081
3082 pixels.append(current_row)
3083
3084 for x in range(stride):
3085 color = decompressed_data[1 + basePos + x]
3086 basex = y * stride + x
3087 left = 0
3088 up = 0
3089
3090 if x > 2:
3091 left = _get_pixel(basex - 3)
3092 if y > 0:
3093 up = _get_pixel(basex - stride)
3094
3095 if filter_type == 1: # Sub
3096 color = (color + left) & 0xff
3097 elif filter_type == 2: # Up
3098 color = (color + up) & 0xff
3099 elif filter_type == 3: # Average
3100 color = (color + ((left + up) >> 1)) & 0xff
3101 elif filter_type == 4: # Paeth
3102 a = left
3103 b = up
3104 c = 0
3105
3106 if x > 2 and y > 0:
3107 c = _get_pixel(basex - stride - 3)
3108
3109 p = a + b - c
3110
3111 pa = abs(p - a)
3112 pb = abs(p - b)
3113 pc = abs(p - c)
3114
3115 if pa <= pb and pa <= pc:
3116 color = (color + a) & 0xff
3117 elif pb <= pc:
3118 color = (color + b) & 0xff
3119 else:
3120 color = (color + c) & 0xff
3121
3122 current_row.append(color)
3123
3124 return width, height, pixels