]> jfr.im git - yt-dlp.git/blob - youtube_dl/utils.py
Merge branch 'jython-support'
[yt-dlp.git] / youtube_dl / utils.py
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 from __future__ import unicode_literals
5
6 import base64
7 import binascii
8 import calendar
9 import codecs
10 import contextlib
11 import ctypes
12 import datetime
13 import email.utils
14 import errno
15 import functools
16 import gzip
17 import itertools
18 import io
19 import json
20 import locale
21 import math
22 import operator
23 import os
24 import pipes
25 import platform
26 import re
27 import ssl
28 import socket
29 import struct
30 import subprocess
31 import sys
32 import tempfile
33 import traceback
34 import xml.etree.ElementTree
35 import zlib
36
37 from .compat import (
38 compat_basestring,
39 compat_chr,
40 compat_etree_fromstring,
41 compat_html_entities,
42 compat_http_client,
43 compat_kwargs,
44 compat_parse_qs,
45 compat_socket_create_connection,
46 compat_str,
47 compat_urllib_error,
48 compat_urllib_parse,
49 compat_urllib_parse_urlparse,
50 compat_urllib_request,
51 compat_urlparse,
52 shlex_quote,
53 )
54
55
# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))

# Default headers attached to every outgoing HTTP request (see
# YoutubeDLHandler.http_request); the User-Agent is deliberately generic.
std_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/44.0 (Chrome)',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
}


# Unique sentinel used by the xpath_* helpers to distinguish
# "no default supplied" from an explicit default of None
NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

# Media file extensions recognised by determine_ext when the URL does not
# end in a clean ".ext" suffix (e.g. .../bar.mp4/?download)
KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'avi', 'divx',
    'mov',
    'asf', 'wmv', 'wma',
    '3gp', '3g2',
    'mp3',
    'flac',
    'ape',
    'wav',
    'f4f', 'f4m', 'm3u8', 'smil')
89
def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        encoding = locale.getpreferredencoding()
        # Verify the reported codec is actually usable before trusting it
        'TEST'.encode(encoding)
    except Exception:
        encoding = 'UTF-8'
    return encoding
103
104
def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non ascii characters unless we
        # use a unicode object
        # (fixed: these lambdas previously ignored their argument `f` and
        # closed over `fn` instead, which only worked by accident)
        path_basename = lambda f: os.path.basename(f).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(f).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    # Create the temp file next to the target so os.rename stays on one
    # filesystem (a cross-device rename would not be atomic)
    args = {
        'suffix': '.tmp',
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args.update({
            'mode': 'w',
            'encoding': 'utf-8',
        })

    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))

    try:
        with tf:
            json.dump(obj, tf)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        os.rename(tf.name, fn)
    except Exception:
        # Best-effort cleanup of the orphaned temp file, then re-raise
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise
157
158
if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] """
        assert re.match(r'^[a-zA-Z_-]+$', key)
        if val is None:
            predicate = '[@%s]' % key
        else:
            predicate = "[@%s='%s']" % (key, val)
        return node.find(xpath + predicate)
else:
    def find_xpath_attr(node, xpath, key, val=None):
        # Here comes the crazy part: In 2.6, if the xpath is a unicode,
        # .//node does not match if a node is a direct child of . !
        if isinstance(xpath, compat_str):
            xpath = xpath.encode('ascii')

        for candidate in node.findall(xpath):
            if key not in candidate.attrib:
                continue
            if val is None or candidate.attrib.get(key) == val:
                return candidate
        return None
178
179 # On python2.6 the xml.etree.ElementTree.Element methods don't support
180 # the namespace parameter
181
182
def xpath_with_ns(path, ns_map):
    """Expand 'prefix:tag' steps of an XPath into '{uri}tag' Clark notation
    using the prefix -> URI mapping in ns_map."""
    expanded = []
    for step in path.split('/'):
        parts = step.split(':')
        if len(parts) == 1:
            expanded.append(parts[0])
        else:
            prefix, tag = parts
            expanded.append('{%s}%s' % (ns_map[prefix], tag))
    return '/'.join(expanded)
193
194
def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    """Find an element by xpath (a string, or an iterable of candidate
    xpaths tried in order).  Returns `default` if given and nothing matched,
    raises ExtractorError if `fatal`, otherwise returns None."""
    def _find_xpath(xpath):
        if sys.version_info < (2, 7):  # Crazy 2.6
            xpath = xpath.encode('ascii')
        return node.find(xpath)

    if isinstance(xpath, (str, compat_str)):
        n = _find_xpath(xpath)
    else:
        # Fixed: initialize n so an empty candidate iterable yields the
        # not-found path instead of raising NameError
        n = None
        for xp in xpath:
            n = _find_xpath(xp)
            if n is not None:
                break

    if n is None:
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            name = xpath if name is None else name
            raise ExtractorError('Could not find XML element %s' % name)
        else:
            return None
    return n
218
219
def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    """Like xpath_element, but return the matched element's .text."""
    n = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if n is None or n == default:
        return n
    if n.text is not None:
        return n.text
    # Element exists but carries no text
    if default is not NO_DEFAULT:
        return default
    if fatal:
        name = xpath if name is None else name
        raise ExtractorError('Could not find XML element\'s text %s' % name)
    return None
233
234
def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    """Find xpath[@key] via find_xpath_attr and return that attribute's value."""
    n = find_xpath_attr(node, xpath, key)
    if n is not None:
        return n.attrib[key]
    if default is not NO_DEFAULT:
        return default
    if fatal:
        name = '%s[@%s]' % (xpath, key) if name is None else name
        raise ExtractorError('Could not find XML attribute %s' % name)
    return None
246
247
def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    # Thin wrapper over get_element_by_attribute; the parameter name `id`
    # shadows the builtin but is kept for backward compatibility.
    return get_element_by_attribute('id', id, html)
251
252
def get_element_by_attribute(attribute, value, html):
    """Return the content of the tag with the specified attribute in the passed HTML document"""

    # (?x) verbose mode: layout whitespace inside the pattern is ignored
    tag_re = r'''(?xs)
        <([a-zA-Z0-9:._-]+)
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*?
         \s+%s=['"]?%s['"]?
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*?
        \s*>
        (?P<content>.*?)
        </\1>
    ''' % (re.escape(attribute), re.escape(value))
    m = re.search(tag_re, html)
    if not m:
        return None

    content = m.group('content')
    # Strip a single level of surrounding quotes, if present
    if content.startswith('"') or content.startswith("'"):
        content = content[1:-1]

    return unescapeHTML(content)
274
275
def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    # Newline vs <br />
    text = html.replace('\n', ' ')
    text = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', text)
    text = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', text)
    # Strip html tags
    text = re.sub('<.*?>', '', text)
    # Replace html entities and trim surrounding whitespace
    return unescapeHTML(text).strip()
291
292
def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            # '-' means stdout; on Windows switch it to binary mode first
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            out = sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout
            return (out, filename)
        return (open(encodeFilename(filename), open_mode), filename)
    except (IOError, OSError) as err:
        # Permission problems won't be fixed by renaming; give up at once
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars
        alt_filename = sanitize_path(filename)
        if alt_filename == filename:
            raise
        # An exception here should be caught in the caller
        return (open(encodeFilename(alt_filename), open_mode), alt_filename)
323
324
def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    parsed = email.utils.parsedate_tz(timestr)
    if parsed is None:
        # Unparseable input mirrors the original's None result
        return None
    return email.utils.mktime_tz(parsed)
332
333
def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept if possible
    """
    def replace_insane(char):
        code = ord(char)
        # Control characters and '?' are always dropped
        if char == '?' or code < 32 or code == 127:
            return ''
        if char == '"':
            return '' if restricted else '\''
        if char == ':':
            return '_-' if restricted else ' -'
        if char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and code > 127:
            return '_'
        return char

    # Handle timestamps
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
    result = ''.join(replace_insane(c) for c in s)
    if not is_id:
        # Collapse runs of underscores introduced by the substitutions
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
370
371
def sanitize_path(s):
    """Sanitizes and normalizes path on Windows"""
    # Only Windows imposes the forbidden-character rules below
    if sys.platform != 'win32':
        return s
    drive, _ = os.path.splitdrive(s)
    if sys.version_info < (2, 7) and not drive:
        drive, _ = os.path.splitunc(s)
    parts = os.path.normpath(remove_start(s, drive)).split(os.path.sep)
    if drive:
        parts.pop(0)
    sanitized = []
    for part in parts:
        if part in ['.', '..']:
            sanitized.append(part)
        else:
            # Replace forbidden characters and trailing space/dot with '#'
            sanitized.append(re.sub('(?:[/<>:"\\|\\\\?\\*]|[\s.]$)', '#', part))
    if drive:
        sanitized.insert(0, drive + os.path.sep)
    return os.path.join(*sanitized)
388
389
390 # Prepend protocol-less URLs with `http:` scheme in order to mitigate the number of
391 # unwanted failures due to missing protocol
def sanitized_Request(url, *args, **kwargs):
    """Build a Request, prepending the `http:` scheme to protocol-less
    ('//host/...') URLs to mitigate failures due to a missing protocol."""
    if url.startswith('//'):
        url = 'http:%s' % url
    return compat_urllib_request.Request(url, *args, **kwargs)
395
396
def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    # Membership is tested against the result list (not a set) on purpose:
    # this keeps unhashable elements working; first occurrence order is kept.
    unique = []
    for item in iterable:
        if item not in unique:
            unique.append(item)
    return unique
404
405
def _htmlentity_transform(entity):
    """Transforms an HTML entity to a character."""
    # Known non-numeric HTML entity
    known = compat_html_entities.name2codepoint
    if entity in known:
        return compat_chr(known[entity])

    # Numeric entity: decimal (#123) or hexadecimal (#x7B)
    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/rg3/youtube-dl/issues/7518
        try:
            return compat_chr(int(numstr, base))
        except ValueError:
            pass

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity
428
429
def unescapeHTML(s):
    """Replace all '&name;' HTML entities in s; None passes through."""
    if s is None:
        return None
    assert type(s) == compat_str

    def _replace(m):
        return _htmlentity_transform(m.group(1))

    return re.sub(r'&([^;]+);', _replace, s)
437
438
def get_subprocess_encoding():
    """Encoding used when exchanging data with subprocesses."""
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        return preferredencoding()
    encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'
    return encoding
449
450
def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """

    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    # Pass '' directly to use Unicode APIs on Windows 2000 and up
    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        return s

    # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
    if sys.platform.startswith('java'):
        return s

    # Plain Python 2: encode with the subprocess/filesystem encoding,
    # silently dropping unrepresentable characters
    return s.encode(get_subprocess_encoding(), 'ignore')
473
474
def decodeFilename(b, for_subprocess=False):
    """Inverse of encodeFilename: decode bytes to text on Python 2;
    a no-op on Python 3 or for non-bytes input."""
    if sys.version_info >= (3, 0) or not isinstance(b, bytes):
        return b
    return b.decode(get_subprocess_encoding(), 'ignore')
484
485
def encodeArgument(s):
    """Encode a command-line argument for passing to a subprocess."""
    if isinstance(s, compat_str):
        decoded = s
    else:
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        decoded = s.decode('ascii')
    return encodeFilename(decoded, True)
493
494
def decodeArgument(b):
    # Counterpart of encodeArgument: decode a subprocess argument using the
    # subprocess encoding (effectively a no-op on Python 3 / non-bytes input).
    return decodeFilename(b, True)
497
498
def decodeOption(optval):
    """Decode a command-line option value to text; None passes through."""
    if optval is None:
        return None
    decoded = optval.decode(preferredencoding()) if isinstance(optval, bytes) else optval
    assert isinstance(decoded, compat_str)
    return decoded
507
508
def formatSeconds(secs):
    """Format a duration in seconds as H:MM:SS, M:SS or plain S.

    Fixed boundary conditions: the original used strict '>' comparisons, so
    exactly 3600 seconds rendered as '60:00' and exactly 60 as '60'.
    """
    if secs >= 3600:
        return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
    elif secs >= 60:
        return '%d:%02d' % (secs // 60, secs % 60)
    else:
        return '%d' % secs
516
517
def make_HTTPS_handler(params, **kwargs):
    # Build an HTTPS handler honouring the 'nocheckcertificate' option,
    # coping with the differing SSL APIs of Python 2.6 through 3.x.
    opts_no_check_certificate = params.get('nocheckcertificate', False)
    if hasattr(ssl, 'create_default_context'):  # Python >= 3.4 or 2.7.9
        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
        if opts_no_check_certificate:
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        try:
            return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
        except TypeError:
            # Python 2.7.8
            # (create_default_context present but HTTPSHandler has no context=)
            pass

    if sys.version_info < (3, 2):
        # No usable SSLContext support: fall back to default verification
        return YoutubeDLHTTPSHandler(params, **kwargs)
    else:  # Python < 3.4
        # Build a context by hand; TLSv1 was the broadest safe choice here
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = (ssl.CERT_NONE
                               if opts_no_check_certificate
                               else ssl.CERT_REQUIRED)
        context.set_default_verify_paths()
        return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
541
542
def bug_reports_message():
    """Return the standard bug-report notice appended to unexpected errors."""
    if ytdl_is_updateable():
        update_cmd = 'type youtube-dl -U to update'
    else:
        update_cmd = 'see https://yt-dl.org/update on how to update'
    return (
        '; please report this issue on https://yt-dl.org/bug .'
        ' Make sure you are using the latest version; %s.'
        ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
        % update_cmd)
552
553
class ExtractorError(Exception):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
        """

        # Network-level failures are always treated as "expected", so no
        # bug-report boilerplate is appended for them
        if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
            expected = True
        if video_id is not None:
            msg = video_id + ': ' + msg
        if cause:
            msg += ' (caused by %r)' % cause
        if not expected:
            msg += bug_reports_message()
        super(ExtractorError, self).__init__(msg)

        self.traceback = tb
        self.exc_info = sys.exc_info()  # preserve original exception
        self.cause = cause
        self.video_id = video_id

    def format_traceback(self):
        # Render the stored traceback as a string, or None if none was given
        if self.traceback is None:
            return None
        return ''.join(traceback.format_tb(self.traceback))
581
582
class UnsupportedError(ExtractorError):
    # Raised when no extractor handles the given URL; marked expected=True
    # so no bug-report boilerplate is appended to the message.
    def __init__(self, url):
        super(UnsupportedError, self).__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url
588
589
class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    # Carries no extra state; exists so callers can catch this case separately.
    pass
593
594
class DownloadError(Exception):
    """Raised by FileDownloader objects when a download fails and they are
    not configured to continue on errors; carries the error message."""

    def __init__(self, msg, exc_info=None):
        """exc_info, if given, is the original exception that caused the
        trouble (as returned by sys.exc_info())."""
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info
607
608
class SameFileError(Exception):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    # Marker exception only; carries no extra state.
    pass
616
617
class PostProcessingError(Exception):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """

    def __init__(self, msg):
        # Fixed: forward msg to Exception.__init__ so str(exc) and default
        # tracebacks show the message instead of an empty string; the .msg
        # attribute is kept for existing callers that read it directly.
        super(PostProcessingError, self).__init__(msg)
        self.msg = msg
627
628
class MaxDownloadsReached(Exception):
    """ --max-downloads limit has been reached. """
    # Used as a control-flow signal to stop processing further downloads.
    pass
632
633
class UnavailableVideoError(Exception):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    # Note: ExtractorError treats this as an "expected" failure.
    pass
641
642
class ContentTooShortError(Exception):
    """Raised when a downloaded file is smaller than the size the server
    announced, which usually indicates an interrupted connection."""

    def __init__(self, downloaded, expected):
        # Both sizes are in bytes
        self.downloaded = downloaded
        self.expected = expected
655
656
def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    # Connection factory used by YoutubeDL(S)Handler.do_open: builds an
    # HTTP(S) connection, optionally bound to a user-chosen source address.
    # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
    # expected HTTP responses to meet HTTP/1.0 or later (see also
    # https://github.com/rg3/youtube-dl/issues/6727)
    if sys.version_info < (3, 0):
        kwargs[b'strict'] = True
    hc = http_class(*args, **kwargs)
    source_address = ydl_handler._params.get('source_address')
    if source_address is not None:
        sa = (source_address, 0)
        if hasattr(hc, 'source_address'):  # Python 2.7+
            hc.source_address = sa
        else:  # Python 2.6
            # No source_address attribute: monkey-patch connect() so the
            # socket is created bound to `sa` ourselves
            def _hc_connect(self, *args, **kwargs):
                sock = compat_socket_create_connection(
                    (self.host, self.port), self.timeout, sa)
                if is_https:
                    self.sock = ssl.wrap_socket(
                        sock, self.key_file, self.cert_file,
                        ssl_version=ssl.PROTOCOL_TLSv1)
                else:
                    self.sock = sock
            hc.connect = functools.partial(_hc_connect, hc)

    return hc
682
683
def handle_youtubedl_headers(headers):
    """Translate internal pseudo-headers into their real effect.

    If 'Youtubedl-no-compression' is present, drop any Accept-Encoding
    header and remove the marker itself before the request goes out.
    """
    if 'Youtubedl-no-compression' not in headers:
        return headers
    filtered = dict(
        (name, value) for name, value in headers.items()
        if name.lower() != 'accept-encoding')
    del filtered['Youtubedl-no-compression']
    return filtered
692
693
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        # YoutubeDL params dict (e.g. 'source_address'), consulted when
        # connections are created via _create_http_connection
        self._params = params

    def http_open(self, req):
        # Route connection creation through _create_http_connection so the
        # source_address binding and the Python 2 'strict' fix are applied
        return self.do_open(functools.partial(
            _create_http_connection, self, compat_http_client.HTTPConnection, False),
            req)

    @staticmethod
    def deflate(data):
        # Handle both raw deflate streams and zlib-wrapped ones
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def addinfourl_wrapper(stream, headers, url, code):
        # Older addinfourl had no `code` constructor argument; emulate it
        if hasattr(compat_urllib_request.addinfourl, 'getcode'):
            return compat_urllib_request.addinfourl(stream, headers, url, code)
        ret = compat_urllib_request.addinfourl(stream, headers, url)
        ret.code = code
        return ret

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req_type = HEADRequest if req.get_method() == 'HEAD' else compat_urllib_request.Request
            new_req = req_type(
                url_escaped, data=req.data, headers=req.headers,
                origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
            new_req.timeout = req.timeout
            req = new_req

        # Add any std_headers the caller did not explicitly set
        for h, v in std_headers.items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk add the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                # Retry with progressively more bytes trimmed off the tail
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    # No trim length worked; report the original failure
                    raise original_ioerror
            resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/rg3/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                if sys.version_info >= (3, 0):
                    location = location.encode('iso-8859-1').decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response
817
818
class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    # HTTPS counterpart of YoutubeDLHandler; allows overriding the
    # connection class, e.g. to supply custom certificate handling.
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        # Forward SSL context / hostname checking only where the base
        # handler actually stored them (these attributes are version-dependent)
        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname
        return self.do_open(functools.partial(
            _create_http_connection, self, self._https_conn_class, True),
            req, **kwargs)
834
835
class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    # Cookie processor that also applies cookie handling to HTTPS traffic
    # via the https_request/https_response aliases below.
    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        # Python 2 will choke on next HTTP request in row if there are non-ASCII
        # characters in Set-Cookie HTTP header of last response (see
        # https://github.com/rg3/youtube-dl/issues/6769).
        # In order to at least prevent crashing we will percent encode Set-Cookie
        # header before HTTPCookieProcessor starts processing it.
        # NOTE(review): the workaround below is intentionally disabled; kept
        # for reference until the upstream issue is resolved.
        # if sys.version_info < (3, 0) and response.headers:
        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
        #         set_cookie = response.headers.get(set_cookie_header)
        #         if set_cookie:
        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
        #             if set_cookie != set_cookie_escaped:
        #                 del response.headers[set_cookie_header]
        #                 response.headers[set_cookie_header] = set_cookie_escaped
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response
858
859
def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    # Drop fractional seconds; the strptime format below has no %f slot
    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        timezone = datetime.timedelta()
        m = re.search(
            r'(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
            date_str)
        if m:
            # Strip the timezone suffix before parsing the date itself
            date_str = date_str[:-len(m.group(0))]
            if m.group('sign'):
                sign = 1 if m.group('sign') == '+' else -1
                timezone = datetime.timedelta(
                    hours=sign * int(m.group('hours')),
                    minutes=sign * int(m.group('minutes')))
    try:
        date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
        dt = datetime.datetime.strptime(date_str, date_format) - timezone
        return calendar.timegm(dt.timetuple())
    except ValueError:
        # Unparseable date: fall through and return None, as before
        pass
889
890
def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None
    upload_date = None
    # Replace commas
    date_str = date_str.replace(',', ' ')
    # %z (UTC offset) is only supported in python>=3.2
    if not re.match(r'^[0-9]{1,2}-[0-9]{1,2}-[0-9]{4}$', date_str):
        date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str)
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    format_expressions = [
        '%d %B %Y',
        '%d %b %Y',
        '%B %d %Y',
        '%b %d %Y',
        '%b %dst %Y %I:%M',
        '%b %dnd %Y %I:%M',
        '%b %dth %Y %I:%M',
        '%Y %m %d',
        '%Y-%m-%d',
        '%Y/%m/%d',
        '%Y/%m/%d %H:%M:%S',
        '%Y-%m-%d %H:%M:%S',
        '%Y-%m-%d %H:%M:%S.%f',
        '%d.%m.%Y %H:%M',
        '%d.%m.%Y %H.%M',
        '%Y-%m-%dT%H:%M:%SZ',
        '%Y-%m-%dT%H:%M:%S.%fZ',
        '%Y-%m-%dT%H:%M:%S.%f0Z',
        '%Y-%m-%dT%H:%M:%S',
        '%Y-%m-%dT%H:%M:%S.%f',
        '%Y-%m-%dT%H:%M',
    ]
    # Ambiguous numeric forms depend on the caller's day/month preference
    if day_first:
        format_expressions.extend([
            '%d-%m-%Y',
            '%d.%m.%Y',
            '%d/%m/%Y',
            '%d/%m/%y',
            '%d/%m/%Y %H:%M:%S',
        ])
    else:
        format_expressions.extend([
            '%m-%d-%Y',
            '%m.%d.%Y',
            '%m/%d/%Y',
            '%m/%d/%y',
            '%m/%d/%Y %H:%M:%S',
        ])
    for expression in format_expressions:
        try:
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
        except ValueError:
            # Deliberately no break on success: the LAST matching format
            # wins, exactly as the original loop behaved
            pass
    if upload_date is None:
        # Fall back to RFC 2822 parsing
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
    return compat_str(upload_date) if upload_date is not None else None
955
956
def determine_ext(url, default_ext='unknown_video'):
    """Guess a file extension from a URL, falling back to default_ext."""
    if url is None:
        return default_ext
    guess = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', guess):
        return guess
    # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
    stripped = guess.rstrip('/')
    if stripped in KNOWN_EXTENSIONS:
        return stripped
    return default_ext
968
969
def subtitles_filename(filename, sub_lang, sub_format):
    """Build a subtitle filename: replace the media extension of `filename`
    with '<sub_lang>.<sub_format>'."""
    base = filename.rsplit('.', 1)[0]
    return '%s.%s.%s' % (base, sub_lang, sub_format)
972
973
def date_from_str(date_str):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today)[+-][0-9](day|week|month|year)(s)?"""
    today = datetime.date.today()
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    # Raw string fixed: '\d' in a plain literal is an invalid escape
    # sequence (DeprecationWarning on Python 3.6+)
    match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
    if match is not None:
        sign = match.group('sign')
        time = int(match.group('time'))
        if sign == '-':
            time = -time
        unit = match.group('unit')
        # A bad approximation?
        if unit == 'month':
            unit = 'day'
            time *= 30
        elif unit == 'year':
            unit = 'day'
            time *= 365
        unit += 's'
        delta = datetime.timedelta(**{unit: time})
        return today + delta
    return datetime.datetime.strptime(date_str, '%Y%m%d').date()
1001
1002
def hyphenate_date(date_str):
    """
    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    # Anything that is not exactly eight digits is passed through untouched
    return '-'.join(match.groups()) if match is not None else date_str
1011
1012
class DateRange(object):
    """Represents a time interval between two dates"""

    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        # Missing bounds default to the widest possible range
        self.start = date_from_str(start) if start is not None else datetime.datetime.min.date()
        self.end = date_from_str(end) if end is not None else datetime.datetime.max.date()
        if self.start > self.end:
            raise ValueError('Date range: "%s" , the start date must be before the end date' % self)

    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)

    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end

    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
1042
1043
def platform_name():
    """ Returns the platform name as a compat_str """
    name = platform.platform()
    if isinstance(name, bytes):
        # Python 2 may hand back a byte string in the locale encoding
        name = name.decode(preferredencoding())
    assert isinstance(name, compat_str)
    return name
1052
1053
def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes
    import ctypes.wintypes

    # File descriptor -> GetStdHandle id (stdout: -11, stderr: -12)
    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False

    GetStdHandle = ctypes.WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        (b'GetStdHandle', ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    WriteConsoleW = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = ctypes.WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        (b'GetConsoleMode', ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        # A real console is a local character device for which
        # GetConsoleMode succeeds
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
                GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):
        return False

    def next_nonbmp_pos(s):
        # Index of the first character outside the Basic Multilingual Plane
        # (len(s) if none); those need special handling below
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)

    while s:
        # Write at most 1024 BMP characters at a time; a leading non-BMP
        # character is written on its own as a 2-unit surrogate pair
        count = min(next_nonbmp_pos(s), 1024)

        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True
1127
1128
def write_string(s, out=None, encoding=None):
    """Write the unicode string s to out (default: sys.stderr), handling
    Windows consoles, byte streams and Python 2/3 encoding differences."""
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        # Prefer WriteConsoleW so non-ANSI characters survive on Windows
        if _windows_write_string(s, out):
            return

    if ('b' in getattr(out, 'mode', '') or
            sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        byt = s.encode(encoding or preferredencoding(), 'ignore')
        out.write(byt)
    elif hasattr(out, 'buffer'):
        # Text stream: bypass its own encoder and write encoded bytes directly
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
        byt = s.encode(enc, 'ignore')
        out.buffer.write(byt)
    else:
        out.write(s)
    out.flush()
1149
1150
def bytes_to_intlist(bs):
    """Convert a byte string into a list of integer byte values."""
    if not bs:
        return []
    # Python 3: indexing bytes yields ints; Python 2: 1-char strings
    if isinstance(bs[0], int):
        return list(bs)
    return [ord(ch) for ch in bs]
1158
1159
def intlist_to_bytes(xs):
    # Inverse of bytes_to_intlist: pack integer byte values (0-255) back
    # into a byte string
    if not xs:
        return b''
    return struct_pack('%dB' % len(xs), *xs)
1164
1165
# Cross-platform file locking: defines _lock_file/_unlock_file using
# LockFileEx/UnlockFileEx on Windows, flock elsewhere, or stubs that raise
# when no locking primitive is available (e.g. Jython without fcntl).
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        # Mirrors the Win32 OVERLAPPED struct passed to LockFileEx
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    # Byte range covering the whole file
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        # Keep the pointer alive on the file object for the unlock call
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        # 0x2 == LOCKFILE_EXCLUSIVE_LOCK
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())

else:
    # Some platforms, such as Jython, is missing fcntl
    try:
        import fcntl

        def _lock_file(f, exclusive):
            fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

        def _unlock_file(f):
            fcntl.flock(f, fcntl.LOCK_UN)
    except ImportError:
        UNSUPPORTED_MSG = 'file locking is not supported on this platform'

        def _lock_file(f, exclusive):
            raise IOError(UNSUPPORTED_MSG)

        def _unlock_file(f):
            raise IOError(UNSUPPORTED_MSG)
1239
1240
class locked_file(object):
    """Context manager wrapping io.open() that holds an OS-level file lock
    while the file is open ('r' takes a shared lock, 'a'/'w' an exclusive
    one)."""

    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        exclusive = self.mode != 'r'
        try:
            _lock_file(self.f, exclusive)
        except IOError:
            # Could not acquire the lock: do not leak the file handle
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)
1270
1271
def get_filesystem_encoding():
    """Return sys.getfilesystemencoding(), defaulting to utf-8 when unset."""
    enc = sys.getfilesystemencoding()
    return 'utf-8' if enc is None else enc
1275
1276
def shell_quote(args):
    """Quote a sequence of arguments for safe display as a shell command."""
    fs_encoding = get_filesystem_encoding()
    quoted = []
    for arg in args:
        if isinstance(arg, bytes):
            # We may get a filename encoded with 'encodeFilename'
            arg = arg.decode(fs_encoding)
        quoted.append(pipes.quote(arg))
    return ' '.join(quoted)
1286
1287
def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """
    # The payload travels JSON-encoded inside the URL fragment
    smuggled_data = compat_urllib_parse.urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return '%s#%s' % (url, smuggled_data)
1294
1295
def unsmuggle_url(smug_url, default=None):
    """Extract data hidden by smuggle_url; returns (url, data or default)."""
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, smuggled = smug_url.rpartition('#')
    payload = compat_parse_qs(smuggled)['__youtubedl_smuggle'][0]
    return url, json.loads(payload)
1303
1304
def format_bytes(bytes):
    """Format a byte count as a human-readable string, e.g. '1.50KiB'.

    Accepts None (-> 'N/A'), numeric strings, ints and floats.
    """
    if bytes is None:
        return 'N/A'
    if type(bytes) is str:
        bytes = float(bytes)
    if bytes == 0.0:
        exponent = 0
    else:
        exponent = int(math.log(bytes, 1024.0))
    SUFFIXES = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
    # Clamp so absurdly large values do not index past the suffix table
    exponent = min(exponent, len(SUFFIXES) - 1)
    suffix = SUFFIXES[exponent]
    converted = float(bytes) / float(1024 ** exponent)
    return '%.2f%s' % (converted, suffix)
1317
1318
def parse_filesize(s):
    """Parse a human-readable filesize like '5.5MiB' into bytes, or None."""
    if s is None:
        return None

    # The lower-case forms are of course incorrect and unofficial,
    # but we support those too
    _UNIT_TABLE = {
        'B': 1,
        'b': 1,
        'KiB': 1024,
        'KB': 1000,
        'kB': 1024,
        'Kb': 1000,
        'MiB': 1024 ** 2,
        'MB': 1000 ** 2,
        'mB': 1024 ** 2,
        'Mb': 1000 ** 2,
        'GiB': 1024 ** 3,
        'GB': 1000 ** 3,
        'gB': 1024 ** 3,
        'Gb': 1000 ** 3,
        'TiB': 1024 ** 4,
        'TB': 1000 ** 4,
        'tB': 1024 ** 4,
        'Tb': 1000 ** 4,
        'PiB': 1024 ** 5,
        'PB': 1000 ** 5,
        'pB': 1024 ** 5,
        'Pb': 1000 ** 5,
        'EiB': 1024 ** 6,
        'EB': 1000 ** 6,
        'eB': 1024 ** 6,
        'Eb': 1000 ** 6,
        'ZiB': 1024 ** 7,
        'ZB': 1000 ** 7,
        'zB': 1024 ** 7,
        'Zb': 1000 ** 7,
        'YiB': 1024 ** 8,
        'YB': 1000 ** 8,
        'yB': 1024 ** 8,
        'Yb': 1000 ** 8,
    }

    units_re = '|'.join(re.escape(unit) for unit in _UNIT_TABLE)
    m = re.match(
        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)' % units_re, s)
    if m is None:
        return None

    # Decimal comma is accepted as a decimal point
    number = float(m.group('num').replace(',', '.'))
    return int(number * _UNIT_TABLE[m.group('unit')])
1371
1372
def month_by_name(name):
    """ Return the number of a month by (locale-independently) English name """
    if name not in ENGLISH_MONTH_NAMES:
        return None
    return ENGLISH_MONTH_NAMES.index(name) + 1
1380
1381
def month_by_abbreviation(abbrev):
    """ Return the number of a month by (locale-independently) English
    abbreviations """
    abbrevs = [month[:3] for month in ENGLISH_MONTH_NAMES]
    if abbrev not in abbrevs:
        return None
    return abbrevs.index(abbrev) + 1
1390
1391
def fix_xml_ampersands(xml_str):
    """Replace every bare '&' (one not starting an entity) by '&amp;' in XML"""
    # Negative lookahead keeps existing named/numeric entities untouched
    return re.sub(
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;', xml_str)
1398
1399
def setproctitle(title):
    """Best-effort: set the process name shown by ps/top via libc prctl."""
    assert isinstance(title, compat_str)

    # ctypes in Jython is not complete
    # http://bugs.jython.org/issue2148
    if sys.platform.startswith('java'):
        return

    try:
        libc = ctypes.cdll.LoadLibrary('libc.so.6')
    except OSError:
        # Not a glibc system: silently skip
        return
    title_bytes = title.encode('utf-8')
    buf = ctypes.create_string_buffer(len(title_bytes))
    buf.value = title_bytes
    try:
        # 15 == PR_SET_NAME
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this
1419
1420
def remove_start(s, start):
    """Strip the prefix `start` from s, if present."""
    return s[len(start):] if s.startswith(start) else s
1425
1426
def remove_end(s, end):
    """Strip the suffix `end` from s, if present.

    Guards against an empty suffix: s.endswith('') is always True and
    s[:-0] evaluates to '', which silently discarded the whole string.
    """
    if end and s.endswith(end):
        return s[:-len(end)]
    return s
1431
1432
def remove_quotes(s):
    """Strip one matching pair of surrounding single or double quotes."""
    if s is None or len(s) < 2:
        return s
    if s[0] == s[-1] and s[0] in ('"', "'"):
        return s[1:-1]
    return s
1440
1441
def url_basename(url):
    """Return the last path segment of a URL (query/fragment ignored)."""
    path = compat_urlparse.urlparse(url).path
    segments = path.strip('/').split('/')
    return segments[-1]
1445
1446
class HEADRequest(compat_urllib_request.Request):
    # Request subclass that issues an HTTP HEAD instead of GET
    def get_method(self):
        return 'HEAD'
1450
1451
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    """Best-effort conversion to int, returning `default` on failure.

    When get_attr is set, v is first replaced by getattr(v, get_attr, None).
    The result is int(v) * invscale // scale.
    """
    if get_attr and v is not None:
        v = getattr(v, get_attr, None)
    if get_attr and v == '':
        v = None
    if v is None:
        return default
    try:
        return int(v) * invscale // scale
    except ValueError:
        return default
1464
1465
def str_or_none(v, default=None):
    """Stringify v via compat_str, passing None through as `default`."""
    if v is None:
        return default
    return compat_str(v)
1468
1469
def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    if int_str is None:
        return None
    # Drop thousands separators and a leading '+'
    return int(re.sub(r'[,\.\+]', '', int_str))
1476
1477
def float_or_none(v, scale=1, invscale=1, default=None):
    """Best-effort float conversion: float(v) * invscale / scale, else default."""
    if v is None:
        return default
    try:
        result = float(v)
    except ValueError:
        return default
    return result * invscale / scale
1485
1486
def parse_duration(s):
    """Parse a duration string into seconds, or None if unparseable.

    Accepts forms like '3:45', '1:02:03', '5 min', '2.5 hours',
    '1d 2h 3m 4s' and ISO-8601-ish 'PT1H2M3S'.
    """
    if not isinstance(s, compat_basestring):
        return None

    s = s.strip()

    m = re.match(
        r'''(?ix)(?:P?T)?
        (?:
            (?P<only_mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*|
            (?P<only_hours>[0-9.]+)\s*(?:hours?)|

            \s*(?P<hours_reversed>[0-9]+)\s*(?:[:h]|hours?)\s*(?P<mins_reversed>[0-9]+)\s*(?:[:m]|mins?\.?|minutes?)\s*|
            (?:
                (?:
                    (?:(?P<days>[0-9]+)\s*(?:[:d]|days?)\s*)?
                    (?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s*
                )?
                (?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*
            )?
            (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)?
        )$''', s)
    if not m:
        return None
    res = 0
    # Pure minutes/hours forms may be fractional and short-circuit here
    if m.group('only_mins'):
        return float_or_none(m.group('only_mins'), invscale=60)
    if m.group('only_hours'):
        return float_or_none(m.group('only_hours'), invscale=60 * 60)
    # Otherwise sum up whichever components matched
    if m.group('secs'):
        res += int(m.group('secs'))
    if m.group('mins_reversed'):
        res += int(m.group('mins_reversed')) * 60
    if m.group('mins'):
        res += int(m.group('mins')) * 60
    if m.group('hours'):
        res += int(m.group('hours')) * 60 * 60
    if m.group('hours_reversed'):
        res += int(m.group('hours_reversed')) * 60 * 60
    if m.group('days'):
        res += int(m.group('days')) * 24 * 60 * 60
    if m.group('ms'):
        res += float(m.group('ms'))
    return res
1531
1532
def prepend_extension(filename, ext, expected_real_ext=None):
    """Insert ext before the filename's real extension.

    'a.mp4' + 'temp' -> 'a.temp.mp4'. If expected_real_ext is given and the
    current extension differs, ext is appended instead ('a.flv' -> 'a.flv.temp').
    """
    name, real_ext = os.path.splitext(filename)
    if expected_real_ext and real_ext[1:] != expected_real_ext:
        return '{0}.{1}'.format(filename, ext)
    return '{0}.{1}{2}'.format(name, ext, real_ext)
1539
1540
def replace_extension(filename, ext, expected_real_ext=None):
    """Replace the filename's extension with ext.

    If expected_real_ext is given and the current extension differs, the new
    extension is appended rather than replacing it.
    """
    name, real_ext = os.path.splitext(filename)
    if expected_real_ext and real_ext[1:] != expected_real_ext:
        base = filename
    else:
        base = name
    return '{0}.{1}'.format(base, ext)
1546
1547
def check_executable(exe, args=None):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
    # args=None instead of a mutable default list (best practice; the list
    # was never mutated, so behavior is unchanged)
    try:
        subprocess.Popen(
            [exe] + (args or []),
            stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    except OSError:
        return False
    return exe
1556
1557
def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized='present'):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    try:
        # stderr is merged into stdout: some tools print their version there
        out, _ = subprocess.Popen(
            [encodeArgument(exe)] + args,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
    except OSError:
        return False
    if isinstance(out, bytes):  # Python 2.x
        out = out.decode('ascii', 'ignore')
    return detect_exe_version(out, version_re, unrecognized)
1571
1572
def detect_exe_version(output, version_re=None, unrecognized='present'):
    """Search an executable's version output for a version string."""
    assert isinstance(output, compat_str)
    if version_re is None:
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
    return m.group(1) if m else unrecognized
1582
1583
class PagedList(object):
    # Abstract base for lazily paginated lists; subclasses provide getslice()
    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())
1588
1589
class OnDemandPagedList(PagedList):
    """PagedList that fetches pages lazily via pagefunc, optionally caching."""

    def __init__(self, pagefunc, pagesize, use_cache=False):
        self._pagefunc = pagefunc
        self._pagesize = pagesize
        self._use_cache = use_cache
        if use_cache:
            self._cache = {}

    def getslice(self, start=0, end=None):
        results = []
        for page_idx in itertools.count(start // self._pagesize):
            first_id = page_idx * self._pagesize
            next_first_id = first_id + self._pagesize
            if start >= next_first_id:
                continue

            page = None
            if self._use_cache:
                page = self._cache.get(page_idx)
            if page is None:
                page = list(self._pagefunc(page_idx))
                if self._use_cache:
                    # Cache the full, unsliced page
                    self._cache[page_idx] = page

            # Offset inside this page where the requested slice begins
            if first_id <= start < next_first_id:
                start_offset = start % self._pagesize
            else:
                start_offset = 0

            # Exclusive offset where the slice ends, if `end` is on this page
            end_offset = None
            if end is not None and first_id <= end <= next_first_id:
                end_offset = ((end - 1) % self._pagesize) + 1

            if start_offset != 0 or end_offset is not None:
                page = page[start_offset:end_offset]
            results.extend(page)

            # A little optimization - if current page is not "full", ie. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page) + start_offset < self._pagesize:
                break

            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == next_first_id:
                break
        return results
1640
1641
class InAdvancePagedList(PagedList):
    """PagedList for sources whose total page count is known up front."""

    def __init__(self, pagefunc, pagecount, pagesize):
        self._pagefunc = pagefunc
        self._pagecount = pagecount
        self._pagesize = pagesize

    def getslice(self, start=0, end=None):
        results = []
        first_page = start // self._pagesize
        if end is None:
            last_page = self._pagecount
        else:
            last_page = end // self._pagesize + 1
        # Elements to drop from the first fetched page
        skip = start - first_page * self._pagesize
        # Total number of elements still wanted (None = all)
        remaining = None if end is None else end - start
        for page_idx in range(first_page, last_page):
            page = list(self._pagefunc(page_idx))
            if skip:
                page = page[skip:]
                skip = None
            if remaining is not None:
                if len(page) >= remaining:
                    results.extend(page[:remaining])
                    break
                remaining -= len(page)
            results.extend(page)
        return results
1669
1670
def uppercase_escape(s):
    """Decode literal \\UXXXXXXXX (8 hex digits) escapes appearing in s."""
    decode = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: decode(m.group(0))[0],
        s)
1677
1678
def lowercase_escape(s):
    """Decode literal \\uXXXX (4 hex digits) escapes appearing in s."""
    decode = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4}',
        lambda m: decode(m.group(0))[0],
        s)
1685
1686
def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    # On Python 2, quote() needs a byte string
    if sys.version_info < (3, 0) and isinstance(s, compat_str):
        s = s.encode('utf-8')
    # The safe set keeps RFC 3986 reserved/unreserved punctuation intact
    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
1692
1693
def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    parsed = compat_urllib_parse_urlparse(url)
    # Escape each component separately so structure characters survive
    return parsed._replace(
        path=escape_rfc3986(parsed.path),
        params=escape_rfc3986(parsed.params),
        query=escape_rfc3986(parsed.query),
        fragment=escape_rfc3986(parsed.fragment)
    ).geturl()
1703
# Probe whether struct accepts text format strings; define struct_pack /
# struct_unpack wrappers accordingly.
try:
    struct.pack('!I', 0)
except TypeError:
    # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument
    def struct_pack(spec, *args):
        if isinstance(spec, compat_str):
            spec = spec.encode('ascii')
        return struct.pack(spec, *args)

    def struct_unpack(spec, *args):
        if isinstance(spec, compat_str):
            spec = spec.encode('ascii')
        return struct.unpack(spec, *args)
else:
    # Text format strings work: use struct directly
    struct_pack = struct.pack
    struct_unpack = struct.unpack
1720
1721
def read_batch_urls(batch_fd):
    """Read URLs from an open batch file, skipping comments and blank lines."""
    def fixup(line):
        if not isinstance(line, compat_str):
            line = line.decode('utf-8', 'replace')
        BOM_UTF8 = '\xef\xbb\xbf'
        if line.startswith(BOM_UTF8):
            line = line[len(BOM_UTF8):]
        line = line.strip()
        # Comment markers: '#', ';' and ']' (ini-style leftovers)
        if line.startswith(('#', ';', ']')):
            return False
        return line

    with contextlib.closing(batch_fd) as fd:
        return [u for u in map(fixup, fd) if u]
1736
1737
def urlencode_postdata(*args, **kargs):
    # urlencode and encode to ascii bytes, as required for a POST body
    return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii')
1740
1741
def encode_dict(d, encoding='utf-8'):
    """Encode every string key and value of d to bytes using `encoding`."""
    def encode(value):
        if isinstance(value, compat_basestring):
            return value.encode(encoding)
        return value
    return dict((encode(k), encode(v)) for k, v in d.items())
1746
1747
def dict_get(d, key_or_keys, default=None, skip_false_values=True):
    """Look up key_or_keys (a key or list/tuple of keys) in d, returning the
    first usable value; falsy values are skipped unless skip_false_values
    is False."""
    if not isinstance(key_or_keys, (list, tuple)):
        return d.get(key_or_keys, default)
    for key in key_or_keys:
        if key not in d or d[key] is None:
            continue
        if skip_false_values and not d[key]:
            continue
        return d[key]
    return default
1756
1757
def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
    # Coerce any object to compat_str, decoding byte strings with `encoding`.
    # NOTE: the default encoding is evaluated once, at import time.
    return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
1760
1761
# US MPAA-style content ratings mapped to a minimum viewer age
US_RATINGS = {
    'G': 0,
    'PG': 10,
    'PG-13': 13,
    'R': 16,
    'NC': 18,
}
1769
1770
def parse_age_limit(s):
    """Parse '18' or '18+' into an int age; otherwise fall back to the
    US_RATINGS table. Returns None for None or unrecognized input."""
    if s is None:
        return None
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    if m:
        return int(m.group('age'))
    return US_RATINGS.get(s)
1776
1777
def strip_jsonp(code):
    """Strip a JSONP callback wrapper, leaving only the JSON payload."""
    return re.sub(
        r'(?s)^[a-zA-Z0-9_.]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)
1781
1782
def js_to_json(code):
    """Convert a JavaScript object literal into (mostly) valid JSON."""
    def fix_kv(m):
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        if v.startswith('"'):
            # Already double-quoted: only unescape \' which JSON forbids
            v = re.sub(r"\\'", "'", v[1:-1])
        elif v.startswith("'"):
            # Single-quoted: translate the escapes for double quoting
            v = v[1:-1]
            v = re.sub(r"\\\\|\\'|\"", lambda m: {
                '\\\\': '\\\\',
                "\\'": "'",
                '"': '\\"',
            }[m.group(0)], v)
        return '"%s"' % v

    # Match string literals and bare identifiers (object keys, constants)
    res = re.sub(r'''(?x)
        "(?:[^"\\]*(?:\\\\|\\['"nu]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nu]))*[^'\\]*'|
        [a-zA-Z_][.a-zA-Z_0-9]*
        ''', fix_kv, code)
    # Strip trailing commas before ']' or '}'
    res = re.sub(r',(\s*[\]}])', lambda m: m.group(1), res)
    return res
1806
1807
def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def rank(qid):
        # Position in the list is the quality rank; unknown ids rank lowest
        if qid in quality_ids:
            return quality_ids.index(qid)
        return -1
    return rank
1816
1817
1818 DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
1819
1820
def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) <= length:
        return s
    return s[:length - len(ELLIPSES)] + ELLIPSES
1829
1830
def version_tuple(v):
    """Turn a version string like '2016.01.01-3' into a tuple of ints."""
    return tuple(map(int, re.split(r'[-.]', v)))
1833
1834
def is_outdated_version(version, limit, assume_new=True):
    """Compare dotted version strings; unparseable input yields not assume_new."""
    fallback = not assume_new
    if not version:
        return fallback
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return fallback
1842
1843
def ytdl_is_updateable():
    """ Returns if youtube-dl can be updated with -U """
    from zipimport import zipimporter
    # Updateable when running from a zip bundle or a frozen executable
    return hasattr(sys, 'frozen') or isinstance(globals().get('__loader__'), zipimporter)
1849
1850
def args_to_str(args):
    """Get a short, shell-quoted string representation of a subprocess command."""
    return ' '.join(shlex_quote(arg) for arg in args)
1854
1855
def error_to_compat_str(err):
    """Stringify an exception, decoding Python 2 byte messages properly."""
    msg = str(err)
    # On python 2 error byte string must be decoded with proper
    # encoding rather than ascii
    if sys.version_info[0] < 3:
        msg = msg.decode(preferredencoding())
    return msg
1863
1864
def mimetype2ext(mt):
    """Map a MIME type to a file extension; unknown subtypes pass through.

    Returns None when mt is None (previously raised AttributeError on
    mt.rpartition).
    """
    if mt is None:
        return None

    ext = {
        'audio/mp4': 'm4a',
    }.get(mt)
    if ext is not None:
        return ext

    _, _, res = mt.rpartition('/')

    return {
        '3gpp': '3gp',
        'smptett+xml': 'tt',
        'srt': 'srt',
        'ttaf+xml': 'dfxp',
        'ttml+xml': 'ttml',
        'vtt': 'vtt',
        'x-flv': 'flv',
        'x-mp4-fragmented': 'mp4',
        'x-ms-wmv': 'wmv',
    }.get(res, res)
1885
1886
def urlhandle_detect_ext(url_handle):
    """Guess the file extension for a urlopen() response, preferring the
    Content-Disposition filename over the Content-Type."""
    try:
        url_handle.headers
        getheader = lambda h: url_handle.headers[h]
    except AttributeError:  # Python < 3
        getheader = url_handle.info().getheader

    cd = getheader('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(getheader('Content-Type'))
1903
1904
def encode_data_uri(data, mime_type):
    """Build an RFC 2397 data: URI with a base64-encoded payload."""
    encoded = base64.b64encode(data).decode('ascii')
    return 'data:%s;base64,%s' % (mime_type, encoded)
1907
1908
def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """
    if age_limit is None or content_limit is None:
        # No viewer limit set, or content available for everyone
        return False
    return age_limit < content_limit
1917
1918
def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes. """
    # Order matters: the 4-byte UTF-32 BOMs must be checked before their
    # 2-byte UTF-16 prefixes
    BOMS = [
        (b'\xef\xbb\xbf', 'utf-8'),
        (b'\x00\x00\xfe\xff', 'utf-32-be'),
        (b'\xff\xfe\x00\x00', 'utf-32-le'),
        (b'\xff\xfe', 'utf-16-le'),
        (b'\xfe\xff', 'utf-16-be'),
    ]
    decoded = None
    for bom, enc in BOMS:
        if first_bytes.startswith(bom):
            decoded = first_bytes[len(bom):].decode(enc, 'replace')
            break
    if decoded is None:
        decoded = first_bytes.decode('utf-8', 'replace')

    return re.match(r'^\s*<', decoded)
1937
1938
def determine_protocol(info_dict):
    """Work out the download protocol for an info dict: explicit 'protocol',
    a known URL prefix, a streaming-manifest extension, or the URL scheme."""
    protocol = info_dict.get('protocol')
    if protocol is not None:
        return protocol

    url = info_dict['url']
    for prefix in ('rtmp', 'mms', 'rtsp'):
        if url.startswith(prefix):
            return prefix

    ext = determine_ext(url)
    if ext in ('m3u8', 'f4m'):
        return ext

    return compat_urllib_parse_urlparse(url).scheme
1959
1960
def render_table(header_row, data):
    """ Render a list of rows, each as a list of values """
    table = [header_row] + data
    # Pad every column except the last to its widest cell plus one space
    max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)]
    format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s'
    return '\n'.join(format_str % tuple(row) for row in table)
1967
1968
def _match_one(filter_part, dct):
    """Evaluate a single --match-filter clause against dct.

    Supports comparisons ('duration > 60', 'uploader = x', with an optional
    '?' making a missing field match) and unary presence tests ('like_count',
    '!is_live').
    """
    COMPARISON_OPERATORS = {
        '<': operator.lt,
        '<=': operator.le,
        '>': operator.gt,
        '>=': operator.ge,
        '=': operator.eq,
        '!=': operator.ne,
    }
    operator_rex = re.compile(r'''(?x)\s*
        (?P<key>[a-z_]+)
        \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
        (?:
            (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
            (?P<strval>(?![0-9.])[a-z0-9A-Z]*)
        )
        \s*$
        ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = COMPARISON_OPERATORS[m.group('op')]
        if m.group('strval') is not None:
            # Strings only support equality tests
            if m.group('op') not in ('=', '!='):
                raise ValueError(
                    'Operator %s does not support string values!' % m.group('op'))
            comparison_value = m.group('strval')
        else:
            try:
                comparison_value = int(m.group('intval'))
            except ValueError:
                # Not a plain int: try filesize suffixes ('1.2MiB', then '500k' + 'B')
                comparison_value = parse_filesize(m.group('intval'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('intval') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid integer value %r in filter part %r' % (
                            m.group('intval'), filter_part))
        actual_value = dct.get(m.group('key'))
        if actual_value is None:
            # Missing field: matches only when the clause carried a '?'
            return m.group('none_inclusive')
        return op(actual_value, comparison_value)

    UNARY_OPERATORS = {
        '': lambda v: v is not None,
        '!': lambda v: v is None,
    }
    operator_rex = re.compile(r'''(?x)\s*
        (?P<op>%s)\s*(?P<key>[a-z_]+)
        \s*$
        ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = UNARY_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        return op(actual_value)

    raise ValueError('Invalid filter part %r' % filter_part)
2026
2027
def match_str(filter_str, dct):
    """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """
    # Clauses joined by '&' must all hold
    return all(
        _match_one(clause, dct) for clause in filter_str.split('&'))
2033
2034
def match_filter_func(filter_str):
    """Build a --match-filter callback: returns None to accept a video,
    or a skip message when the filter does not match."""
    def _match_func(info_dict):
        if match_str(filter_str, info_dict):
            return None
        video_title = info_dict.get('title', info_dict.get('id', 'video'))
        return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
    return _match_func
2043
2044
def parse_dfxp_time_expr(time_expr):
    """Parse a TTML/DFXP time expression into seconds (float), or None."""
    if not time_expr:
        return None

    # Plain offset: '12', '12.5', optionally suffixed with 's'
    m = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
    if m:
        return float(m.group('time_offset'))

    # Clock time: 'HH:MM:SS', 'HH:MM:SS.mmm' or 'HH:MM:SS:frames'
    m = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
    if m:
        hours, mins, secs = m.groups()
        return 3600 * int(hours) + 60 * int(mins) + float(secs.replace(':', '.'))
2056
2057
def srt_subtitles_timecode(seconds):
    """Format a number of seconds as an SRT timecode: HH:MM:SS,mmm."""
    hrs = seconds / 3600
    mins = (seconds % 3600) / 60
    secs = seconds % 60
    msecs = (seconds % 1) * 1000
    return '%02d:%02d:%02d,%03d' % (hrs, mins, secs, msecs)
2060
2061
def dfxp2srt(dfxp_data):
    """Convert DFXP/TTML subtitle markup into SRT format.

    Raises ValueError when no <p> paragraphs are found.
    """
    _x = functools.partial(xpath_with_ns, ns_map={
        'ttml': 'http://www.w3.org/ns/ttml',
        'ttaf1': 'http://www.w3.org/2006/10/ttaf1',
    })

    class TTMLPElementParser(object):
        # XMLParser target flattening a <p> element into plain text,
        # turning <br/> (in any supported namespace) into newlines
        out = ''

        def start(self, tag, attrib):
            if tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'):
                self.out += '\n'

        def end(self, tag):
            pass

        def data(self, data):
            self.out += data

        def close(self):
            return self.out.strip()

    def parse_node(node):
        target = TTMLPElementParser()
        parser = xml.etree.ElementTree.XMLParser(target=target)
        parser.feed(xml.etree.ElementTree.tostring(node))
        return parser.close()

    dfxp = compat_etree_fromstring(dfxp_data.encode('utf-8'))
    out = []
    # Paragraphs may live in the TTML, legacy TTAF1 or empty namespace
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall('.//p')

    if not paras:
        raise ValueError('Invalid dfxp/TTML subtitle')

    for para, index in zip(paras, itertools.count(1)):
        begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
        dur = parse_dfxp_time_expr(para.attrib.get('dur'))
        if begin_time is None:
            continue
        if not end_time:
            if not dur:
                continue
            # No explicit end: derive it from the duration
            end_time = begin_time + dur
        out.append('%d\n%s --> %s\n%s\n\n' % (
            index,
            srt_subtitles_timecode(begin_time),
            srt_subtitles_timecode(end_time),
            parse_node(para)))

    return ''.join(out)
2114
2115
def cli_option(params, command_option, param):
    """Emit [command_option, value] when params[param] is set, else []."""
    value = params.get(param)
    if value is None:
        return []
    return [command_option, value]
2119
2120
def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
    """Emit a boolean command line option from params[param].

    Returns [] when the key is absent (the old behavior was to crash on
    `assert isinstance(None, bool)`); still asserts for present non-bool
    values.
    """
    value = params.get(param)
    if value is None:
        return []
    assert isinstance(value, bool)
    rendered = true_value if value else false_value
    if separator:
        return [command_option + separator + rendered]
    return [command_option, rendered]
2127
2128
def cli_valueless_option(params, command_option, param, expected_value=True):
    """Return [command_option] when params[param] equals expected_value, else []."""
    if params.get(param) == expected_value:
        return [command_option]
    return []
2132
2133
def cli_configuration_args(params, param, default=[]):
    """Return the list of extra CLI arguments stored under *param*.

    Falls back to a copy of *default* when the key is absent; asserts that a
    present value is a list.
    """
    ex_args = params.get(param)
    if ex_args is None:
        # Return a copy so that a caller mutating the result cannot corrupt
        # the shared mutable default argument across calls.
        return list(default)
    assert isinstance(ex_args, list)
    return ex_args
2140
2141
class ISO639Utils(object):
    """Bidirectional mapping between ISO 639-1 and ISO 639-2/T language codes."""
    # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
    _lang_map = {
        'aa': 'aar',
        'ab': 'abk',
        'ae': 'ave',
        'af': 'afr',
        'ak': 'aka',
        'am': 'amh',
        'an': 'arg',
        'ar': 'ara',
        'as': 'asm',
        'av': 'ava',
        'ay': 'aym',
        'az': 'aze',
        'ba': 'bak',
        'be': 'bel',
        'bg': 'bul',
        'bh': 'bih',
        'bi': 'bis',
        'bm': 'bam',
        'bn': 'ben',
        'bo': 'bod',
        'br': 'bre',
        'bs': 'bos',
        'ca': 'cat',
        'ce': 'che',
        'ch': 'cha',
        'co': 'cos',
        'cr': 'cre',
        'cs': 'ces',
        'cu': 'chu',
        'cv': 'chv',
        'cy': 'cym',
        'da': 'dan',
        'de': 'deu',
        'dv': 'div',
        'dz': 'dzo',
        'ee': 'ewe',
        'el': 'ell',
        'en': 'eng',
        'eo': 'epo',
        'es': 'spa',
        'et': 'est',
        'eu': 'eus',
        'fa': 'fas',
        'ff': 'ful',
        'fi': 'fin',
        'fj': 'fij',
        'fo': 'fao',
        'fr': 'fra',
        'fy': 'fry',
        'ga': 'gle',
        'gd': 'gla',
        'gl': 'glg',
        'gn': 'grn',
        'gu': 'guj',
        'gv': 'glv',
        'ha': 'hau',
        'he': 'heb',
        'hi': 'hin',
        'ho': 'hmo',
        'hr': 'hrv',
        'ht': 'hat',
        'hu': 'hun',
        'hy': 'hye',
        'hz': 'her',
        'ia': 'ina',
        'id': 'ind',
        'ie': 'ile',
        'ig': 'ibo',
        'ii': 'iii',
        'ik': 'ipk',
        'io': 'ido',
        'is': 'isl',
        'it': 'ita',
        'iu': 'iku',
        'ja': 'jpn',
        'jv': 'jav',
        'ka': 'kat',
        'kg': 'kon',
        'ki': 'kik',
        'kj': 'kua',
        'kk': 'kaz',
        'kl': 'kal',
        'km': 'khm',
        'kn': 'kan',
        'ko': 'kor',
        'kr': 'kau',
        'ks': 'kas',
        'ku': 'kur',
        'kv': 'kom',
        'kw': 'cor',
        'ky': 'kir',
        'la': 'lat',
        'lb': 'ltz',
        'lg': 'lug',
        'li': 'lim',
        'ln': 'lin',
        'lo': 'lao',
        'lt': 'lit',
        'lu': 'lub',
        'lv': 'lav',
        'mg': 'mlg',
        'mh': 'mah',
        'mi': 'mri',
        'mk': 'mkd',
        'ml': 'mal',
        'mn': 'mon',
        'mr': 'mar',
        'ms': 'msa',
        'mt': 'mlt',
        'my': 'mya',
        'na': 'nau',
        'nb': 'nob',
        'nd': 'nde',
        'ne': 'nep',
        'ng': 'ndo',
        'nl': 'nld',
        'nn': 'nno',
        'no': 'nor',
        'nr': 'nbl',
        'nv': 'nav',
        'ny': 'nya',
        'oc': 'oci',
        'oj': 'oji',
        'om': 'orm',
        'or': 'ori',
        'os': 'oss',
        'pa': 'pan',
        'pi': 'pli',
        'pl': 'pol',
        'ps': 'pus',
        'pt': 'por',
        'qu': 'que',
        'rm': 'roh',
        'rn': 'run',
        'ro': 'ron',
        'ru': 'rus',
        'rw': 'kin',
        'sa': 'san',
        'sc': 'srd',
        'sd': 'snd',
        'se': 'sme',
        'sg': 'sag',
        'si': 'sin',
        'sk': 'slk',
        'sl': 'slv',
        'sm': 'smo',
        'sn': 'sna',
        'so': 'som',
        'sq': 'sqi',
        'sr': 'srp',
        'ss': 'ssw',
        'st': 'sot',
        'su': 'sun',
        'sv': 'swe',
        'sw': 'swa',
        'ta': 'tam',
        'te': 'tel',
        'tg': 'tgk',
        'th': 'tha',
        'ti': 'tir',
        'tk': 'tuk',
        'tl': 'tgl',
        'tn': 'tsn',
        'to': 'ton',
        'tr': 'tur',
        'ts': 'tso',
        'tt': 'tat',
        'tw': 'twi',
        'ty': 'tah',
        'ug': 'uig',
        'uk': 'ukr',
        'ur': 'urd',
        'uz': 'uzb',
        've': 'ven',
        'vi': 'vie',
        'vo': 'vol',
        'wa': 'wln',
        'wo': 'wol',
        'xh': 'xho',
        'yi': 'yid',
        'yo': 'yor',
        'za': 'zha',
        'zh': 'zho',
        'zu': 'zul',
    }

    @classmethod
    def short2long(cls, code):
        """Convert language code from ISO 639-1 to ISO 639-2/T"""
        # Only the leading two letters matter, so e.g. 'en-US' resolves too.
        return cls._lang_map.get(code[:2])

    @classmethod
    def long2short(cls, code):
        """Convert language code from ISO 639-2/T to ISO 639-1"""
        return next(
            (short for short, long_name in cls._lang_map.items()
             if long_name == code),
            None)
2342
2343
class ISO3166Utils(object):
    """Lookup of full English country names from ISO 3166-1 alpha-2 codes."""
    # From http://data.okfn.org/data/core/country-list
    _country_map = {
        'AF': 'Afghanistan',
        'AX': 'Åland Islands',
        'AL': 'Albania',
        'DZ': 'Algeria',
        'AS': 'American Samoa',
        'AD': 'Andorra',
        'AO': 'Angola',
        'AI': 'Anguilla',
        'AQ': 'Antarctica',
        'AG': 'Antigua and Barbuda',
        'AR': 'Argentina',
        'AM': 'Armenia',
        'AW': 'Aruba',
        'AU': 'Australia',
        'AT': 'Austria',
        'AZ': 'Azerbaijan',
        'BS': 'Bahamas',
        'BH': 'Bahrain',
        'BD': 'Bangladesh',
        'BB': 'Barbados',
        'BY': 'Belarus',
        'BE': 'Belgium',
        'BZ': 'Belize',
        'BJ': 'Benin',
        'BM': 'Bermuda',
        'BT': 'Bhutan',
        'BO': 'Bolivia, Plurinational State of',
        'BQ': 'Bonaire, Sint Eustatius and Saba',
        'BA': 'Bosnia and Herzegovina',
        'BW': 'Botswana',
        'BV': 'Bouvet Island',
        'BR': 'Brazil',
        'IO': 'British Indian Ocean Territory',
        'BN': 'Brunei Darussalam',
        'BG': 'Bulgaria',
        'BF': 'Burkina Faso',
        'BI': 'Burundi',
        'KH': 'Cambodia',
        'CM': 'Cameroon',
        'CA': 'Canada',
        'CV': 'Cape Verde',
        'KY': 'Cayman Islands',
        'CF': 'Central African Republic',
        'TD': 'Chad',
        'CL': 'Chile',
        'CN': 'China',
        'CX': 'Christmas Island',
        'CC': 'Cocos (Keeling) Islands',
        'CO': 'Colombia',
        'KM': 'Comoros',
        'CG': 'Congo',
        'CD': 'Congo, the Democratic Republic of the',
        'CK': 'Cook Islands',
        'CR': 'Costa Rica',
        'CI': 'Côte d\'Ivoire',
        'HR': 'Croatia',
        'CU': 'Cuba',
        'CW': 'Curaçao',
        'CY': 'Cyprus',
        'CZ': 'Czech Republic',
        'DK': 'Denmark',
        'DJ': 'Djibouti',
        'DM': 'Dominica',
        'DO': 'Dominican Republic',
        'EC': 'Ecuador',
        'EG': 'Egypt',
        'SV': 'El Salvador',
        'GQ': 'Equatorial Guinea',
        'ER': 'Eritrea',
        'EE': 'Estonia',
        'ET': 'Ethiopia',
        'FK': 'Falkland Islands (Malvinas)',
        'FO': 'Faroe Islands',
        'FJ': 'Fiji',
        'FI': 'Finland',
        'FR': 'France',
        'GF': 'French Guiana',
        'PF': 'French Polynesia',
        'TF': 'French Southern Territories',
        'GA': 'Gabon',
        'GM': 'Gambia',
        'GE': 'Georgia',
        'DE': 'Germany',
        'GH': 'Ghana',
        'GI': 'Gibraltar',
        'GR': 'Greece',
        'GL': 'Greenland',
        'GD': 'Grenada',
        'GP': 'Guadeloupe',
        'GU': 'Guam',
        'GT': 'Guatemala',
        'GG': 'Guernsey',
        'GN': 'Guinea',
        'GW': 'Guinea-Bissau',
        'GY': 'Guyana',
        'HT': 'Haiti',
        'HM': 'Heard Island and McDonald Islands',
        'VA': 'Holy See (Vatican City State)',
        'HN': 'Honduras',
        'HK': 'Hong Kong',
        'HU': 'Hungary',
        'IS': 'Iceland',
        'IN': 'India',
        'ID': 'Indonesia',
        'IR': 'Iran, Islamic Republic of',
        'IQ': 'Iraq',
        'IE': 'Ireland',
        'IM': 'Isle of Man',
        'IL': 'Israel',
        'IT': 'Italy',
        'JM': 'Jamaica',
        'JP': 'Japan',
        'JE': 'Jersey',
        'JO': 'Jordan',
        'KZ': 'Kazakhstan',
        'KE': 'Kenya',
        'KI': 'Kiribati',
        'KP': 'Korea, Democratic People\'s Republic of',
        'KR': 'Korea, Republic of',
        'KW': 'Kuwait',
        'KG': 'Kyrgyzstan',
        'LA': 'Lao People\'s Democratic Republic',
        'LV': 'Latvia',
        'LB': 'Lebanon',
        'LS': 'Lesotho',
        'LR': 'Liberia',
        'LY': 'Libya',
        'LI': 'Liechtenstein',
        'LT': 'Lithuania',
        'LU': 'Luxembourg',
        'MO': 'Macao',
        'MK': 'Macedonia, the Former Yugoslav Republic of',
        'MG': 'Madagascar',
        'MW': 'Malawi',
        'MY': 'Malaysia',
        'MV': 'Maldives',
        'ML': 'Mali',
        'MT': 'Malta',
        'MH': 'Marshall Islands',
        'MQ': 'Martinique',
        'MR': 'Mauritania',
        'MU': 'Mauritius',
        'YT': 'Mayotte',
        'MX': 'Mexico',
        'FM': 'Micronesia, Federated States of',
        'MD': 'Moldova, Republic of',
        'MC': 'Monaco',
        'MN': 'Mongolia',
        'ME': 'Montenegro',
        'MS': 'Montserrat',
        'MA': 'Morocco',
        'MZ': 'Mozambique',
        'MM': 'Myanmar',
        'NA': 'Namibia',
        'NR': 'Nauru',
        'NP': 'Nepal',
        'NL': 'Netherlands',
        'NC': 'New Caledonia',
        'NZ': 'New Zealand',
        'NI': 'Nicaragua',
        'NE': 'Niger',
        'NG': 'Nigeria',
        'NU': 'Niue',
        'NF': 'Norfolk Island',
        'MP': 'Northern Mariana Islands',
        'NO': 'Norway',
        'OM': 'Oman',
        'PK': 'Pakistan',
        'PW': 'Palau',
        'PS': 'Palestine, State of',
        'PA': 'Panama',
        'PG': 'Papua New Guinea',
        'PY': 'Paraguay',
        'PE': 'Peru',
        'PH': 'Philippines',
        'PN': 'Pitcairn',
        'PL': 'Poland',
        'PT': 'Portugal',
        'PR': 'Puerto Rico',
        'QA': 'Qatar',
        'RE': 'Réunion',
        'RO': 'Romania',
        'RU': 'Russian Federation',
        'RW': 'Rwanda',
        'BL': 'Saint Barthélemy',
        'SH': 'Saint Helena, Ascension and Tristan da Cunha',
        'KN': 'Saint Kitts and Nevis',
        'LC': 'Saint Lucia',
        'MF': 'Saint Martin (French part)',
        'PM': 'Saint Pierre and Miquelon',
        'VC': 'Saint Vincent and the Grenadines',
        'WS': 'Samoa',
        'SM': 'San Marino',
        'ST': 'Sao Tome and Principe',
        'SA': 'Saudi Arabia',
        'SN': 'Senegal',
        'RS': 'Serbia',
        'SC': 'Seychelles',
        'SL': 'Sierra Leone',
        'SG': 'Singapore',
        'SX': 'Sint Maarten (Dutch part)',
        'SK': 'Slovakia',
        'SI': 'Slovenia',
        'SB': 'Solomon Islands',
        'SO': 'Somalia',
        'ZA': 'South Africa',
        'GS': 'South Georgia and the South Sandwich Islands',
        'SS': 'South Sudan',
        'ES': 'Spain',
        'LK': 'Sri Lanka',
        'SD': 'Sudan',
        'SR': 'Suriname',
        'SJ': 'Svalbard and Jan Mayen',
        'SZ': 'Swaziland',
        'SE': 'Sweden',
        'CH': 'Switzerland',
        'SY': 'Syrian Arab Republic',
        'TW': 'Taiwan, Province of China',
        'TJ': 'Tajikistan',
        'TZ': 'Tanzania, United Republic of',
        'TH': 'Thailand',
        'TL': 'Timor-Leste',
        'TG': 'Togo',
        'TK': 'Tokelau',
        'TO': 'Tonga',
        'TT': 'Trinidad and Tobago',
        'TN': 'Tunisia',
        'TR': 'Turkey',
        'TM': 'Turkmenistan',
        'TC': 'Turks and Caicos Islands',
        'TV': 'Tuvalu',
        'UG': 'Uganda',
        'UA': 'Ukraine',
        'AE': 'United Arab Emirates',
        'GB': 'United Kingdom',
        'US': 'United States',
        'UM': 'United States Minor Outlying Islands',
        'UY': 'Uruguay',
        'UZ': 'Uzbekistan',
        'VU': 'Vanuatu',
        'VE': 'Venezuela, Bolivarian Republic of',
        'VN': 'Viet Nam',
        'VG': 'Virgin Islands, British',
        'VI': 'Virgin Islands, U.S.',
        'WF': 'Wallis and Futuna',
        'EH': 'Western Sahara',
        'YE': 'Yemen',
        'ZM': 'Zambia',
        'ZW': 'Zimbabwe',
    }

    @classmethod
    def short2full(cls, code):
        """Convert an ISO 3166-2 country code to the corresponding full name"""
        # Codes are stored upper-case; normalize the input before lookup.
        normalized = code.upper()
        return cls._country_map.get(normalized)
2602
2603
class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
    # ProxyHandler variant that lets an individual request override the proxy
    # via a 'Ytdl-request-proxy' header; the sentinel value '__noproxy__'
    # disables proxying for that request entirely.

    def __init__(self, proxies=None):
        # Set default handlers
        # Register http_open/https_open so that even schemes absent from
        # *proxies* are routed through proxy_open.  Binding `type` and
        # `self.proxy_open` as lambda default arguments freezes their current
        # values (works around Python's late-binding closures).
        for type in ('http', 'https'):
            setattr(self, '%s_open' % type,
                    lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
                        meth(r, proxy, type))
        return compat_urllib_request.ProxyHandler.__init__(self, proxies)

    def proxy_open(self, req, proxy, type):
        # Honor a per-request proxy override and strip the internal header so
        # it is never actually sent over the wire.
        req_proxy = req.headers.get('Ytdl-request-proxy')
        if req_proxy is not None:
            proxy = req_proxy
            del req.headers['Ytdl-request-proxy']

        if proxy == '__noproxy__':
            return None  # No Proxy
        # Fall back to the standard ProxyHandler behaviour otherwise.
        return compat_urllib_request.ProxyHandler.proxy_open(
            self, req, proxy, type)
2623
2624
def ohdave_rsa_encrypt(data, exponent, modulus):
    '''
    Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/

    Input:
        data: data to encrypt, bytes-like object
        exponent, modulus: parameter e and N of RSA algorithm, both integer
    Output: hex string of encrypted data

    Limitation: supports one block encryption only
    '''
    # The bytes are interpreted little-endian: reverse them, then read the
    # hex representation as a big integer.
    payload = int(binascii.hexlify(data[::-1]), 16)
    return '%x' % pow(payload, exponent, modulus)
2640
2641
def encode_base_n(num, n, table=None):
    """Encode a non-negative integer *num* as a string in base *n*.

    table: optional digit alphabet; defaults to 0-9a-zA-Z truncated to n.
    Raises ValueError if n exceeds the table length or num is negative.
    """
    FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if not table:
        table = FULL_TABLE[:n]

    if n > len(table):
        raise ValueError('base %d exceeds table length %d' % (n, len(table)))

    if num < 0:
        # Without this guard the loop below never terminates: for negative
        # numbers `num // n` never reaches 0.
        raise ValueError('cannot encode negative number %d' % num)

    if num == 0:
        return table[0]

    ret = ''
    while num:
        ret = table[num % n] + ret
        num = num // n
    return ret
2658
2659
def decode_packed_codes(code):
    """Decode JavaScript obfuscated with Dean Edwards' p.a.c.k.e.r. packer."""
    mobj = re.search(
        r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)",
        code)
    obfuscated_code, base, count, symbols = mobj.groups()
    base = int(base)
    count = int(count)
    symbols = symbols.split('|')

    # Map each base-N token to its replacement word, falling back to the
    # token itself when the corresponding word-list entry is empty.
    symbol_table = {}
    for index in range(count):
        key = encode_base_n(index, base)
        symbol_table[key] = symbols[index] or key

    return re.sub(
        r'\b(\w+)\b', lambda match: symbol_table[match.group(0)],
        obfuscated_code)