from __future__ import unicode_literals
import asyncio
+import atexit
import base64
import binascii
import calendar
compat_HTMLParser,
compat_HTTPError,
compat_basestring,
+ compat_brotli,
compat_chr,
compat_cookiejar,
compat_ctypes_WINFUNCTYPE,
return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)
# Content encodings we can transparently decode in http_response();
# 'br' is advertised only when a brotli module could be imported.
SUPPORTED_ENCODINGS = ['gzip', 'deflate']
if compat_brotli:
    SUPPORTED_ENCODINGS.append('br')

std_headers = {
'User-Agent': random_user_agent(),
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
- 'Accept-Encoding': 'gzip, deflate',
+ 'Accept-Encoding': ', '.join(SUPPORTED_ENCODINGS),
'Accept-Language': 'en-us,en;q=0.5',
'Sec-Fetch-Mode': 'navigate',
}
if sys.exc_info()[0] in network_exceptions:
expected = True
- self.msg = str(msg)
+ self.orig_msg = str(msg)
self.traceback = tb
self.expected = expected
self.cause = cause
super(ExtractorError, self).__init__(''.join((
format_field(ie, template='[%s] '),
format_field(video_id, template='%s: '),
- self.msg,
+ msg,
format_field(cause, template=' (caused by %r)'),
'' if expected else bug_reports_message())))
def format_traceback(self):
    """Return the formatted traceback of this error and of its cause (if any),
    joined by a newline, or None when neither is available."""
    return join_nonempty(
        self.traceback and ''.join(traceback.format_tb(self.traceback)),
        # Pass (type, value, tb) explicitly: the single-argument form of
        # traceback.format_exception only exists on Python 3.10+.
        # [1:] drops the "Traceback (most recent call last):" header line.
        self.cause and ''.join(traceback.format_exception(
            type(self.cause), self.cause, self.cause.__traceback__)[1:]),
        delim='\n') or None
class UnsupportedError(ExtractorError):
except zlib.error:
return zlib.decompress(data)
@staticmethod
def brotli(data):
    """Decompress a brotli-encoded payload; empty payloads pass through unchanged."""
    return compat_brotli.decompress(data) if data else data
+
def http_request(self, req):
# According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
# always respected by websites, some tend to give out URLs with non percent-encoded
if url != url_escaped:
req = update_Request(req, url=url_escaped)
- for h, v in std_headers.items():
+ for h, v in self._params.get('http_headers', std_headers).items():
# Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
# The dict keys are capitalized because of this bug by urllib
if h.capitalize() not in req.headers:
resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
del resp.headers['Content-encoding']
+ # brotli
+ if resp.headers.get('Content-encoding', '') == 'br':
+ resp = compat_urllib_request.addinfourl(
+ io.BytesIO(self.brotli(resp.read())), old_resp.headers, old_resp.url, old_resp.code)
+ resp.msg = old_resp.msg
+ del resp.headers['Content-encoding']
# Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
# https://github.com/ytdl-org/youtube-dl/issues/6457).
if 300 <= resp.code < 400:
whole_low = 0xffffffff
whole_high = 0x7fffffff
def _lock_file(f, exclusive, block):
    # Acquire a Windows lock over the whole file range via LockFileEx.
    # Flags: 0x2 = LOCKFILE_EXCLUSIVE_LOCK, 0x1 = LOCKFILE_FAIL_IMMEDIATELY
    # (set when a non-blocking acquisition was requested).
    overlapped = OVERLAPPED()
    overlapped.Offset = 0
    overlapped.OffsetHigh = 0
    overlapped.hEvent = 0
    # Keep the OVERLAPPED struct alive on the file object; _unlock_file reuses it
    f._lock_file_overlapped_p = ctypes.pointer(overlapped)

    if not LockFileEx(msvcrt.get_osfhandle(f.fileno()),
                      (0x2 if exclusive else 0x0) | (0x0 if block else 0x1),
                      0, whole_low, whole_high, f._lock_file_overlapped_p):
        # NOTE(review): BlockingIOError is raised for *any* LockFileEx failure,
        # not only lock contention — confirm callers only expect "would block"
        raise BlockingIOError('Locking file failed: %r' % ctypes.FormatError())
def _unlock_file(f):
    # Release the lock taken by _lock_file, reusing the OVERLAPPED pointer
    # stashed on the file object during acquisition.
    assert f._lock_file_overlapped_p
    handle = msvcrt.get_osfhandle(f.fileno())
    if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p):
        raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
else:
- # Some platforms, such as Jython, is missing fcntl
try:
import fcntl
def _lock_file(f, exclusive, block):
    # Prefer flock(); a BlockingIOError (lock already held under LOCK_NB)
    # must propagate unchanged, while any other OSError triggers the
    # lockf() fallback for platforms without flock().
    try:
        fcntl.flock(f,
            fcntl.LOCK_SH if not exclusive
            else fcntl.LOCK_EX if block
            else fcntl.LOCK_EX | fcntl.LOCK_NB)
    except BlockingIOError:
        raise
    except OSError:  # AOSP does not have flock()
        fcntl.lockf(f,
            fcntl.LOCK_SH if not exclusive
            else fcntl.LOCK_EX if block
            else fcntl.LOCK_EX | fcntl.LOCK_NB)
def _unlock_file(f):
    # Mirror _lock_file: try flock() first, fall back to lockf()
    # on platforms (e.g. AOSP) where flock() is unavailable.
    try:
        fcntl.flock(f, fcntl.LOCK_UN)
    except OSError:
        fcntl.lockf(f, fcntl.LOCK_UN)
except ImportError:
UNSUPPORTED_MSG = 'file locking is not supported on this platform'
class locked_file(object):
# Set once the lock has been released and the file closed,
# so that __exit__ never tries to unlock twice
_closed = False

def __init__(self, filename, mode, block=True, encoding=None):
assert mode in ['r', 'rb', 'a', 'ab', 'w', 'wb']
self.f = io.open(filename, mode, encoding=encoding)
def __exit__(self, etype, value, traceback):
    # Unlock at most once even if __exit__ runs repeatedly; the file is
    # always closed regardless of whether unlocking raised.
    try:
        if not self._closed:
            _unlock_file(self.f)
    finally:
        self.f.close()
        self._closed = True
def __iter__(self):
    """Delegate iteration (line by line) to the underlying file object."""
    return self.f.__iter__()
def format_decimal_suffix(num, fmt='%d%s', *, factor=1000):
""" Formats numbers with decimal sufixes like K, M, etc """
num, factor = float_or_none(num), float(factor)
- if num is None:
+ if num is None or num < 0:
return None
exponent = 0 if num == 0 else int(math.log(num, factor))
suffix = ['', *'kMGTPEZY'][exponent]
def __init__(self, pagefunc, pagesize, use_cache=True):
    # pagefunc: callable(pagenum) -> iterable of entries for that page
    self._pagefunc = pagefunc
    self._pagesize = pagesize
    # Assume an unbounded number of pages until a fetch fails (getpage
    # then short-circuits requests past the last known-good page)
    self._pagecount = float('inf')
    self._use_cache = use_cache
    self._cache = {}
def getpage(self, pagenum):
    """Return the (possibly cached) list of entries for page `pagenum`."""
    cached = self._cache.get(pagenum)
    if cached is not None:
        return cached
    # Pages beyond the known page count are empty by definition
    if pagenum > self._pagecount:
        results = []
    else:
        results = list(self._pagefunc(pagenum))
    if self._use_cache:
        self._cache[pagenum] = results
    return results
raise NotImplementedError('This method must be implemented by subclasses')
def __getitem__(self, idx):
- # NOTE: cache must be enabled if this is used
+ assert self._use_cache, 'Indexing PagedList requires cache'
if not isinstance(idx, int) or idx < 0:
raise TypeError('indices must be non-negative integers')
entries = self.getslice(idx, idx + 1)
if (end is not None and firstid <= end <= nextfirstid)
else None)
- page_results = self.getpage(pagenum)
+ try:
+ page_results = self.getpage(pagenum)
+ except Exception:
+ self._pagecount = pagenum - 1
+ raise
if startv != 0 or endv is not None:
page_results = page_results[startv:endv]
yield from page_results
class InAdvancePagedList(PagedList):
def __init__(self, pagefunc, pagecount, pagesize):
    PagedList.__init__(self, pagefunc, pagesize, True)
    # Must be assigned *after* the base __init__, which resets
    # _pagecount to infinity
    self._pagecount = pagecount
def _getslice(self, start, end):
start_page = start // self._pagesize
extra_gap += 1
if delim:
table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data
- table[1][-1] = table[1][-1][:-extra_gap] # Remove extra_gap from end of delimiter
+ table[1][-1] = table[1][-1][:-extra_gap * len(delim)] # Remove extra_gap from end of delimiter
for row in table:
for pos, text in enumerate(map(str, row)):
if '\t' in text:
return delim.join(map(str, filter(None, values)))
def scale_thumbnails_to_max_format_width(formats, thumbnails, url_width_re):
    """
    Find the largest format dimensions in terms of video width and, for each thumbnail:
    * Modify the URL: Match the width with the provided regex and replace with the former width
    * Update dimensions

    This function is useful with video services that scale the provided thumbnails on demand
    """
    dims = ('width', 'height')
    # Largest (width, height) pair across all formats; missing values count as 0
    best = max(
        (tuple(fmt.get(key) or 0 for key in dims) for fmt in formats),
        default=(0, 0))
    if not best[0]:
        return thumbnails
    width = str(best[0])
    scaled = []
    for thumb in thumbnails:
        # Thumbnail's own keys win; url/width/height only fill in the gaps
        scaled.append(merge_dicts(
            {'url': re.sub(url_width_re, width, thumb['url'])},
            dict(zip(dims, best)), thumb))
    return scaled


+
+
def parse_http_range(range):
    """ Parse value of "Range" or "Content-Range" HTTP header into tuple. """
    # Returns (start, end, total); any component that is absent is None
    match = range and re.search(r'bytes[ =](\d+)-(\d+)?(?:/(\d+))?', range)
    if not match:
        return None, None, None
    start, end, total = match.group(1, 2, 3)
    return int(start), int_or_none(end), int_or_none(total)


+
+
class Config:
own_args = None
filename = None
self.conn = compat_websockets.connect(
url, extra_headers=headers, ping_interval=None,
close_timeout=float('inf'), loop=self.loop, ping_timeout=float('inf'))
+ atexit.register(self.__exit__, None, None, None)
def __enter__(self):
self.pool = self.run_with_loop(self.conn.__aenter__(), self.loop)
return self.run_with_loop(self.conn.__aexit__(type, value, traceback), self.loop)
finally:
self.loop.close()
- self.r_cancel_all_tasks(self.loop)
+ self._cancel_all_tasks(self.loop)
# taken from https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py with modifications
# for contributors: If there's any new library using asyncio needs to be run in non-async, move these function out of this class
has_websockets = bool(compat_websockets)
+
+
def merge_headers(*dicts):
    """Merge dicts of network headers case insensitively, prioritizing the latter ones"""
    merged = {}
    for headers in dicts:
        for name, value in headers.items():
            # str.capitalize() matches urllib's header normalization
            # (see python bug 2275), so differing cases collapse to one key
            merged[name.capitalize()] = value
    return merged