# yt_dlp/downloader/http.py
# (recovered from a gitweb blob view; commit note: "[cleanup] Misc fixes")
1 from __future__ import unicode_literals
2
3 import os
4 import ssl
5 import time
6 import random
7
8 from .common import FileDownloader
9 from ..compat import (
10 compat_urllib_error,
11 compat_http_client
12 )
13 from ..utils import (
14 ContentTooShortError,
15 encodeFilename,
16 int_or_none,
17 parse_http_range,
18 sanitized_Request,
19 ThrottledDownload,
20 try_call,
21 write_xattr,
22 XAttrMetadataError,
23 XAttrUnavailableError,
24 )
25
# Exceptions that can surface while reading a response body; callers treat
# any of these as a transient, retryable failure.
RESPONSE_READ_EXCEPTIONS = (
    TimeoutError,
    ConnectionError,
    ssl.SSLError,
    compat_http_client.HTTPException,
)
27
28
class HttpFD(FileDownloader):
    """Plain HTTP(S) downloader with resume, Range and chunked-transfer support.

    Drives a single download end to end: opens the connection (optionally with
    a ``Range`` header for resuming or for chunked transfer), streams the body
    to a temporary file, fires progress hooks, and retries transient failures
    up to the configured retry count.
    """

    def real_download(self, filename, info_dict):
        """Download ``info_dict['url']`` to *filename*.

        Returns True on success, False on fatal failure.  Transient errors
        (dropped connections, 5xx responses, short reads) trigger a retry via
        the RetryDownload control exception; chunked downloads re-enter the
        main loop via NextFragment.
        """
        url = info_dict['url']
        request_data = info_dict.get('request_data', None)

        # Attribute-style mutable bag shared by the nested closures below.
        # dict.get as __getattr__ means any unset attribute reads as None.
        class DownloadContext(dict):
            __getattr__ = dict.get
            __setattr__ = dict.__setitem__
            __delattr__ = dict.__delitem__

        ctx = DownloadContext()
        ctx.filename = filename
        ctx.tmpfilename = self.temp_name(filename)
        ctx.stream = None  # output file object; opened lazily on first data block

        # Do not include the Accept-Encoding header
        headers = {'Youtubedl-no-compression': 'True'}
        add_headers = info_dict.get('http_headers')
        if add_headers:
            headers.update(add_headers)

        is_test = self.params.get('test', False)
        chunk_size = self._TEST_FILE_SIZE if is_test else (
            self.params.get('http_chunk_size')
            or info_dict.get('downloader_options', {}).get('http_chunk_size')
            or 0)  # 0 disables chunked downloading

        ctx.open_mode = 'wb'
        ctx.resume_len = 0
        ctx.block_size = self.params.get('buffersize', 1024)
        ctx.start_time = time.time()

        # parse given Range
        req_start, req_end, _ = parse_http_range(headers.get('Range'))

        if self.params.get('continuedl', True):
            # Establish possible resume length
            if os.path.isfile(encodeFilename(ctx.tmpfilename)):
                ctx.resume_len = os.path.getsize(
                    encodeFilename(ctx.tmpfilename))

        ctx.is_resume = ctx.resume_len > 0

        count = 0
        retries = self.params.get('retries', 0)

        # Control-flow exceptions used to unwind from the nested helpers back
        # into the retry loop at the bottom of this method.
        class SucceedDownload(Exception):
            pass

        class RetryDownload(Exception):
            def __init__(self, source_error):
                self.source_error = source_error

        class NextFragment(Exception):
            pass

        def establish_connection():
            """Open ctx.data for the next request, honouring resume/Range/chunking.

            On success sets ctx.data, ctx.content_len and ctx.data_len.  May
            raise RetryDownload, SucceedDownload, or re-raise fatal errors.
            """
            # Per-request jitter of the chunk size (95%-100% of the nominal
            # value) — presumably to make chunked requests less uniform;
            # disabled in test mode so the test size is exact.
            ctx.chunk_size = (random.randint(int(chunk_size * 0.95), chunk_size)
                              if not is_test and chunk_size else chunk_size)
            if ctx.resume_len > 0:
                range_start = ctx.resume_len
                if req_start is not None:
                    # offset the beginning of Range to be within request
                    range_start += req_start
                if ctx.is_resume:
                    self.report_resuming_byte(ctx.resume_len)
                ctx.open_mode = 'ab'
            elif req_start is not None:
                range_start = req_start
            elif ctx.chunk_size > 0:
                range_start = 0
            else:
                range_start = None
            ctx.is_resume = False

            if ctx.chunk_size:
                chunk_aware_end = range_start + ctx.chunk_size - 1
                # we're not allowed to download outside Range
                range_end = chunk_aware_end if req_end is None else min(chunk_aware_end, req_end)
            elif req_end is not None:
                # there's no need for chunked downloads, so download until the end of Range
                range_end = req_end
            else:
                range_end = None

            # try_call swallows the TypeError raised when either bound is None,
            # so these comparisons only apply when both values are known.
            if try_call(lambda: range_start > range_end):
                ctx.resume_len = 0
                ctx.open_mode = 'wb'
                raise RetryDownload(Exception(f'Conflicting range. (start={range_start} > end={range_end})'))

            # ctx.content_len is None until the first response; afterwards,
            # clamp the requested end to the known full length.
            if try_call(lambda: range_end >= ctx.content_len):
                range_end = ctx.content_len - 1

            request = sanitized_Request(url, request_data, headers)
            has_range = range_start is not None
            if has_range:
                request.add_header('Range', f'bytes={int(range_start)}-{int_or_none(range_end) or ""}')
            # Establish connection
            try:
                ctx.data = self.ydl.urlopen(request)
                # When trying to resume, Content-Range HTTP header of response has to be checked
                # to match the value of requested Range HTTP header. This is due to a webservers
                # that don't support resuming and serve a whole file with no Content-Range
                # set in response despite of requested Range (see
                # https://github.com/ytdl-org/youtube-dl/issues/6057#issuecomment-126129799)
                if has_range:
                    content_range = ctx.data.headers.get('Content-Range')
                    content_range_start, content_range_end, content_len = parse_http_range(content_range)
                    if content_range_start is not None and range_start == content_range_start:
                        # Content-Range is present and matches requested Range, resume is possible
                        accept_content_len = (
                            # Non-chunked download
                            not ctx.chunk_size
                            # Chunked download and requested piece or
                            # its part is promised to be served
                            or content_range_end == range_end
                            or content_len < range_end)
                        if accept_content_len:
                            ctx.content_len = content_len
                            # data_len: bytes expected within the caller's
                            # requested Range (not the whole resource).
                            ctx.data_len = min(content_len, req_end or content_len) - (req_start or 0)
                            return
                # Content-Range is either not present or invalid. Assuming remote webserver is
                # trying to send the whole file, resume is not possible, so wiping the local file
                # and performing entire redownload
                self.report_unable_to_resume()
                ctx.resume_len = 0
                ctx.open_mode = 'wb'
                ctx.data_len = ctx.content_len = int_or_none(ctx.data.info().get('Content-length', None))
            except (compat_urllib_error.HTTPError, ) as err:
                if err.code == 416:
                    # Unable to resume (requested range not satisfiable)
                    try:
                        # Open the connection again without the range header
                        ctx.data = self.ydl.urlopen(
                            sanitized_Request(url, request_data, headers))
                        content_length = ctx.data.info()['Content-Length']
                    except (compat_urllib_error.HTTPError, ) as err:
                        # Only 5xx errors are retryable; everything else is fatal.
                        if err.code < 500 or err.code >= 600:
                            raise
                    else:
                        # Examine the reported length
                        if (content_length is not None
                                and (ctx.resume_len - 100 < int(content_length) < ctx.resume_len + 100)):
                            # The file had already been fully downloaded.
                            # Explanation to the above condition: in issue #175 it was revealed that
                            # YouTube sometimes adds or removes a few bytes from the end of the file,
                            # changing the file size slightly and causing problems for some users. So
                            # I decided to implement a suggested change and consider the file
                            # completely downloaded if the file size differs less than 100 bytes from
                            # the one in the hard drive.
                            self.report_file_already_downloaded(ctx.filename)
                            self.try_rename(ctx.tmpfilename, ctx.filename)
                            self._hook_progress({
                                'filename': ctx.filename,
                                'status': 'finished',
                                'downloaded_bytes': ctx.resume_len,
                                'total_bytes': ctx.resume_len,
                            }, info_dict)
                            raise SucceedDownload()
                        else:
                            # The length does not match, we start the download over
                            self.report_unable_to_resume()
                            ctx.resume_len = 0
                            ctx.open_mode = 'wb'
                            return
                elif err.code < 500 or err.code >= 600:
                    # Unexpected HTTP error
                    raise
                raise RetryDownload(err)
            except compat_urllib_error.URLError as err:
                if isinstance(err.reason, ssl.CertificateError):
                    raise
                raise RetryDownload(err)
            # In urllib.request.AbstractHTTPHandler, the response is partially read on request.
            # Any errors that occur during this will not be wrapped by URLError
            except RESPONSE_READ_EXCEPTIONS as err:
                raise RetryDownload(err)

        def download():
            """Stream ctx.data to disk; return True/False or raise a control exception."""
            data_len = ctx.data.info().get('Content-length', None)

            # Range HTTP header may be ignored/unsupported by a webserver
            # (e.g. extractor/scivee.py, extractor/bambuser.py).
            # However, for a test we still would like to download just a piece of a file.
            # To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
            # block size when downloading a file.
            if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
                data_len = self._TEST_FILE_SIZE

            if data_len is not None:
                data_len = int(data_len) + ctx.resume_len
                min_data_len = self.params.get('min_filesize')
                max_data_len = self.params.get('max_filesize')
                if min_data_len is not None and data_len < min_data_len:
                    self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                    return False
                if max_data_len is not None and data_len > max_data_len:
                    self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
                    return False

            byte_counter = 0 + ctx.resume_len
            block_size = ctx.block_size
            start = time.time()

            # measure time over whole while-loop, so slow_down() and best_block_size() work together properly
            now = None  # needed for slow_down() in the first loop run
            before = start  # start measuring

            def retry(e):
                # Close the output stream (unless it is stdout), record how far
                # we got so the next attempt resumes, and escalate to the
                # outer retry loop.
                to_stdout = ctx.tmpfilename == '-'
                if ctx.stream is not None:
                    if not to_stdout:
                        ctx.stream.close()
                    ctx.stream = None
                ctx.resume_len = byte_counter if to_stdout else os.path.getsize(encodeFilename(ctx.tmpfilename))
                raise RetryDownload(e)

            while True:
                try:
                    # Download and write
                    data_block = ctx.data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
                except RESPONSE_READ_EXCEPTIONS as err:
                    retry(err)

                byte_counter += len(data_block)

                # exit loop when download is finished
                if len(data_block) == 0:
                    break

                # Open destination file just in time
                if ctx.stream is None:
                    try:
                        ctx.stream, ctx.tmpfilename = self.sanitize_open(
                            ctx.tmpfilename, ctx.open_mode)
                        assert ctx.stream is not None
                        ctx.filename = self.undo_temp_name(ctx.tmpfilename)
                        self.report_destination(ctx.filename)
                    except (OSError, IOError) as err:
                        self.report_error('unable to open for writing: %s' % str(err))
                        return False

                    if self.params.get('xattr_set_filesize', False) and data_len is not None:
                        try:
                            write_xattr(ctx.tmpfilename, 'user.ytdl.filesize', str(data_len).encode('utf-8'))
                        except (XAttrUnavailableError, XAttrMetadataError) as err:
                            self.report_error('unable to set filesize xattr: %s' % str(err))

                try:
                    ctx.stream.write(data_block)
                except (IOError, OSError) as err:
                    self.to_stderr('\n')
                    self.report_error('unable to write data: %s' % str(err))
                    return False

                # Apply rate limit
                self.slow_down(start, now, byte_counter - ctx.resume_len)

                # end measuring of one loop run
                now = time.time()
                after = now

                # Adjust block size
                if not self.params.get('noresizebuffer', False):
                    block_size = self.best_block_size(after - before, len(data_block))

                before = after

                # Progress message
                speed = self.calc_speed(start, now, byte_counter - ctx.resume_len)
                if ctx.data_len is None:
                    eta = None
                else:
                    eta = self.calc_eta(start, time.time(), ctx.data_len - ctx.resume_len, byte_counter - ctx.resume_len)

                self._hook_progress({
                    'status': 'downloading',
                    'downloaded_bytes': byte_counter,
                    'total_bytes': ctx.data_len,
                    'tmpfilename': ctx.tmpfilename,
                    'filename': ctx.filename,
                    'eta': eta,
                    'speed': speed,
                    'elapsed': now - ctx.start_time,
                    'ctx_id': info_dict.get('ctx_id'),
                }, info_dict)

                if data_len is not None and byte_counter == data_len:
                    break

                if speed and speed < (self.params.get('throttledratelimit') or 0):
                    # The speed must stay below the limit for 3 seconds
                    # This prevents raising error when the speed temporarily goes down
                    if ctx.throttle_start is None:
                        ctx.throttle_start = now
                    elif now - ctx.throttle_start > 3:
                        if ctx.stream is not None and ctx.tmpfilename != '-':
                            ctx.stream.close()
                        raise ThrottledDownload()
                elif speed:
                    ctx.throttle_start = None

            # More of the resource remains (chunked mode): resume from the
            # current offset with a fresh request.
            if not is_test and ctx.chunk_size and ctx.content_len is not None and byte_counter < ctx.content_len:
                ctx.resume_len = byte_counter
                # ctx.block_size = block_size
                raise NextFragment()

            if ctx.stream is None:
                self.to_stderr('\n')
                self.report_error('Did not get any data blocks')
                return False
            if ctx.tmpfilename != '-':
                ctx.stream.close()

            if data_len is not None and byte_counter != data_len:
                err = ContentTooShortError(byte_counter, int(data_len))
                if count <= retries:
                    retry(err)
                raise err

            self.try_rename(ctx.tmpfilename, ctx.filename)

            # Update file modification time
            if self.params.get('updatetime', True):
                info_dict['filetime'] = self.try_utime(ctx.filename, ctx.data.info().get('last-modified', None))

            self._hook_progress({
                'downloaded_bytes': byte_counter,
                'total_bytes': byte_counter,
                'filename': ctx.filename,
                'status': 'finished',
                'elapsed': time.time() - ctx.start_time,
                'ctx_id': info_dict.get('ctx_id'),
            }, info_dict)

            return True

        # Main retry loop: each iteration is one connection attempt; the
        # control exceptions above decide whether to retry, continue with the
        # next chunk, or finish.
        while count <= retries:
            try:
                establish_connection()
                return download()
            except RetryDownload as e:
                count += 1
                if count <= retries:
                    self.report_retry(e.source_error, count, retries)
                else:
                    self.to_screen(f'[download] Got server HTTP error: {e.source_error}')
                continue
            except NextFragment:
                continue
            except SucceedDownload:
                return True

        self.report_error('giving up after %s retries' % retries)
        return False