1 from __future__
import division
, unicode_literals
9 import concurrent
.futures
10 can_threaded_download
= True
12 can_threaded_download
= False
14 from .common
import FileDownloader
15 from .http
import HttpFD
16 from ..aes
import aes_cbc_decrypt_bytes
17 from ..compat
import (
class HttpQuietDownloader(HttpFD):
    """An HttpFD variant that suppresses normal screen output.

    Used internally to download individual fragments without spamming
    per-fragment progress lines; retries are still reported explicitly.
    """

    def to_screen(self, *args, **kargs):
        # Swallow all regular output; the outer FragmentFD owns the display.
        pass

    def report_retry(self, err, count, retries):
        # Bypass the silenced to_screen above so retry notices remain visible.
        super().to_screen(
            f'[download] Got server HTTP error: {err}. Retrying (attempt {count} of {self.format_retries(retries)}) ...')
class FragmentFD(FileDownloader):
    """
    A base file downloader class for fragmented media (e.g. f4m/m3u8 manifests).

    Available options:

    fragment_retries:   Number of times to retry a fragment for HTTP error (DASH
                        and hlsnative only)
    skip_unavailable_fragments:
                        Skip unavailable fragments (DASH and hlsnative only)
    keep_fragments:     Keep downloaded fragments on disk after downloading is
                        finished
    _no_ytdl_file:      Don't use .ytdl file

    For each incomplete fragment download yt-dlp keeps on disk a special
    bookkeeping file with download state and metadata (in future such files will
    be used for any incomplete download handled by yt-dlp). This file is
    used to properly handle resuming, check download file consistency and detect
    potential errors. The file has a .ytdl extension and represents a standard
    JSON file of the following format:

    extractor:
        Dictionary of extractor related data. TBD.

    downloader:
        Dictionary of downloader related data. May contain following data:
            current_fragment:
                Dictionary with current (being downloaded) fragment data:
                index:  0-based index of current fragment among all fragments
            fragment_count:
                Total count of fragments

    This feature is experimental and file format may change in future.
    """
74 def report_retry_fragment(self
, err
, frag_index
, count
, retries
):
76 '\r[download] Got server HTTP error: %s. Retrying fragment %d (attempt %d of %s) ...'
77 % (error_to_compat_str(err
), frag_index
, count
, self
.format_retries(retries
)))
79 def report_skip_fragment(self
, frag_index
, err
=None):
80 err
= f
' {err};' if err
else ''
81 self
.to_screen(f
'[download]{err} Skipping fragment {frag_index:d} ...')
83 def _prepare_url(self
, info_dict
, url
):
84 headers
= info_dict
.get('http_headers')
85 return sanitized_Request(url
, None, headers
) if headers
else url
87 def _prepare_and_start_frag_download(self
, ctx
, info_dict
):
88 self
._prepare
_frag
_download
(ctx
)
89 self
._start
_frag
_download
(ctx
, info_dict
)
91 def __do_ytdl_file(self
, ctx
):
92 return not ctx
['live'] and not ctx
['tmpfilename'] == '-' and not self
.params
.get('_no_ytdl_file')
94 def _read_ytdl_file(self
, ctx
):
95 assert 'ytdl_corrupt' not in ctx
96 stream
, _
= sanitize_open(self
.ytdl_filename(ctx
['filename']), 'r')
98 ytdl_data
= json
.loads(stream
.read())
99 ctx
['fragment_index'] = ytdl_data
['downloader']['current_fragment']['index']
100 if 'extra_state' in ytdl_data
['downloader']:
101 ctx
['extra_state'] = ytdl_data
['downloader']['extra_state']
103 ctx
['ytdl_corrupt'] = True
107 def _write_ytdl_file(self
, ctx
):
108 frag_index_stream
, _
= sanitize_open(self
.ytdl_filename(ctx
['filename']), 'w')
111 'current_fragment': {
112 'index': ctx
['fragment_index'],
115 if 'extra_state' in ctx
:
116 downloader
['extra_state'] = ctx
['extra_state']
117 if ctx
.get('fragment_count') is not None:
118 downloader
['fragment_count'] = ctx
['fragment_count']
119 frag_index_stream
.write(json
.dumps({'downloader': downloader}
))
121 frag_index_stream
.close()
123 def _download_fragment(self
, ctx
, frag_url
, info_dict
, headers
=None, request_data
=None):
124 fragment_filename
= '%s-Frag%d' % (ctx
['tmpfilename'], ctx
['fragment_index'])
125 fragment_info_dict
= {
127 'http_headers': headers
or info_dict
.get('http_headers'),
128 'request_data': request_data
,
129 'ctx_id': ctx
.get('ctx_id'),
131 success
= ctx
['dl'].download(fragment_filename
, fragment_info_dict
)
134 if fragment_info_dict
.get('filetime'):
135 ctx
['fragment_filetime'] = fragment_info_dict
.get('filetime')
136 ctx
['fragment_filename_sanitized'] = fragment_filename
137 return True, self
._read
_fragment
(ctx
)
139 def _read_fragment(self
, ctx
):
140 down
, frag_sanitized
= sanitize_open(ctx
['fragment_filename_sanitized'], 'rb')
141 ctx
['fragment_filename_sanitized'] = frag_sanitized
142 frag_content
= down
.read()
146 def _append_fragment(self
, ctx
, frag_content
):
148 ctx
['dest_stream'].write(frag_content
)
149 ctx
['dest_stream'].flush()
151 if self
.__do
_ytdl
_file
(ctx
):
152 self
._write
_ytdl
_file
(ctx
)
153 if not self
.params
.get('keep_fragments', False):
154 os
.remove(encodeFilename(ctx
['fragment_filename_sanitized']))
155 del ctx
['fragment_filename_sanitized']
157 def _prepare_frag_download(self
, ctx
):
158 if 'live' not in ctx
:
161 total_frags_str
= '%d' % ctx
['total_frags']
162 ad_frags
= ctx
.get('ad_frags', 0)
164 total_frags_str
+= ' (not including %d ad)' % ad_frags
166 total_frags_str
= 'unknown (live)'
168 '[%s] Total fragments: %s' % (self
.FD_NAME
, total_frags_str
))
169 self
.report_destination(ctx
['filename'])
170 dl
= HttpQuietDownloader(
174 'quiet': self
.params
.get('quiet'),
176 'ratelimit': self
.params
.get('ratelimit'),
177 'retries': self
.params
.get('retries', 0),
178 'nopart': self
.params
.get('nopart', False),
179 'test': self
.params
.get('test', False),
182 tmpfilename
= self
.temp_name(ctx
['filename'])
186 # Establish possible resume length
187 if os
.path
.isfile(encodeFilename(tmpfilename
)):
189 resume_len
= os
.path
.getsize(encodeFilename(tmpfilename
))
191 # Should be initialized before ytdl file check
193 'tmpfilename': tmpfilename
,
197 if self
.__do
_ytdl
_file
(ctx
):
198 if os
.path
.isfile(encodeFilename(self
.ytdl_filename(ctx
['filename']))):
199 self
._read
_ytdl
_file
(ctx
)
200 is_corrupt
= ctx
.get('ytdl_corrupt') is True
201 is_inconsistent
= ctx
['fragment_index'] > 0 and resume_len
== 0
202 if is_corrupt
or is_inconsistent
:
204 '.ytdl file is corrupt' if is_corrupt
else
205 'Inconsistent state of incomplete fragment download')
207 '%s. Restarting from the beginning ...' % message
)
208 ctx
['fragment_index'] = resume_len
= 0
209 if 'ytdl_corrupt' in ctx
:
210 del ctx
['ytdl_corrupt']
211 self
._write
_ytdl
_file
(ctx
)
213 self
._write
_ytdl
_file
(ctx
)
214 assert ctx
['fragment_index'] == 0
216 dest_stream
, tmpfilename
= sanitize_open(tmpfilename
, open_mode
)
220 'dest_stream': dest_stream
,
221 'tmpfilename': tmpfilename
,
222 # Total complete fragments downloaded so far in bytes
223 'complete_frags_downloaded_bytes': resume_len
,
226 def _start_frag_download(self
, ctx
, info_dict
):
227 resume_len
= ctx
['complete_frags_downloaded_bytes']
228 total_frags
= ctx
['total_frags']
229 ctx_id
= ctx
.get('ctx_id')
230 # This dict stores the download progress, it's updated by the progress
233 'status': 'downloading',
234 'downloaded_bytes': resume_len
,
235 'fragment_index': ctx
['fragment_index'],
236 'fragment_count': total_frags
,
237 'filename': ctx
['filename'],
238 'tmpfilename': ctx
['tmpfilename'],
244 'fragment_started': start
,
245 # Amount of fragment's bytes downloaded by the time of the previous
246 # frag progress hook invocation
247 'prev_frag_downloaded_bytes': 0,
250 def frag_progress_hook(s
):
251 if s
['status'] not in ('downloading', 'finished'):
254 if ctx_id
is not None and s
.get('ctx_id') != ctx_id
:
257 state
['max_progress'] = ctx
.get('max_progress')
258 state
['progress_idx'] = ctx
.get('progress_idx')
260 time_now
= time
.time()
261 state
['elapsed'] = time_now
- start
262 frag_total_bytes
= s
.get('total_bytes') or 0
263 s
['fragment_info_dict'] = s
.pop('info_dict', {})
266 (ctx
['complete_frags_downloaded_bytes'] + frag_total_bytes
)
267 / (state
['fragment_index'] + 1) * total_frags
)
268 state
['total_bytes_estimate'] = estimated_size
270 if s
['status'] == 'finished':
271 state
['fragment_index'] += 1
272 ctx
['fragment_index'] = state
['fragment_index']
273 state
['downloaded_bytes'] += frag_total_bytes
- ctx
['prev_frag_downloaded_bytes']
274 ctx
['complete_frags_downloaded_bytes'] = state
['downloaded_bytes']
275 ctx
['speed'] = state
['speed'] = self
.calc_speed(
276 ctx
['fragment_started'], time_now
, frag_total_bytes
)
277 ctx
['fragment_started'] = time
.time()
278 ctx
['prev_frag_downloaded_bytes'] = 0
280 frag_downloaded_bytes
= s
['downloaded_bytes']
281 state
['downloaded_bytes'] += frag_downloaded_bytes
- ctx
['prev_frag_downloaded_bytes']
283 state
['eta'] = self
.calc_eta(
284 start
, time_now
, estimated_size
- resume_len
,
285 state
['downloaded_bytes'] - resume_len
)
286 ctx
['speed'] = state
['speed'] = self
.calc_speed(
287 ctx
['fragment_started'], time_now
, frag_downloaded_bytes
)
288 ctx
['prev_frag_downloaded_bytes'] = frag_downloaded_bytes
289 self
._hook
_progress
(state
, info_dict
)
291 ctx
['dl'].add_progress_hook(frag_progress_hook
)
295 def _finish_frag_download(self
, ctx
, info_dict
):
296 ctx
['dest_stream'].close()
297 if self
.__do
_ytdl
_file
(ctx
):
298 ytdl_filename
= encodeFilename(self
.ytdl_filename(ctx
['filename']))
299 if os
.path
.isfile(ytdl_filename
):
300 os
.remove(ytdl_filename
)
301 elapsed
= time
.time() - ctx
['started']
303 if ctx
['tmpfilename'] == '-':
304 downloaded_bytes
= ctx
['complete_frags_downloaded_bytes']
306 self
.try_rename(ctx
['tmpfilename'], ctx
['filename'])
307 if self
.params
.get('updatetime', True):
308 filetime
= ctx
.get('fragment_filetime')
311 os
.utime(ctx
['filename'], (time
.time(), filetime
))
314 downloaded_bytes
= os
.path
.getsize(encodeFilename(ctx
['filename']))
316 self
._hook
_progress
({
317 'downloaded_bytes': downloaded_bytes
,
318 'total_bytes': downloaded_bytes
,
319 'filename': ctx
['filename'],
320 'status': 'finished',
322 'ctx_id': ctx
.get('ctx_id'),
323 'max_progress': ctx
.get('max_progress'),
324 'progress_idx': ctx
.get('progress_idx'),
327 def _prepare_external_frag_download(self
, ctx
):
328 if 'live' not in ctx
:
331 total_frags_str
= '%d' % ctx
['total_frags']
332 ad_frags
= ctx
.get('ad_frags', 0)
334 total_frags_str
+= ' (not including %d ad)' % ad_frags
336 total_frags_str
= 'unknown (live)'
338 '[%s] Total fragments: %s' % (self
.FD_NAME
, total_frags_str
))
340 tmpfilename
= self
.temp_name(ctx
['filename'])
342 # Should be initialized before ytdl file check
344 'tmpfilename': tmpfilename
,
348 def decrypter(self
, info_dict
):
352 if url
not in _key_cache
:
353 _key_cache
[url
] = self
.ydl
.urlopen(self
._prepare
_url
(info_dict
, url
)).read()
354 return _key_cache
[url
]
356 def decrypt_fragment(fragment
, frag_content
):
357 decrypt_info
= fragment
.get('decrypt_info')
358 if not decrypt_info
or decrypt_info
['METHOD'] != 'AES-128':
360 iv
= decrypt_info
.get('IV') or compat_struct_pack('>8xq', fragment
['media_sequence'])
361 decrypt_info
['KEY'] = decrypt_info
.get('KEY') or _get_key(info_dict
.get('_decryption_key_url') or decrypt_info
['URI'])
362 # Don't decrypt the content in tests since the data is explicitly truncated and it's not to a valid block
363 # size (see https://github.com/ytdl-org/youtube-dl/pull/27660). Tests only care that the correct data downloaded,
364 # not what it decrypts to.
365 if self
.params
.get('test', False):
367 decrypted_data
= aes_cbc_decrypt_bytes(frag_content
, decrypt_info
['KEY'], iv
)
368 return decrypted_data
[:-decrypted_data
[-1]]
370 return decrypt_fragment
372 def download_and_append_fragments_multiple(self
, *args
, pack_func
=None, finish_func
=None):
374 @params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...
375 all args must be either tuple or list
377 max_progress
= len(args
)
378 if max_progress
== 1:
379 return self
.download_and_append_fragments(*args
[0], pack_func
=pack_func
, finish_func
=finish_func
)
380 max_workers
= self
.params
.get('concurrent_fragment_downloads', max_progress
)
382 self
._prepare
_multiline
_status
(max_progress
)
384 def thread_func(idx
, ctx
, fragments
, info_dict
, tpe
):
385 ctx
['max_progress'] = max_progress
386 ctx
['progress_idx'] = idx
387 return self
.download_and_append_fragments(ctx
, fragments
, info_dict
, pack_func
=pack_func
, finish_func
=finish_func
, tpe
=tpe
)
389 class FTPE(concurrent
.futures
.ThreadPoolExecutor
):
390 # has to stop this or it's going to wait on the worker thread itself
391 def __exit__(self
, exc_type
, exc_val
, exc_tb
):
395 for idx
, (ctx
, fragments
, info_dict
) in enumerate(args
):
396 tpe
= FTPE(ceil(max_workers
/ max_progress
))
397 job
= tpe
.submit(thread_func
, idx
, ctx
, fragments
, info_dict
, tpe
)
398 spins
.append((tpe
, job
))
401 for tpe
, job
in spins
:
403 result
= result
and job
.result()
405 tpe
.shutdown(wait
=True)
408 def download_and_append_fragments(self
, ctx
, fragments
, info_dict
, *, pack_func
=None, finish_func
=None, tpe
=None):
409 fragment_retries
= self
.params
.get('fragment_retries', 0)
410 is_fatal
= (lambda idx
: idx
== 0) if self
.params
.get('skip_unavailable_fragments', True) else (lambda _
: True)
412 pack_func
= lambda frag_content
, _
: frag_content
414 def download_fragment(fragment
, ctx
):
415 frag_index
= ctx
['fragment_index'] = fragment
['frag_index']
416 headers
= info_dict
.get('http_headers', {}).copy()
417 byte_range
= fragment
.get('byte_range')
419 headers
['Range'] = 'bytes=%d-%d' % (byte_range
['start'], byte_range
['end'] - 1)
421 # Never skip the first fragment
422 fatal
= is_fatal(fragment
.get('index') or (frag_index
- 1))
423 count
, frag_content
= 0, None
424 while count
<= fragment_retries
:
426 success
, frag_content
= self
._download
_fragment
(ctx
, fragment
['url'], info_dict
, headers
)
428 return False, frag_index
430 except compat_urllib_error
.HTTPError
as err
:
431 # Unavailable (possibly temporary) fragments may be served.
432 # First we try to retry then either skip or abort.
433 # See https://github.com/ytdl-org/youtube-dl/issues/10165,
434 # https://github.com/ytdl-org/youtube-dl/issues/10448).
436 if count
<= fragment_retries
:
437 self
.report_retry_fragment(err
, frag_index
, count
, fragment_retries
)
438 except DownloadError
:
439 # Don't retry fragment if error occurred during HTTP downloading
440 # itself since it has own retry settings
445 if count
> fragment_retries
:
447 return False, frag_index
448 ctx
['dest_stream'].close()
449 self
.report_error('Giving up after %s fragment retries' % fragment_retries
)
450 return False, frag_index
451 return frag_content
, frag_index
453 def append_fragment(frag_content
, frag_index
, ctx
):
455 if not is_fatal(frag_index
- 1):
456 self
.report_skip_fragment(frag_index
, 'fragment not found')
459 ctx
['dest_stream'].close()
461 'fragment %s not found, unable to continue' % frag_index
)
463 self
._append
_fragment
(ctx
, pack_func(frag_content
, frag_index
))
466 decrypt_fragment
= self
.decrypter(info_dict
)
468 max_workers
= self
.params
.get('concurrent_fragment_downloads', 1)
469 if can_threaded_download
and max_workers
> 1:
471 def _download_fragment(fragment
):
472 ctx_copy
= ctx
.copy()
473 frag_content
, frag_index
= download_fragment(fragment
, ctx_copy
)
474 return fragment
, frag_content
, frag_index
, ctx_copy
.get('fragment_filename_sanitized')
476 self
.report_warning('The download speed shown is only of one thread. This is a known issue and patches are welcome')
477 with tpe
or concurrent
.futures
.ThreadPoolExecutor(max_workers
) as pool
:
478 for fragment
, frag_content
, frag_index
, frag_filename
in pool
.map(_download_fragment
, fragments
):
479 ctx
['fragment_filename_sanitized'] = frag_filename
480 ctx
['fragment_index'] = frag_index
481 result
= append_fragment(decrypt_fragment(fragment
, frag_content
), frag_index
, ctx
)
485 for fragment
in fragments
:
486 frag_content
, frag_index
= download_fragment(fragment
, ctx
)
487 result
= append_fragment(decrypt_fragment(fragment
, frag_content
), frag_index
, ctx
)
491 if finish_func
is not None:
492 ctx
['dest_stream'].write(finish_func())
493 ctx
['dest_stream'].flush()
494 self
._finish
_frag
_download
(ctx
, info_dict
)