import collections
import contextlib
import datetime
import errno
import fileinput
import functools
import io
import itertools
import json
import locale
import operator
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import time
import tokenize
import traceback
import unicodedata
import urllib.request
from string import ascii_letters

from .cache import Cache
from .compat import compat_os_name, compat_shlex_quote
from .cookies import load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.common import UnsupportedURLIE
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .postprocessor import _PLUGIN_CLASSES as plugin_postprocessors
from .postprocessor import (
    EmbedThumbnailPP,
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    FFmpegVideoConvertorPP,
    MoveFilesAfterDownloadPP,
    get_postprocessor,
)
from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
from .update import REPOSITORY, current_git_head, detect_variant
from .utils import (
    DEFAULT_OUTTMPL,
    IDENTITY,
    LINK_TEMPLATES,
    MEDIA_EXTENSIONS,
    NO_DEFAULT,
    NUMBER_RE,
    OUTTMPL_TYPES,
    POSTPROCESS_WHEN,
    STR_FORMAT_RE_TMPL,
    STR_FORMAT_TYPES,
    ContentTooShortError,
    DateRange,
    DownloadCancelled,
    DownloadError,
    EntryNotInPlaylist,
    ExistingVideoReached,
    ExtractorError,
    GeoRestrictedError,
    HEADRequest,
    ISO3166Utils,
    LazyList,
    MaxDownloadsReached,
    Namespace,
    PagedList,
    PerRequestProxyHandler,
    PlaylistEntries,
    Popen,
    PostProcessingError,
    ReExtractInfo,
    RejectedVideoReached,
    SameFileError,
    UnavailableVideoError,
    UserNotLive,
    YoutubeDLCookieProcessor,
    YoutubeDLHandler,
    YoutubeDLRedirectHandler,
    age_restricted,
    args_to_str,
    bug_reports_message,
    date_from_str,
    deprecation_warning,
    determine_ext,
    determine_protocol,
    encode_compat_str,
    encodeFilename,
    error_to_compat_str,
    escapeHTML,
    expand_path,
    filter_dict,
    float_or_none,
    format_bytes,
    format_decimal_suffix,
    format_field,
    formatSeconds,
    get_compatible_ext,
    get_domain,
    int_or_none,
    iri_to_uri,
    join_nonempty,
    locked_file,
    make_archive_id,
    make_dir,
    make_HTTPS_handler,
    merge_headers,
    network_exceptions,
    number_of_digits,
    orderedSet,
    orderedSet_from_options,
    parse_filesize,
    preferredencoding,
    prepend_extension,
    register_socks_protocols,
    remove_terminal_sequences,
    render_table,
    replace_extension,
    sanitize_filename,
    sanitize_path,
    sanitize_url,
    sanitized_Request,
    std_headers,
    str_or_none,
    strftime_or_none,
    subtitles_filename,
    supports_terminal_sequences,
    system_identifier,
    timetuple_from_msec,
    to_high_limit_path,
    traverse_obj,
    try_call,
    try_get,
    url_basename,
    variadic,
    version_tuple,
    windows_enable_vt_mode,
    write_json_file,
    write_string,
)
from .version import RELEASE_GIT_HEAD, VARIANT, __version__

if compat_os_name == 'nt':
    import ctypes

class YoutubeDL:
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Since, given a video URL, the downloader doesn't know how to
    extract all the needed information (a task that InfoExtractors do),
    it has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge for the InfoExtractors
    that are added to it, so this is a "mutual registration".

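    A minimal usage sketch (the URL and the format selector below are
    only illustrative placeholders):

        from yt_dlp import YoutubeDL

        with YoutubeDL({'format': 'bestvideo+bestaudio/best'}) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
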
    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A dict with keys WHEN mapped to a list of templates to
                       print to stdout. The allowed keys are 'video' or any of
                       the items in utils.POSTPROCESS_WHEN.
                       For compatibility, a single list is also accepted
    print_to_file:     A dict with keys WHEN (same as forceprint) mapped to
                       a list of tuples with (template, filename)
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or list_thumbnails is used
    format:            Video format code. See "FORMAT SELECTION" for more details.
                       You can also pass a function. The function takes 'ctx' as
                       an argument and returns the formats to download.
                       See "build_format_selector" for an implementation
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental)
    format_sort:       A list of fields by which to sort the video formats.
                       See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    prefer_free_formats: Whether to prefer video formats with free containers
                       over non-free ones of the same quality.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file
    check_formats:     Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none),
                       'selected' (check selected formats),
                       or None (check only if requested by extractor)
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py)
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       For compatibility with youtube-dl, a single string can also be used
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be Windows-compatible
    ignoreerrors:      Do not stop on download/postprocessing errors.
                       Can be 'only_download' to ignore only download errors.
                       Default is 'only_download' for CLI, but False for API
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped
    allowed_extractors: List of regexes to match against extractor names that are allowed
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False.
                       For compatibility with youtube-dl,
                       "nooverwrites" may also be used instead
    playlist_items:    Specific indices of playlist to download.
    playlistrandom:    Download playlist items in random order.
    lazy_playlist:     Process playlist entries as they are received.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Log messages to stderr instead of stdout.
    consoletitle:      Display progress in console window's titlebar.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video metadata to a .info.json file
    clean_infojson:    Remove private fields from the infojson
    getcomments:       Extract video comments. This will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc
                       also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. The language can be prefixed with a "-" to
                       exclude it from the requested languages, e.g. ['all', '-live_chat']
    keepvideo:         Keep the video file after post-processing
    daterange:         A DateRange object; download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Unsuitable videos for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  File name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded
                       again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
    break_per_url:     Whether break_on_reject and break_on_existing
                       should act on each input URL as opposed to for the entire queue
    cookiefile:        File name or text stream from where cookies should be read and dumped to
    cookiesfrombrowser: A tuple containing the name of the browser, the profile
                       name/path from where cookies are loaded, the name of the keyring,
                       and the container name, e.g. ('chrome', ) or
                       ('vivaldi', 'default', 'BASICTEXT') or ('firefox', 'default', None, 'Meta')
    legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
                       support RFC 5746 secure renegotiation
    nocheckcertificate: Do not verify SSL certificates
    client_certificate: Path to client certificate file in PEM format. May include the private key
    client_certificate_key: Path to private key file for client certificate
    client_certificate_password: Password for client certificate private key, if encrypted.
                       If not provided and the key is encrypted, yt-dlp will ask interactively
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       (Only supported by some extractors)
    http_headers:      A dictionary of custom headers to be used for all requests
    proxy:             URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic: Print out sent and received HTTP traffic
    default_search:    Prepend this string if an input url is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Whether to resolve and process url_results further
                       * False: Always process (default)
                       * True: Never process
                       * 'in_playlist': Do not process inside playlist/multi_video
                       * 'discard': Always process, but don't return the result
                         from inside playlist/multi_video
                       * 'discard_in_playlist': Same as "discard", but only for
                         playlists (not multi_video)
    wait_for_video:    If given, wait for scheduled streams to become available.
                       The value should be a tuple containing the range
                       (min_secs, max_secs) to wait between retries
    postprocessors:    A list of dictionaries, each with an entry
                       * key: The name of the postprocessor. See
                         yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Allowed values are
                         the entries of utils.POSTPROCESS_WHEN
                         Assumed to be 'post_process' if not given
                       See the combined sketch after this options list.
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                         Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading", or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                         None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                         unknown
                       * fragment_index: The counter of the currently
                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
                       See the combined sketch after this options list.
    postprocessor_hooks: A list of functions that get called on postprocessing
                       progress, with a dictionary with the entries
                       * status: One of "started", "processing", or "finished".
                         Check this first and ignore unknown values.
                       * postprocessor: Name of the postprocessor
                       * info_dict: The extracted info_dict

                       Progress hooks are guaranteed to be called at least twice
                       (with status "started" and "finished") if the processing is successful.
    merge_output_format: "/" separated list of extensions to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                         about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called for every video with the signature
                       (info_dict, *, incomplete: bool) -> Optional[str]
                       For backward compatibility with youtube-dl, the signature
                       (info_dict) -> Optional[str] is also allowed.
                       - If it returns a message, the video is ignored.
                       - If it returns None, the video is downloaded.
                       - If it returns utils.NO_DEFAULT, the user is interactively
                         asked whether to download the video.
                       match_filter_func in utils.py is one example for this.
                       See the combined sketch after this options list.
    no_color:          Do not emit color codes in output.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-1 alpha-2 country code that will be
                       used for explicit geographic restriction bypassing via
                       faking the X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat, format-sort,
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
                       Refer to __init__.py for their implementation
    progress_template: Dictionary of templates for progress outputs.
                       Allowed keys are 'download', 'postprocess',
                       'download-title' (console title) and 'postprocess-title'.
                       The template is mapped on a dictionary with keys 'progress' and 'info'
    retry_sleep_functions: Dictionary of functions that take the number of attempts
                       as argument and return the time to sleep in seconds.
                       Allowed keys are 'http', 'fragment', 'file_access'
    download_ranges:   A callback function that gets called for every video with
                       the signature (info_dict, ydl) -> Iterable[Section].
                       Only the returned sections will be downloaded.
                       Each Section is a dict with the following keys:
                       * start_time: Start time of the section in seconds
                       * end_time: End time of the section in seconds
                       * title: Section title (Optional)
                       * index: Section number (Optional)
    force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
    noprogress:        Do not print the progress bar
    live_from_start:   Whether to download livestreams from the start

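    A combined sketch of the hook, filter and postprocessor options above
    (the values are illustrative, not recommendations):

        def my_hook(d):
            # Check 'status' first and ignore unknown values
            if d['status'] == 'finished':
                print('Downloaded', d['filename'])

        def my_filter(info_dict, *, incomplete):
            # Returning a message skips the video; returning None downloads it
            if (info_dict.get('duration') or 0) > 3600:
                return 'Skipping video longer than an hour'

        params = {
            'progress_hooks': [my_hook],
            'match_filter': my_filter,
            'postprocessors': [{'key': 'FFmpegMetadata', 'when': 'post_process'}],
        }
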
    The following parameters are not used by YoutubeDL itself; they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP
                       For compatibility with youtube-dl, a single list of args
                       can also be used

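    For instance, a postprocessor_args value might look like this (the keys
    and arguments are only illustrative):

        {'ffmpeg': ['-threads', '2'], 'merger+ffmpeg': ['-strict', '-2']}

    where the "PP+EXE" key 'merger+ffmpeg' would apply only when ffmpeg is
    invoked by the merger postprocessor.
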
    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists to different formats at
                       discontinuities such as ad breaks (default: False)
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       E.g. {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched:      Mark videos watched (even with --simulate). Only for YouTube

    The following options are deprecated and may be removed in the future:

    force_generic_extractor: Force downloader to use the generic extractor
                       - Use allowed_extractors = ['generic', 'default']
    playliststart:     - Use playlist_items
                       Playlist item to start at.
    playlistend:       - Use playlist_items
                       Playlist item to end at.
    playlistreverse:   - Use playlist_items
                       Download playlist items in reverse order.
    forceurl:          - Use forceprint
                       Force printing final URL.
    forcetitle:        - Use forceprint
                       Force printing title.
    forceid:           - Use forceprint
                       Force printing ID.
    forcethumbnail:    - Use forceprint
                       Force printing thumbnail URL.
    forcedescription:  - Use forceprint
                       Force printing description.
    forcefilename:     - Use forceprint
                       Force printing final filename.
    forceduration:     - Use forceprint
                       Force printing duration.
    allsubtitles:      - Use subtitleslangs = ['all']
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    include_ads:       - Doesn't work
                       Download ads as well
    call_home:         - Not implemented
                       Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging.
    post_hooks:        - Register a custom postprocessor
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
                       If True, use the native HLS downloader instead of
                       ffmpeg/avconv; if False, use ffmpeg/avconv; if None,
                       use the downloader suggested by the extractor.
    prefer_ffmpeg:     - avconv support is deprecated
                       If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by the extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: - Use extractor_args
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by the extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
    """

    _NUMERIC_FIELDS = {
        'width', 'height', 'asr', 'audio_channels', 'fps',
        'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
    }

    _format_fields = {
        # NB: Keep in sync with the docstring of extractor/common.py
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
        'preference', 'language', 'language_preference', 'quality', 'source_preference',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
    }
    _format_selection_exts = {
        'audio': set(MEDIA_EXTENSIONS.common_audio),
        'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
        'storyboards': set(MEDIA_EXTENSIONS.storyboards),
    }

    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        if params is None:
            params = {}
        self.params = params
        self._ies = {}
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._num_videos = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)

        windows_enable_vt_mode()
        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            out=stdout,
            error=sys.stderr,
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
        )
        self._allow_colors = Namespace(**{
            type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
            for type_, stream in self._out_files.items_ if type_ != 'console'
        })

        # The code is left like this to be reused for future deprecations
        MIN_SUPPORTED, MIN_RECOMMENDED = (3, 7), (3, 7)
        current_version = sys.version_info[:2]
        if current_version < MIN_RECOMMENDED:
            msg = ('Support for Python version %d.%d has been deprecated. '
                   'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details.'
                   '\n You will no longer receive updates on this version')
            if current_version < MIN_SUPPORTED:
                msg = 'Python version %d.%d is no longer supported'
            self.deprecation_warning(
                f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                ' If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecated_feature(msg)

        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}

        if self.params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = shutil.get_terminal_size().columns
                width_args = [] if width is None else ['-w', str(width)]
                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if auto_init:
            if auto_init != 'no_verbose_header':
                self.print_debug_header()
            self.add_default_info_extractors()

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))

        # Set http_headers defaults according to std_headers
        self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))

        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                when=when)

        self._setup_opener()
        register_socks_protocols()

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            if fn is None:
                return False
            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        self.archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
                return False
            return True

        self.archive = set()
        preload_download_archive(self.params.get('download_archive'))

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key. It will try to get one from
        the _ies list; if there is no instance, it will create a new one and
        add it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        all_ies = {ie.IE_NAME.lower(): ie for ie in gen_extractor_classes()}
        all_ies['end'] = UnsupportedURLIE()
        try:
            ie_names = orderedSet_from_options(
                self.params.get('allowed_extractors', ['default']), {
                    'all': list(all_ies),
                    'default': [name for name, ie in all_ies.items() if ie._ENABLED],
                }, use_regex=True)
        except re.error as e:
            raise ValueError(f'Wrong regex for allowed_extractors: {e.pattern}')
        for name in ie_names:
            self.add_info_extractor(all_ies[name])
        self.write_debug(f'Loaded {len(ie_names)} extractors')

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
            for pp in pps:
                pp.add_progress_hook(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. '
                                     'Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. '
                                     'Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)

    def to_screen(self, message, skip_eol=False, quiet=None):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            return
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            return
        self._write_string(
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)

    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
            return
        self._write_string(code, self._out_files.console)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        else:
            self._send_console_code(f'\033]0;{message}\007')

    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[22;0t')  # Save the title on stack

    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[23;0t')  # Restore the title from stack

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None, is_error=True):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors, this method may raise an exception after printing
        the message, or only print it.

        @param tb          If given, is additional traceback information
        @param is_error    Whether to raise error according to ignoreerrors
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not is_error:
            return
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    Styles = Namespace(
        HEADERS='yellow',
        EMPHASIS='light blue',
        FILENAME='green',
        ID='green',
        DELIM='blue',
        ERROR='red',
        WARNING='yellow',
        SUPPRESS='light black',
    )

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        text = str(text)
        if test_encoding:
            original_text = text
            # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
                text = fallback
        return format_text(text, f) if allow_colors else text if fallback is None else fallback

    def _format_out(self, *args, **kwargs):
        return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)

    def _format_screen(self, *args, **kwargs):
        return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)

    def _format_err(self, *args, **kwargs):
        return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty, 'WARNING:' will be colored.
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message, *, stacklevel=0):
        deprecation_warning(
            message, stacklevel=stacklevel + 1, printer=self.report_error, is_error=False)

    def deprecated_feature(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'Deprecated Feature: {message}')
        self.to_stderr(f'{self._format_err("Deprecated Feature:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, *args, **kwargs):
        '''
        Do the same as trouble, but prefix the message with 'ERROR:', colored
        in red if stderr is a tty.
        '''
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

    def write_debug(self, message, only_once=False):
        '''Log debug message to the logger, or print it to stderr'''
        if not self.params.get('verbose', False):
            return
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']

    def _parse_outtmpl(self):
        sanitize = IDENTITY
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict), '"paths" parameter must be a dictionary'
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$';
        # that is not what we want, since we need to keep '%%' intact
        # for the template dict substitution step. Work around this
        # with a boundary-like separator hack.
        sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')
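        # e.g. (illustrative) '%%(ext)s' becomes '%<sep>%(ext)s' here, survives
        # expand_path unmangled, and the separator is removed again below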

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. E.g. for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err

    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)
        return info_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed
        """

        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int, slice or "{field, ...}"
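        # e.g. (illustrative) "title", "formats.0.height", "id.3:7" and
        # "{format_id,height}" all fit this grammar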
        FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}
        FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {
            'inner': FIELD_INNER_RE,
            'field': rf'\w*(?:\.{FIELD_INNER_RE})*'
        }
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?x)
            (?P<negate>-)?
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<remaining>
                (?P<alternate>(?<!\\),[^|&)]+)?
                (?:&(?P<replacement>.*?))?
                (?:\|(?P<default>.*?))?
            )$''')

        def _traverse_infodict(fields):
            fields = [f for x in re.split(r'\.({.+?})\.?', fields)
                      for f in ([x] if x.startswith('{') else x.split('.'))]
            for i in (0, -1):
                if fields and not fields[i]:
                    fields.pop(i)

            for i, f in enumerate(fields):
                if not f.startswith('{'):
                    continue
                assert f.endswith('}'), f'No closing brace for {f} in {fields}'
                fields[i] = {k: k.split('.') for k in f[1:-1].split(',')}

            return traverse_obj(info_dict, fields, is_user_input=True, traverse_string=True)

        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
            if sanitize and value == '':
                value = None
            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']
                else NO_DEFAULT))
        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            return repr(obj)

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            initial_field = mobj.group('fields') if mobj else ''
            value, replacement, default = None, None, na
            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                replacement = mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
                else:
                    break

            fmt = outer_mobj.group('format')
            if fmt == 's' and value is not None and key in field_size_compat_map.keys():
                fmt = f'0{field_size_compat_map[key]:d}d'

            value = default if value is None else value if replacement is None else replacement

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
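            # Each branch below implements one output-template conversion suffix;
            # e.g. (illustrative) %(tags)l joins a list as 'a, b, c' and
            # %(formats)j serializes the field as JSON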
            if fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(value, default=_dumpjson_default, indent=4 if '#' in flags else None), str_fmt
            elif fmt[-1] == 'h':  # html
                value, fmt = escapeHTML(value), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode() % str(value).encode()
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                    value), str_fmt
            elif fmt[-1] == 'D':  # decimal suffix
                num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
                value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
                                              factor=1024 if '#' in flags else 1000)
            elif fmt[-1] == 'S':  # filename sanitization
                value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
            elif fmt[-1] == 'c':
                if value:
                    value = str(value)[0]
                else:
                    fmt = str_fmt
            elif fmt[-1] not in 'rs':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'

            if sanitize:
                if fmt[-1] == 'r':
                    # If value is an object, sanitize might convert it to a string
                    # So we convert it to repr first
                    value, fmt = repr(value), str_fmt
                if fmt[-1] in 'csr':
                    value = sanitizer(initial_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT

    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict

    def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
        assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
        if outtmpl is None:
            outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
        try:
            outtmpl = self._outtmpl_expandpath(outtmpl)
            filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
            if not filename:
                return None

            if tmpl_type in ('', 'temp'):
                final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
                if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
                    filename = replace_extension(filename, ext, final_ext)
            elif tmpl_type:
                force_ext = OUTTMPL_TYPES[tmpl_type]
                if force_ext:
                    filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                no_ext, *ext = filename.rsplit('.', 2)
                filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

    def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
        """Generate the output filename"""
        if outtmpl:
            assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
            dir_type = None
        filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
        if not filename and dir_type not in ('', 'temp'):
            return ''

        if warn:
            if not self.params.get('paths'):
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
        if filename == '-' or not filename:
            return filename

        return self.get_output_path(dir_type, filename)

    def _match_entry(self, info_dict, incomplete=False, silent=False):
        """ Returns None if the file should be downloaded """

        video_title = info_dict.get('title', info_dict.get('id', 'entry'))

        def check_filter():
            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title

            match_filter = self.params.get('match_filter')
            if match_filter is not None:
                try:
                    ret = match_filter(info_dict, incomplete=incomplete)
                except TypeError:
                    # For backward compatibility
                    ret = None if incomplete else match_filter(info_dict)
                if ret is NO_DEFAULT:
                    while True:
                        filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
                        reply = input(self._format_screen(
                            f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
                        if reply in {'y', ''}:
                            return None
                        elif reply == 'n':
                            return f'Skipping {video_title}'
                elif ret is not None:
                    return ret
            return None

        if self.in_download_archive(info_dict):
            reason = '%s has already been recorded in the archive' % video_title
            break_opt, break_err = 'break_on_existing', ExistingVideoReached
        else:
            reason = check_filter()
            break_opt, break_err = 'break_on_reject', RejectedVideoReached
        if reason is not None:
            if not silent:
                self.to_screen('[download] ' + reason)
            if self.params.get(break_opt, False):
                raise break_err()
        return reason

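    # Illustrative sketch of a user-supplied 'match_filter' callable as consumed
    # by _match_entry above (the function name is hypothetical): return None to
    # download, a string reason to skip, or NO_DEFAULT to ask interactively.
    #
    #   def skip_long_videos(info_dict, *, incomplete=False):
    #       if (info_dict.get('duration') or 0) > 600:
    #           return 'Longer than 10 minutes'  # skipped, with this reason
    #       return None                          # downloaded
    #
    #   YoutubeDL({'match_filter': skip_long_videos})
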
    @staticmethod
    def add_extra_info(info_dict, extra_info):
        '''Set the keys from extra_info in info dict if they are missing'''
        for key, value in extra_info.items():
            info_dict.setdefault(key, value)

    def extract_info(self, url, download=True, ie_key=None, extra_info=None,
                     process=True, force_generic_extractor=False):
        """
        Return a list with a dictionary for each video extracted.

        Arguments:
        url -- URL to extract

        Keyword arguments:
        download -- whether to download videos during extraction
        ie_key -- extractor key hint
        extra_info -- dictionary containing the extra values to add to each result
        process -- whether to resolve all unresolved references (URLs, playlist items),
                   must be True for download to work.
        force_generic_extractor -- force using the generic extractor
        """

        if extra_info is None:
            extra_info = {}

        if not ie_key and force_generic_extractor:
            ie_key = 'Generic'

        if ie_key:
            ies = {ie_key: self._ies[ie_key]} if ie_key in self._ies else {}
        else:
            ies = self._ies

        for key, ie in ies.items():
            if not ie.suitable(url):
                continue

            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')

            temp_id = ie.get_temp_id(url)
            if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': key}):
                self.to_screen(f'[{key}] {temp_id}: has already been recorded in the archive')
                if self.params.get('break_on_existing', False):
                    raise ExistingVideoReached()
                break
            return self.__extract_info(url, self.get_info_extractor(key), download, extra_info, process)
        else:
            extractors_restricted = self.params.get('allowed_extractors') not in (None, ['default'])
            self.report_error(f'No suitable extractor{format_field(ie_key, None, " (%s)")} found for URL {url}',
                              tb=False if extractors_restricted else None)

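    # Illustrative embedding sketch (URL made up): extract_info is the usual
    # entry point when using YoutubeDL as a library.
    #
    #   with YoutubeDL({'skip_download': True}) as ydl:
    #       info = ydl.extract_info('https://example.com/watch?v=abc123', download=False)
    #       print(info['title'])
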
    def _handle_extraction_exceptions(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            while True:
                try:
                    return func(self, *args, **kwargs)
                except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
                    raise
                except ReExtractInfo as e:
                    if e.expected:
                        self.to_screen(f'{e}; Re-extracting data')
                    else:
                        self.to_stderr('\r')
                        self.report_warning(f'{e}; Re-extracting data')
                    continue
                except GeoRestrictedError as e:
                    msg = e.msg
                    if e.countries:
                        msg += '\nThis video is available in %s.' % ', '.join(
                            map(ISO3166Utils.short2full, e.countries))
                    msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around.'
                    self.report_error(msg)
                except ExtractorError as e:  # An error we somewhat expected
                    self.report_error(str(e), e.format_traceback())
                except Exception as e:
                    if self.params.get('ignoreerrors'):
                        self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
                    else:
                        raise
                break
        return wrapper

    def _wait_for_video(self, ie_result={}):
        if (not self.params.get('wait_for_video')
                or ie_result.get('_type', 'video') != 'video'
                or ie_result.get('formats') or ie_result.get('url')):
            return

        format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
        last_msg = ''

        def progress(msg):
            nonlocal last_msg
            full_msg = f'{msg}\n'
            if not self.params.get('noprogress'):
                full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
            elif last_msg:
                return
            self.to_screen(full_msg, skip_eol=True)
            last_msg = msg

        min_wait, max_wait = self.params.get('wait_for_video')
        diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
        if diff is None and ie_result.get('live_status') == 'is_upcoming':
            diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
            self.report_warning('Release time of video is not known')
        elif ie_result and (diff or 0) <= 0:
            self.report_warning('Video should already be available according to extracted info')
        diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
        self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')

        wait_till = time.time() + diff
        try:
            while True:
                diff = wait_till - time.time()
                if diff <= 0:
                    progress('')
                    raise ReExtractInfo('[wait] Wait period ended', expected=True)
                progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
                time.sleep(1)
        except KeyboardInterrupt:
            progress('')
            raise ReExtractInfo('[wait] Interrupted by user', expected=True)
        except BaseException as e:
            if not isinstance(e, ReExtractInfo):
                self.to_screen('')
            raise

    @_handle_extraction_exceptions
    def __extract_info(self, url, ie, download, extra_info, process):
        try:
            ie_result = ie.extract(url)
        except UserNotLive as e:
            if process:
                if self.params.get('wait_for_video'):
                    self.report_warning(e)
                self._wait_for_video()
            raise
        if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
            self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
            return
        if isinstance(ie_result, list):
            # Backwards compatibility: old IE result format
            ie_result = {
                '_type': 'compat_list',
                'entries': ie_result,
            }
        if extra_info.get('original_url'):
            ie_result.setdefault('original_url', extra_info['original_url'])
        self.add_default_extra_info(ie_result, ie, url)
        if process:
            self._wait_for_video(ie_result)
            return self.process_ie_result(ie_result, download, extra_info)
        else:
            return ie_result

    def add_default_extra_info(self, ie_result, ie, url):
        if url is not None:
            self.add_extra_info(ie_result, {
                'webpage_url': url,
                'original_url': url,
            })
        webpage_url = ie_result.get('webpage_url')
        if webpage_url:
            self.add_extra_info(ie_result, {
                'webpage_url_basename': url_basename(webpage_url),
                'webpage_url_domain': get_domain(webpage_url),
            })
        if ie is not None:
            self.add_extra_info(ie_result, {
                'extractor': ie.IE_NAME,
                'extractor_key': ie.ie_key(),
            })

    def process_ie_result(self, ie_result, download=True, extra_info=None):
        """
        Take the result of the ie (may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.
        """
        if extra_info is None:
            extra_info = {}
        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            ie_result['url'] = sanitize_url(
                ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
            if ie_result.get('original_url'):
                extra_info.setdefault('original_url', ie_result['original_url'])

            extract_flat = self.params.get('extract_flat', False)
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                    or extract_flat is True):
                info_copy = ie_result.copy()
                ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
                if ie and not ie_result.get('id'):
                    info_copy['id'] = ie.get_temp_id(ie_result['url'])
                self.add_default_extra_info(info_copy, ie, ie_result['url'])
                self.add_extra_info(info_copy, extra_info)
                info_copy, _ = self.pre_process(info_copy)
                self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
                self._raise_pending_errors(info_copy)
                if self.params.get('force_write_download_archive', False):
                    self.record_download_archive(info_copy)
                return ie_result

        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            ie_result = self.process_video_result(ie_result, download=download)
            self._raise_pending_errors(ie_result)
            additional_urls = (ie_result or {}).get('additional_urls')
            if additional_urls:
                # TODO: Improve MetadataParserPP to allow setting a list
                if isinstance(additional_urls, str):
                    additional_urls = [additional_urls]
                self.to_screen(
                    '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
                self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
                ie_result['additional_entries'] = [
                    self.extract_info(
                        url, download, extra_info=extra_info,
                        force_generic_extractor=self.params.get('force_generic_extractor'))
                    for url in additional_urls
                ]
            return ie_result
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(
                ie_result['url'], download,
                ie_key=ie_result.get('ie_key'),
                extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # extract_info may return None when ignoreerrors is enabled and
            # extraction failed with an error; don't crash and return early
            # in this case
            if not info:
                return info

            exempted_fields = {'_type', 'url', 'ie_key'}
            if not ie_result.get('section_end') and ie_result.get('section_start') is None:
                # For video clips, the id etc of the clip extractor should be used
                exempted_fields |= {'id', 'extractor', 'extractor_key'}

            new_result = info.copy()
            new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))

            # Extracted info may not be a video result (i.e.
            # info.get('_type', 'video') != 'video') but rather a url or
            # url_transparent. In such cases, the outer metadata (from ie_result)
            # should be propagated to the inner one (info). For this to happen,
            # the _type of info should be overridden with url_transparent. This
            # fixes the issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
            if new_result.get('_type') == 'url':
                new_result['_type'] = 'url_transparent'

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type in ('playlist', 'multi_video'):
            # Protect from infinite recursion due to recursively nested playlists
            # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
            webpage_url = ie_result['webpage_url']
            if webpage_url in self._playlist_urls:
                self.to_screen(
                    '[download] Skipping already downloaded playlist: %s'
                    % (ie_result.get('title') or ie_result.get('id')))
                return

            self._playlist_level += 1
            self._playlist_urls.add(webpage_url)
            self._fill_common_fields(ie_result, False)
            self._sanitize_thumbnails(ie_result)
            try:
                return self.__process_playlist(ie_result, download)
            finally:
                self._playlist_level -= 1
                if not self._playlist_level:
                    self._playlist_urls.clear()
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                self.add_extra_info(r, {
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'webpage_url_domain': get_domain(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                })
                return r
            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)

    def _ensure_dir_exists(self, path):
        return make_dir(path, self.report_error)

    @staticmethod
    def _playlist_infodict(ie_result, strict=False, **kwargs):
        info = {
            'playlist_count': ie_result.get('playlist_count'),
            'playlist': ie_result.get('title') or ie_result.get('id'),
            'playlist_id': ie_result.get('id'),
            'playlist_title': ie_result.get('title'),
            'playlist_uploader': ie_result.get('uploader'),
            'playlist_uploader_id': ie_result.get('uploader_id'),
            **kwargs,
        }
        if strict:
            return info
        return {
            **info,
            'playlist_index': 0,
            '__last_playlist_index': max(ie_result['requested_entries'] or (0, 0)),
            'extractor': ie_result['extractor'],
            'webpage_url': ie_result['webpage_url'],
            'webpage_url_basename': url_basename(ie_result['webpage_url']),
            'webpage_url_domain': get_domain(ie_result['webpage_url']),
            'extractor_key': ie_result['extractor_key'],
        }

    def __process_playlist(self, ie_result, download):
        """Process each entry in the playlist"""
        assert ie_result['_type'] in ('playlist', 'multi_video')

        common_info = self._playlist_infodict(ie_result, strict=True)
        title = common_info.get('playlist') or '<Untitled>'
        if self._match_entry(common_info, incomplete=True) is not None:
            return
        self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')

        all_entries = PlaylistEntries(self, ie_result)
        entries = orderedSet(all_entries.get_requested_items(), lazy=True)

        lazy = self.params.get('lazy_playlist')
        if lazy:
            resolved_entries, n_entries = [], 'N/A'
            ie_result['requested_entries'], ie_result['entries'] = None, None
        else:
            entries = resolved_entries = list(entries)
            n_entries = len(resolved_entries)
            ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
        if not ie_result.get('playlist_count'):
            # Better to do this after potentially exhausting entries
            ie_result['playlist_count'] = all_entries.get_full_count()

        extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
        ie_copy = collections.ChainMap(ie_result, extra)

        _infojson_written = False
        write_playlist_files = self.params.get('allow_playlist_files', True)
        if write_playlist_files and self.params.get('list_thumbnails'):
            self.list_thumbnails(ie_result)
        if write_playlist_files and not self.params.get('simulate'):
            _infojson_written = self._write_info_json(
                'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
            if _infojson_written is None:
                return
            if self._write_description('playlist', ie_result,
                                       self.prepare_filename(ie_copy, 'pl_description')) is None:
                return
            # TODO: This should be passed to ThumbnailsConvertor if necessary
            self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))

        if lazy:
            if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
                self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
        elif self.params.get('playlistreverse'):
            entries.reverse()
        elif self.params.get('playlistrandom'):
            random.shuffle(entries)

        self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} videos'
                       f'{format_field(ie_result, "playlist_count", " of %s")}')

        keep_resolved_entries = self.params.get('extract_flat') != 'discard'
        if self.params.get('extract_flat') == 'discard_in_playlist':
            keep_resolved_entries = ie_result['_type'] != 'playlist'
        if keep_resolved_entries:
            self.write_debug('The information of all playlist entries will be held in memory')

        failures = 0
        max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
        for i, (playlist_index, entry) in enumerate(entries):
            if lazy:
                resolved_entries.append((playlist_index, entry))
            if not entry:
                continue

            entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
            if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
                playlist_index = ie_result['requested_entries'][i]

            entry_copy = collections.ChainMap(entry, {
                **common_info,
                'n_entries': int_or_none(n_entries),
                'playlist_index': playlist_index,
                'playlist_autonumber': i + 1,
            })

            if self._match_entry(entry_copy, incomplete=True) is not None:
                # For compatibility with youtube-dl. See https://github.com/yt-dlp/yt-dlp/issues/4369
                resolved_entries[i] = (playlist_index, NO_DEFAULT)
                continue

            self.to_screen('[download] Downloading video %s of %s' % (
                self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))

            extra.update({
                'playlist_index': playlist_index,
                'playlist_autonumber': i + 1,
            })
            entry_result = self.__process_iterable_entry(entry, download, extra)
            if not entry_result:
                failures += 1
                if failures >= max_failures:
                    self.report_error(
                        f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
                    break
            if keep_resolved_entries:
                resolved_entries[i] = (playlist_index, entry_result)

        # Update with processed data
        ie_result['requested_entries'] = [i for i, e in resolved_entries if e is not NO_DEFAULT]
        ie_result['entries'] = [e for _, e in resolved_entries if e is not NO_DEFAULT]

        # Write the updated info to json
        if _infojson_written is True and self._write_info_json(
                'updated playlist', ie_result,
                self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
            return

        ie_result = self.run_all_pps('playlist', ie_result)
        self.to_screen(f'[download] Finished downloading playlist: {title}')
        return ie_result

    @_handle_extraction_exceptions
    def __process_iterable_entry(self, entry, download, extra_info):
        return self.process_ie_result(
            entry, download=download, extra_info=extra_info)

    def _build_format_filter(self, filter_spec):
        " Returns a function to filter the formats according to the filter_spec "

        OPERATORS = {
            '<': operator.lt,
            '<=': operator.le,
            '>': operator.gt,
            '>=': operator.ge,
            '=': operator.eq,
            '!=': operator.ne,
        }
        operator_rex = re.compile(r'''(?x)\s*
            (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
            (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
            ''' % '|'.join(map(re.escape, OPERATORS.keys())))
        m = operator_rex.fullmatch(filter_spec)
        if m:
            try:
                comparison_value = int(m.group('value'))
            except ValueError:
                comparison_value = parse_filesize(m.group('value'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('value') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid value %r in format specification %r' % (
                            m.group('value'), filter_spec))
            op = OPERATORS[m.group('op')]

        if not m:
            STR_OPERATORS = {
                '=': operator.eq,
                '^=': lambda attr, value: attr.startswith(value),
                '$=': lambda attr, value: attr.endswith(value),
                '*=': lambda attr, value: value in attr,
                '~=': lambda attr, value: value.search(attr) is not None
            }
            str_operator_rex = re.compile(r'''(?x)\s*
                (?P<key>[a-zA-Z0-9._-]+)\s*
                (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
                (?P<quote>["'])?
                (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
                (?(quote)(?P=quote))\s*
                ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
            m = str_operator_rex.fullmatch(filter_spec)
            if m:
                if m.group('op') == '~=':
                    comparison_value = re.compile(m.group('value'))
                else:
                    comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
                str_op = STR_OPERATORS[m.group('op')]
                if m.group('negation'):
                    op = lambda attr, value: not str_op(attr, value)
                else:
                    op = str_op

        if not m:
            raise SyntaxError('Invalid filter specification %r' % filter_spec)

        def _filter(f):
            actual_value = f.get(m.group('key'))
            if actual_value is None:
                return m.group('none_inclusive')
            return op(actual_value, comparison_value)
        return _filter

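    # Illustrative sketch (the 'ydl' instance is hypothetical):
    # _build_format_filter turns a bracketed '-f' filter such as 'height<=?720'
    # into a predicate over format dicts, e.g.
    #
    #   accept = ydl._build_format_filter('height<=?720')
    #   accept({'height': 480})   # -> True
    #   accept({'height': 1080})  # -> False
    #   accept({})                # -> truthy; the '?' lets formats missing the key pass
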
    def _check_formats(self, formats):
        for f in formats:
            self.to_screen('[info] Testing format %s' % f['format_id'])
            path = self.get_output_path('temp')
            if not self._ensure_dir_exists(f'{path}/'):
                continue
            temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
            temp_file.close()
            try:
                success, _ = self.dl(temp_file.name, f, test=True)
            except (DownloadError, OSError, ValueError) + network_exceptions:
                success = False
            finally:
                if os.path.exists(temp_file.name):
                    try:
                        os.remove(temp_file.name)
                    except OSError:
                        self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
            if success:
                yield f
            else:
                self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])

    def _default_format_spec(self, info_dict, download=True):

        def can_merge():
            merger = FFmpegMergerPP(self)
            return merger.available and merger.can_merge()

        prefer_best = (
            not self.params.get('simulate')
            and download
            and (
                not can_merge()
                or info_dict.get('is_live') and not self.params.get('live_from_start')
                or self.params['outtmpl']['default'] == '-'))
        compat = (
            prefer_best
            or self.params.get('allow_multiple_audio_streams', False)
            or 'format-spec' in self.params['compat_opts'])

        return (
            'best/bestvideo+bestaudio' if prefer_best
            else 'bestvideo*+bestaudio/best' if not compat
            else 'bestvideo+bestaudio/best')

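    # For reference (restating the branches above): with merging available and a
    # regular file target, the default spec is 'bestvideo*+bestaudio/best'; when
    # merging is not possible (or output goes to stdout), it falls back to
    # 'best/bestvideo+bestaudio'; with the 'format-spec' compat option (or
    # multiple audio streams allowed), it is 'bestvideo+bestaudio/best'.
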
    def build_format_selector(self, format_spec):
        def syntax_error(note, start):
            message = (
                'Invalid format specification: '
                '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
            return SyntaxError(message)

        PICKFIRST = 'PICKFIRST'
        MERGE = 'MERGE'
        SINGLE = 'SINGLE'
        GROUP = 'GROUP'
        FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])

        allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
                                  'video': self.params.get('allow_multiple_video_streams', False)}

        check_formats = self.params.get('check_formats') == 'selected'

        def _parse_filter(tokens):
            filter_parts = []
            for type, string, start, _, _ in tokens:
                if type == tokenize.OP and string == ']':
                    return ''.join(filter_parts)
                else:
                    filter_parts.append(string)

        def _remove_unused_ops(tokens):
            # Remove operators that we don't use and join them with the surrounding strings.
            # E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
            ALLOWED_OPS = ('/', '+', ',', '(', ')')
            last_string, last_start, last_end, last_line = None, None, None, None
            for type, string, start, end, line in tokens:
                if type == tokenize.OP and string == '[':
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string, start, end, line
                    # everything inside brackets will be handled by _parse_filter
                    for type, string, start, end, line in tokens:
                        yield type, string, start, end, line
                        if type == tokenize.OP and string == ']':
                            break
                elif type == tokenize.OP and string in ALLOWED_OPS:
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string, start, end, line
                elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
                    if not last_string:
                        last_string = string
                        last_start = start
                        last_end = end
                    else:
                        last_string += string
            if last_string:
                yield tokenize.NAME, last_string, last_start, last_end, last_line

        def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
            selectors = []
            current_selector = None
            for type, string, start, _, _ in tokens:
                # ENCODING is only defined in python 3.x
                if type == getattr(tokenize, 'ENCODING', None):
                    continue
                elif type in [tokenize.NAME, tokenize.NUMBER]:
                    current_selector = FormatSelector(SINGLE, string, [])
                elif type == tokenize.OP:
                    if string == ')':
                        if not inside_group:
                            # ')' will be handled by the parentheses group
                            tokens.restore_last_token()
                        break
                    elif inside_merge and string in ['/', ',']:
                        tokens.restore_last_token()
                        break
                    elif inside_choice and string == ',':
                        tokens.restore_last_token()
                        break
                    elif string == ',':
                        if not current_selector:
                            raise syntax_error('"," must follow a format selector', start)
                        selectors.append(current_selector)
                        current_selector = None
                    elif string == '/':
                        if not current_selector:
                            raise syntax_error('"/" must follow a format selector', start)
                        first_choice = current_selector
                        second_choice = _parse_format_selection(tokens, inside_choice=True)
                        current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
                    elif string == '[':
                        if not current_selector:
                            current_selector = FormatSelector(SINGLE, 'best', [])
                        format_filter = _parse_filter(tokens)
                        current_selector.filters.append(format_filter)
                    elif string == '(':
                        if current_selector:
                            raise syntax_error('Unexpected "("', start)
                        group = _parse_format_selection(tokens, inside_group=True)
                        current_selector = FormatSelector(GROUP, group, [])
                    elif string == '+':
                        if not current_selector:
                            raise syntax_error('Unexpected "+"', start)
                        selector_1 = current_selector
                        selector_2 = _parse_format_selection(tokens, inside_merge=True)
                        if not selector_2:
                            raise syntax_error('Expected a selector', start)
                        current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
                    else:
                        raise syntax_error(f'Operator not recognized: "{string}"', start)
                elif type == tokenize.ENDMARKER:
                    break
            if current_selector:
                selectors.append(current_selector)
            return selectors

        def _merge(formats_pair):
            format_1, format_2 = formats_pair

            formats_info = []
            formats_info.extend(format_1.get('requested_formats', (format_1,)))
            formats_info.extend(format_2.get('requested_formats', (format_2,)))

            if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
                get_no_more = {'video': False, 'audio': False}
                for (i, fmt_info) in enumerate(formats_info):
                    if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
                        formats_info.pop(i)
                        continue
                    for aud_vid in ['audio', 'video']:
                        if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
                            if get_no_more[aud_vid]:
                                formats_info.pop(i)
                                break
                            get_no_more[aud_vid] = True

            if len(formats_info) == 1:
                return formats_info[0]

            video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
            audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']

            the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
            the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None

            output_ext = get_compatible_ext(
                vcodecs=[f.get('vcodec') for f in video_fmts],
                acodecs=[f.get('acodec') for f in audio_fmts],
                vexts=[f['ext'] for f in video_fmts],
                aexts=[f['ext'] for f in audio_fmts],
                preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
                             or self.params.get('prefer_free_formats') and ('webm', 'mkv')))

            filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))

            new_dict = {
                'requested_formats': formats_info,
                'format': '+'.join(filtered('format')),
                'format_id': '+'.join(filtered('format_id')),
                'ext': output_ext,
                'protocol': '+'.join(map(determine_protocol, formats_info)),
                'language': '+'.join(orderedSet(filtered('language'))) or None,
                'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
                'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
                'tbr': sum(filtered('tbr', 'vbr', 'abr')),
            }

            if the_only_video:
                new_dict.update({
                    'width': the_only_video.get('width'),
                    'height': the_only_video.get('height'),
                    'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
                    'fps': the_only_video.get('fps'),
                    'dynamic_range': the_only_video.get('dynamic_range'),
                    'vcodec': the_only_video.get('vcodec'),
                    'vbr': the_only_video.get('vbr'),
                    'stretched_ratio': the_only_video.get('stretched_ratio'),
                })

            if the_only_audio:
                new_dict.update({
                    'acodec': the_only_audio.get('acodec'),
                    'abr': the_only_audio.get('abr'),
                    'asr': the_only_audio.get('asr'),
                    'audio_channels': the_only_audio.get('audio_channels')
                })

            return new_dict

        def _check_formats(formats):
            if not check_formats:
                yield from formats
                return
            yield from self._check_formats(formats)

        def _build_selector_function(selector):
            if isinstance(selector, list):  # ,
                fs = [_build_selector_function(s) for s in selector]

                def selector_function(ctx):
                    for f in fs:
                        yield from f(ctx)
                return selector_function

            elif selector.type == GROUP:  # ()
                selector_function = _build_selector_function(selector.selector)

            elif selector.type == PICKFIRST:  # /
                fs = [_build_selector_function(s) for s in selector.selector]

                def selector_function(ctx):
                    for f in fs:
                        picked_formats = list(f(ctx))
                        if picked_formats:
                            return picked_formats
                    return []

            elif selector.type == MERGE:  # +
                selector_1, selector_2 = map(_build_selector_function, selector.selector)

                def selector_function(ctx):
                    for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
                        yield _merge(pair)

            elif selector.type == SINGLE:  # atom
                format_spec = selector.selector or 'best'

                # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
                if format_spec == 'all':
                    def selector_function(ctx):
                        yield from _check_formats(ctx['formats'][::-1])
                elif format_spec == 'mergeall':
                    def selector_function(ctx):
                        formats = list(_check_formats(
                            f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
                        if not formats:
                            return
                        merged_format = formats[-1]
                        for f in formats[-2::-1]:
                            merged_format = _merge((merged_format, f))
                        yield merged_format

                else:
                    format_fallback, separate_fallback, format_reverse, format_idx = False, None, True, 1
                    mobj = re.match(
                        r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
                        format_spec)
                    if mobj is not None:
                        format_idx = int_or_none(mobj.group('n'), default=1)
                        format_reverse = mobj.group('bw')[0] == 'b'
                        format_type = (mobj.group('type') or [None])[0]
                        not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
                        format_modified = mobj.group('mod') is not None

                        format_fallback = not format_type and not format_modified  # for b, w
                        _filter_f = (
                            (lambda f: f.get('%scodec' % format_type) != 'none')
                            if format_type and format_modified  # bv*, ba*, wv*, wa*
                            else (lambda f: f.get('%scodec' % not_format_type) == 'none')
                            if format_type  # bv, ba, wv, wa
                            else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
                            if not format_modified  # b, w
                            else lambda f: True)  # b*, w*
                        filter_f = lambda f: _filter_f(f) and (
                            f.get('vcodec') != 'none' or f.get('acodec') != 'none')
                    else:
                        if format_spec in self._format_selection_exts['audio']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
                        elif format_spec in self._format_selection_exts['video']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
                            separate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
                        elif format_spec in self._format_selection_exts['storyboards']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
                        else:
                            filter_f = lambda f: f.get('format_id') == format_spec  # id

                    def selector_function(ctx):
                        formats = list(ctx['formats'])
                        matches = list(filter(filter_f, formats)) if filter_f is not None else formats
                        if not matches:
                            if format_fallback and ctx['incomplete_formats']:
                                # for extractors with incomplete formats (audio only (soundcloud)
                                # or video only (imgur)) best/worst will fall back to the
                                # best/worst {video,audio}-only format
                                matches = formats
                            elif separate_fallback and not ctx['has_merged_format']:
                                # for compatibility with youtube-dl when there is no pre-merged format
                                matches = list(filter(separate_fallback, formats))
                        matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
                        try:
                            yield matches[format_idx - 1]
                        except LazyList.IndexError:
                            return

            filters = [self._build_format_filter(f) for f in selector.filters]

            def final_selector(ctx):
                ctx_copy = dict(ctx)
                for _filter in filters:
                    ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
                return selector_function(ctx_copy)
            return final_selector

        stream = io.BytesIO(format_spec.encode())
        try:
            tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
        except tokenize.TokenError:
            raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))

        class TokenIterator:
            def __init__(self, tokens):
                self.tokens = tokens
                self.counter = 0

            def __iter__(self):
                return self

            def __next__(self):
                if self.counter >= len(self.tokens):
                    raise StopIteration()
                value = self.tokens[self.counter]
                self.counter += 1
                return value

            next = __next__

            def restore_last_token(self):
                self.counter -= 1

        parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
        return _build_selector_function(parsed_selector)

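    # Illustrative sketch ('ydl' and 'formats' are hypothetical): the selector
    # returned by build_format_selector is called with a context dict using the
    # keys seen in process_video_result below.
    #
    #   selector = ydl.build_format_selector('bv*+ba/b')
    #   chosen = list(selector({
    #       'formats': formats,           # candidate format dicts
    #       'has_merged_format': False,   # is any pre-merged A/V format present?
    #       'incomplete_formats': False,  # all formats video-only or all audio-only?
    #   }))
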
    def _calc_headers(self, info_dict):
        res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})

        cookies = self._calc_cookies(info_dict['url'])
        if cookies:
            res['Cookie'] = cookies

        if 'X-Forwarded-For' not in res:
            x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
            if x_forwarded_for_ip:
                res['X-Forwarded-For'] = x_forwarded_for_ip

        return res

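    # Illustrative precedence sketch for _calc_headers (values made up):
    # per-format 'http_headers' from the info dict are merged over the global
    # 'http_headers' param, then cookies and any '__x_forwarded_for_ip' are
    # layered on top, e.g.
    #
    #   self.params['http_headers'] = {'User-Agent': 'UA-1'}
    #   info = {'url': 'https://example.com/v.mp4',
    #           'http_headers': {'User-Agent': 'UA-2'}}
    #   self._calc_headers(info)  # -> {'User-Agent': 'UA-2', ...} (plus 'Cookie', if any)
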
    def _calc_cookies(self, url):
        pr = sanitized_Request(url)
        self.cookiejar.add_cookie_header(pr)
        return pr.get_header('Cookie')

    def _sort_thumbnails(self, thumbnails):
        thumbnails.sort(key=lambda t: (
            t.get('preference') if t.get('preference') is not None else -1,
            t.get('width') if t.get('width') is not None else -1,
            t.get('height') if t.get('height') is not None else -1,
            t.get('id') if t.get('id') is not None else '',
            t.get('url')))

    def _sanitize_thumbnails(self, info_dict):
        thumbnails = info_dict.get('thumbnails')
        if thumbnails is None:
            thumbnail = info_dict.get('thumbnail')
            if thumbnail:
                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
        if not thumbnails:
            return

        def check_thumbnails(thumbnails):
            for t in thumbnails:
                self.to_screen(f'[info] Testing thumbnail {t["id"]}')
                try:
                    self.urlopen(HEADRequest(t['url']))
                except network_exceptions as err:
                    self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
                    continue
                yield t

        self._sort_thumbnails(thumbnails)
        for i, t in enumerate(thumbnails):
            if t.get('id') is None:
                t['id'] = '%d' % i
            if t.get('width') and t.get('height'):
                t['resolution'] = '%dx%d' % (t['width'], t['height'])
            t['url'] = sanitize_url(t['url'])

        if self.params.get('check_formats') is True:
            info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
        else:
            info_dict['thumbnails'] = thumbnails

    def _fill_common_fields(self, info_dict, is_video=True):
        # TODO: move sanitization here
        if is_video:
            # playlists are allowed to lack "title"
            title = info_dict.get('title', NO_DEFAULT)
            if title is NO_DEFAULT:
                raise ExtractorError('Missing "title" field in extractor result',
                                     video_id=info_dict['id'], ie=info_dict['extractor'])
            info_dict['fulltitle'] = title
            if not title:
                if title == '':
                    self.write_debug('Extractor gave empty title. Creating a generic title')
                else:
                    self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
                info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'

        if info_dict.get('duration') is not None:
            info_dict['duration_string'] = formatSeconds(info_dict['duration'])

        for ts_key, date_key in (
                ('timestamp', 'upload_date'),
                ('release_timestamp', 'release_date'),
                ('modified_timestamp', 'modified_date'),
        ):
            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
                # see http://bugs.python.org/issue1646728)
                with contextlib.suppress(ValueError, OverflowError, OSError):
                    upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
                    info_dict[date_key] = upload_date.strftime('%Y%m%d')

        live_keys = ('is_live', 'was_live')
        live_status = info_dict.get('live_status')
        if live_status is None:
            for key in live_keys:
                if info_dict.get(key) is False:
                    continue
                if info_dict.get(key):
                    live_status = key
                    break
            if all(info_dict.get(key) is False for key in live_keys):
                live_status = 'not_live'
        if live_status:
            info_dict['live_status'] = live_status
            for key in live_keys:
                if info_dict.get(key) is None:
                    info_dict[key] = (live_status == key)

        # Auto generate title fields corresponding to the *_number fields when missing
        # in order to always have clean titles. This is very common for TV series.
        for field in ('chapter', 'season', 'episode'):
            if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
                info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])

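    # e.g. (illustrative): after _fill_common_fields, an entry carrying
    # {'season_number': 2} and no 'season' ends up with
    # info_dict['season'] == 'Season 2'.
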
    def _raise_pending_errors(self, info):
        err = info.pop('__pending_error', None)
        if err:
            self.report_error(err, tb=False)

    def process_video_result(self, info_dict, download=True):
        assert info_dict.get('_type', 'video') == 'video'
        self._num_videos += 1

        if 'id' not in info_dict:
            raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
        elif not info_dict.get('id'):
            raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])

        def report_force_conversion(field, field_not, conversion):
            self.report_warning(
                '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
                % (field, field_not, conversion))

        def sanitize_string_field(info, string_field):
            field = info.get(string_field)
            if field is None or isinstance(field, str):
                return
            report_force_conversion(string_field, 'a string', 'string')
            info[string_field] = str(field)

        def sanitize_numeric_fields(info):
            for numeric_field in self._NUMERIC_FIELDS:
                field = info.get(numeric_field)
                if field is None or isinstance(field, (int, float)):
                    continue
                report_force_conversion(numeric_field, 'numeric', 'int')
                info[numeric_field] = int_or_none(field)

        sanitize_string_field(info_dict, 'id')
        sanitize_numeric_fields(info_dict)
        if info_dict.get('section_end') and info_dict.get('section_start') is not None:
            info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
        if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
            self.report_warning('"duration" field is negative, there is an error in extractor')

        chapters = info_dict.get('chapters') or []
        if chapters and chapters[0].get('start_time'):
            chapters.insert(0, {'start_time': 0})

        dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
        for idx, (prev, current, next_) in enumerate(zip(
                (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
            if current.get('start_time') is None:
                current['start_time'] = prev.get('end_time')
            if not current.get('end_time'):
                current['end_time'] = next_.get('start_time')
            if not current.get('title'):
                current['title'] = f'<Untitled Chapter {idx}>'

        if 'playlist' not in info_dict:
            # It isn't part of a playlist
            info_dict['playlist'] = None
            info_dict['playlist_index'] = None

        self._sanitize_thumbnails(info_dict)

        thumbnail = info_dict.get('thumbnail')
        thumbnails = info_dict.get('thumbnails')
        if thumbnail:
            info_dict['thumbnail'] = sanitize_url(thumbnail)
        elif thumbnails:
            info_dict['thumbnail'] = thumbnails[-1]['url']

        if info_dict.get('display_id') is None and 'id' in info_dict:
            info_dict['display_id'] = info_dict['id']

        self._fill_common_fields(info_dict)

        for cc_kind in ('subtitles', 'automatic_captions'):
            cc = info_dict.get(cc_kind)
            if cc:
                for _, subtitle in cc.items():
                    for subtitle_format in subtitle:
                        if subtitle_format.get('url'):
                            subtitle_format['url'] = sanitize_url(subtitle_format['url'])
                        if subtitle_format.get('ext') is None:
                            subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()

        automatic_captions = info_dict.get('automatic_captions')
        subtitles = info_dict.get('subtitles')

        info_dict['requested_subtitles'] = self.process_subtitles(
            info_dict['id'], subtitles, automatic_captions)

        if info_dict.get('formats') is None:
            # There's only one format available
            formats = [info_dict]
        else:
            formats = info_dict['formats']

        # or None ensures --clean-infojson removes it
        info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
        if not self.params.get('allow_unplayable_formats'):
            formats = [f for f in formats if not f.get('has_drm')]
            if info_dict['_has_drm'] and formats and all(
                    f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
                self.report_warning(
                    'This video is DRM protected and only images are available for download. '
                    'Use --list-formats to see them')

        get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
        if not get_from_start:
            info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
        if info_dict.get('is_live') and formats:
            formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
            if get_from_start and not formats:
                self.raise_no_formats(info_dict, msg=(
                    '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
                    'If you want to download from the current time, use --no-live-from-start'))

        def is_wellformed(f):
            url = f.get('url')
            if not url:
                self.report_warning(
                    '"url" field is missing or empty - skipping format, '
                    'there is an error in extractor')
                return False
            if isinstance(url, bytes):
                sanitize_string_field(f, 'url')
            return True

        # Filter out malformed formats for better extraction robustness
        formats = list(filter(is_wellformed, formats or []))

        if not formats:
            self.raise_no_formats(info_dict)

        formats_dict = {}

        # We check that all the formats have the format and format_id fields
        for i, format in enumerate(formats):
            sanitize_string_field(format, 'format_id')
            sanitize_numeric_fields(format)
            format['url'] = sanitize_url(format['url'])
            if not format.get('format_id'):
                format['format_id'] = str(i)
            else:
                # Sanitize format_id from characters used in format selector expression
                format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
            format_id = format['format_id']
            if format_id not in formats_dict:
                formats_dict[format_id] = []
            formats_dict[format_id].append(format)

        # Make sure all formats have unique format_id
        common_exts = set(itertools.chain(*self._format_selection_exts.values()))
        for format_id, ambiguous_formats in formats_dict.items():
            ambiguous_id = len(ambiguous_formats) > 1
            for i, format in enumerate(ambiguous_formats):
                if ambiguous_id:
                    format['format_id'] = '%s-%d' % (format_id, i)
                if format.get('ext') is None:
                    format['ext'] = determine_ext(format['url']).lower()
                # Ensure there is no conflict between id and ext in format selection
                # See https://github.com/yt-dlp/yt-dlp/issues/1282
                if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
                    format['format_id'] = 'f%s' % format['format_id']

        for i, format in enumerate(formats):
            if format.get('format') is None:
                format['format'] = '{id} - {res}{note}'.format(
                    id=format['format_id'],
                    res=self.format_resolution(format),
                    note=format_field(format, 'format_note', ' (%s)'),
                )
            if format.get('protocol') is None:
                format['protocol'] = determine_protocol(format)
            if format.get('resolution') is None:
                format['resolution'] = self.format_resolution(format, default=None)
            if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
                format['dynamic_range'] = 'SDR'
            if (info_dict.get('duration') and format.get('tbr')
                    and not format.get('filesize') and not format.get('filesize_approx')):
                format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))

            # Add HTTP headers, so that external programs can use them from the
            # json output
            full_format_info = info_dict.copy()
            full_format_info.update(format)
            format['http_headers'] = self._calc_headers(full_format_info)
        # Remove private housekeeping stuff
        if '__x_forwarded_for_ip' in info_dict:
            del info_dict['__x_forwarded_for_ip']

        if self.params.get('check_formats') is True:
            formats = LazyList(self._check_formats(formats[::-1]), reverse=True)

        if not formats or formats[0] is not info_dict:
            # only set the 'formats' field if the original info_dict lists them;
            # otherwise we end up with a circular reference, where the first (and unique)
            # element in the 'formats' field in info_dict is info_dict itself,
            # which can't be exported to json
            info_dict['formats'] = formats

        info_dict, _ = self.pre_process(info_dict)

        if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
            return info_dict

        self.post_extract(info_dict)
        info_dict, _ = self.pre_process(info_dict, 'after_filter')

        # The pre-processors may have modified the formats
        formats = info_dict.get('formats', [info_dict])

        list_only = self.params.get('simulate') is None and (
            self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
        interactive_format_selection = not list_only and self.format_selector == '-'
        if self.params.get('list_thumbnails'):
            self.list_thumbnails(info_dict)
        if self.params.get('listsubtitles'):
            if 'automatic_captions' in info_dict:
                self.list_subtitles(
                    info_dict['id'], automatic_captions, 'automatic captions')
            self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
        if self.params.get('listformats') or interactive_format_selection:
            self.list_formats(info_dict)
        if list_only:
            # Without this printing, -F --print-json will not work
            self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
            return info_dict

        format_selector = self.format_selector
        if format_selector is None:
            req_format = self._default_format_spec(info_dict, download=download)
            self.write_debug('Default format spec: %s' % req_format)
            format_selector = self.build_format_selector(req_format)

        while True:
            if interactive_format_selection:
                req_format = input(
                    self._format_screen('\nEnter format selector: ', self.Styles.EMPHASIS))
                try:
                    format_selector = self.build_format_selector(req_format)
                except SyntaxError as err:
                    self.report_error(err, tb=False, is_error=False)
                    continue

            formats_to_download = list(format_selector({
                'formats': formats,
                'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
                'incomplete_formats': (
                    # All formats are video-only or
                    all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
                    # all formats are audio-only
                    or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
            }))
            if interactive_format_selection and not formats_to_download:
                self.report_error('Requested format is not available', tb=False, is_error=False)
                continue
            break

        if not formats_to_download:
            if not self.params.get('ignore_no_formats_error'):
                raise ExtractorError(
                    'Requested format is not available. Use --list-formats for a list of available formats',
                    expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
            self.report_warning('Requested format is not available')
            # Process what we can, even without any available formats.
            formats_to_download = [{}]

        requested_ranges = self.params.get('download_ranges')
        if requested_ranges:
            requested_ranges = tuple(requested_ranges(info_dict, self))

        best_format, downloaded_formats = formats_to_download[-1], []
        if download:
            if best_format:
                def to_screen(*msg):
                    self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')

                to_screen(f'Downloading {len(formats_to_download)} format(s):',
                          (f['format_id'] for f in formats_to_download))
                if requested_ranges:
                    to_screen(f'Downloading {len(requested_ranges)} time ranges:',
                              (f'{int(c["start_time"])}-{int(c["end_time"])}' for c in requested_ranges))
            max_downloads_reached = False

            for fmt, chapter in itertools.product(formats_to_download, requested_ranges or [{}]):
                new_info = self._copy_infodict(info_dict)
                new_info.update(fmt)
                offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
                if chapter or offset:
                    new_info.update({
                        'section_start': offset + chapter.get('start_time', 0),
                        'section_end': offset + min(chapter.get('end_time', duration), duration),
                        'section_title': chapter.get('title'),
                        'section_number': chapter.get('index'),
                    })
                downloaded_formats.append(new_info)
                try:
                    self.process_info(new_info)
                except MaxDownloadsReached:
                    max_downloads_reached = True
                self._raise_pending_errors(new_info)
                # Remove copied info
                for key, val in tuple(new_info.items()):
                    if info_dict.get(key) == val:
                        new_info.pop(key)
                if max_downloads_reached:
                    break

            write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
            assert write_archive.issubset({True, False, 'ignore'})
            if True in write_archive and False not in write_archive:
                self.record_download_archive(info_dict)

            info_dict['requested_downloads'] = downloaded_formats
            info_dict = self.run_all_pps('after_video', info_dict)
            if max_downloads_reached:
                raise MaxDownloadsReached()

        # We update the info dict with the selected best quality format (backwards compatibility)
        info_dict.update(best_format)
        return info_dict

2750 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
2751 """Select the requested subtitles and their format"""
2752 available_subs, normal_sub_langs = {}, []
2753 if normal_subtitles and self.params.get('writesubtitles'):
2754 available_subs.update(normal_subtitles)
2755 normal_sub_langs = tuple(normal_subtitles.keys())
2756 if automatic_captions and self.params.get('writeautomaticsub'):
2757 for lang, cap_info in automatic_captions.items():
2758 if lang not in available_subs:
2759 available_subs[lang] = cap_info
2760
2761 if not available_subs or (
2762 not self.params.get('writesubtitles')
2763 and not self.params.get('writeautomaticsub')):
2764 return None
2765
2766 all_sub_langs = tuple(available_subs.keys())
2767 if self.params.get('allsubtitles', False):
2768 requested_langs = all_sub_langs
2769 elif self.params.get('subtitleslangs', False):
2770 try:
2771 requested_langs = orderedSet_from_options(
2772 self.params.get('subtitleslangs'), {'all': all_sub_langs}, use_regex=True)
2773 except re.error as e:
2774 raise ValueError(f'Wrong regex for subtitleslangs: {e.pattern}')
2775 elif normal_sub_langs:
2776 requested_langs = ['en'] if 'en' in normal_sub_langs else normal_sub_langs[:1]
2777 else:
2778 requested_langs = ['en'] if 'en' in all_sub_langs else all_sub_langs[:1]
2779 if requested_langs:
2780 self.to_screen(f'[info] {video_id}: Downloading subtitles: {", ".join(requested_langs)}')
2781
2782 formats_query = self.params.get('subtitlesformat', 'best')
2783 formats_preference = formats_query.split('/') if formats_query else []
2784 subs = {}
2785 for lang in requested_langs:
2786 formats = available_subs.get(lang)
2787 if formats is None:
2788 self.report_warning(f'{lang} subtitles not available for {video_id}')
2789 continue
2790 for ext in formats_preference:
2791 if ext == 'best':
2792 f = formats[-1]
2793 break
2794 matches = list(filter(lambda f: f['ext'] == ext, formats))
2795 if matches:
2796 f = matches[-1]
2797 break
2798 else:
2799 f = formats[-1]
2800 self.report_warning(
2801 'No subtitle format found matching "%s" for language %s, '
2802 'using %s' % (formats_query, lang, f['ext']))
2803 subs[lang] = f
2804 return subs
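# A sketch of the selection above, with hypothetical extractor output:
#   ydl.params.update({'writesubtitles': True, 'subtitlesformat': 'srt/best'})
#   ydl.process_subtitles('xyz', {'en': [{'ext': 'vtt'}, {'ext': 'srt'}]}, None)
#   # -> {'en': {'ext': 'srt'}}  ('srt' is tried before falling back to best)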
2805
2806 def _forceprint(self, key, info_dict):
2807 if info_dict is None:
2808 return
2809 info_copy = info_dict.copy()
2810 info_copy['formats_table'] = self.render_formats_table(info_dict)
2811 info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
2812 info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
2813 info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))
2814
2815 def format_tmpl(tmpl):
2816 mobj = re.fullmatch(r'([\w.:,]|-\d|(?P<dict>{([\w.:,]|-\d)+}))+=?', tmpl)
2817 if not mobj:
2818 return tmpl
2819
2820 fmt = '%({})s'
2821 if tmpl.startswith('{'):
2822 tmpl = f'.{tmpl}'
2823 if tmpl.endswith('='):
2824 tmpl, fmt = tmpl[:-1], '{0} = %({0})#j'
2825 return '\n'.join(map(fmt.format, [tmpl] if mobj.group('dict') else tmpl.split(',')))
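# Shorthand handled by format_tmpl (anything else is passed through verbatim):
#   'title,id'   -> '%(title)s\n%(id)s'
#   'duration='  -> 'duration = %(duration)#j'
#   '{id,title}' -> '%(.{id,title})s'  (dict form stays a single field)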
2826
2827 for tmpl in self.params['forceprint'].get(key, []):
2828 self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))
2829
2830 for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
2831 filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
2832 tmpl = format_tmpl(tmpl)
2833 self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
2834 if self._ensure_dir_exists(filename):
2835 with open(filename, 'a', encoding='utf-8') as f:
2836 f.write(self.evaluate_outtmpl(tmpl, info_copy) + '\n')
2837
2838 def __forced_printings(self, info_dict, filename, incomplete):
2839 def print_mandatory(field, actual_field=None):
2840 if actual_field is None:
2841 actual_field = field
2842 if (self.params.get('force%s' % field, False)
2843 and (not incomplete or info_dict.get(actual_field) is not None)):
2844 self.to_stdout(info_dict[actual_field])
2845
2846 def print_optional(field):
2847 if (self.params.get('force%s' % field, False)
2848 and info_dict.get(field) is not None):
2849 self.to_stdout(info_dict[field])
2850
2851 info_dict = info_dict.copy()
2852 if filename is not None:
2853 info_dict['filename'] = filename
2854 if info_dict.get('requested_formats') is not None:
2855 # For RTMP URLs, also include the playpath
2856 info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
2857 elif info_dict.get('url'):
2858 info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')
2859
2860 if (self.params.get('forcejson')
2861 or self.params['forceprint'].get('video')
2862 or self.params['print_to_file'].get('video')):
2863 self.post_extract(info_dict)
2864 self._forceprint('video', info_dict)
2865
2866 print_mandatory('title')
2867 print_mandatory('id')
2868 print_mandatory('url', 'urls')
2869 print_optional('thumbnail')
2870 print_optional('description')
2871 print_optional('filename')
2872 if self.params.get('forceduration') and info_dict.get('duration') is not None:
2873 self.to_stdout(formatSeconds(info_dict['duration']))
2874 print_mandatory('format')
2875
2876 if self.params.get('forcejson'):
2877 self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
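# print_mandatory/print_optional back the deprecated force* params
# (forcetitle, forceid, forceurl, ...) kept for youtube-dl's --get-* flags;
# the modern --print/--print-to-file paths go through _forceprint above.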
2878
2879 def dl(self, name, info, subtitle=False, test=False):
2880 if not info.get('url'):
2881 self.raise_no_formats(info, True)
2882
2883 if test:
2884 verbose = self.params.get('verbose')
2885 params = {
2886 'test': True,
2887 'quiet': self.params.get('quiet') or not verbose,
2888 'verbose': verbose,
2889 'noprogress': not verbose,
2890 'nopart': True,
2891 'skip_unavailable_fragments': False,
2892 'keep_fragments': False,
2893 'overwrites': True,
2894 '_no_ytdl_file': True,
2895 }
2896 else:
2897 params = self.params
2898 fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
2899 if not test:
2900 for ph in self._progress_hooks:
2901 fd.add_progress_hook(ph)
2902 urls = '", "'.join(
2903 (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
2904 for f in info.get('requested_formats', []) or [info])
2905 self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')
2906
2907 # Note: Ideally info should be deep-copied so that hooks cannot modify it.
2908 # But it may contain objects that are not deep-copyable
2909 new_info = self._copy_infodict(info)
2910 if new_info.get('http_headers') is None:
2911 new_info['http_headers'] = self._calc_headers(new_info)
2912 return fd.download(name, new_info, subtitle)
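# With test=True the downloader is invoked with 'test': True so that
# downloaders which support it fetch only a small initial chunk; this is how
# formats are probed (e.g. when checking formats) without a full download.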
2913
2914 def existing_file(self, filepaths, *, default_overwrite=True):
2915 existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
2916 if existing_files and not self.params.get('overwrites', default_overwrite):
2917 return existing_files[0]
2918
2919 for file in existing_files:
2920 self.report_file_delete(file)
2921 os.remove(file)
2922 return None
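# With overwrites disabled, the first path that exists is reused; otherwise
# all existing candidates are deleted and None is returned so that the
# caller downloads afresh.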
2923
2924 def process_info(self, info_dict):
2925 """Process a single resolved IE result. (Modifies it in-place)"""
2926
2927 assert info_dict.get('_type', 'video') == 'video'
2928 original_infodict = info_dict
2929
2930 if 'format' not in info_dict and 'ext' in info_dict:
2931 info_dict['format'] = info_dict['ext']
2932
2933 # This is mostly just for backward compatibility of process_info
2934 # As a side-effect, this allows for format-specific filters
2935 if self._match_entry(info_dict) is not None:
2936 info_dict['__write_download_archive'] = 'ignore'
2937 return
2938
2939 # Does nothing under normal operation - for backward compatibility of process_info
2940 self.post_extract(info_dict)
2941 self._num_downloads += 1
2942
2943 # info_dict['_filename'] needs to be set for backward compatibility
2944 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2945 temp_filename = self.prepare_filename(info_dict, 'temp')
2946 files_to_move = {}
2947
2948 # Forced printings
2949 self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
2950
2951 def check_max_downloads():
2952 if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
2953 raise MaxDownloadsReached()
2954
2955 if self.params.get('simulate'):
2956 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
2957 check_max_downloads()
2958 return
2959
2960 if full_filename is None:
2961 return
2962 if not self._ensure_dir_exists(encodeFilename(full_filename)):
2963 return
2964 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
2965 return
2966
2967 if self._write_description('video', info_dict,
2968 self.prepare_filename(info_dict, 'description')) is None:
2969 return
2970
2971 sub_files = self._write_subtitles(info_dict, temp_filename)
2972 if sub_files is None:
2973 return
2974 files_to_move.update(dict(sub_files))
2975
2976 thumb_files = self._write_thumbnails(
2977 'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
2978 if thumb_files is None:
2979 return
2980 files_to_move.update(dict(thumb_files))
2981
2982 infofn = self.prepare_filename(info_dict, 'infojson')
2983 _infojson_written = self._write_info_json('video', info_dict, infofn)
2984 if _infojson_written:
2985 info_dict['infojson_filename'] = infofn
2986 # For backward compatibility, even though it was a private field
2987 info_dict['__infojson_filename'] = infofn
2988 elif _infojson_written is None:
2989 return
2990
2991 # Note: Annotations are deprecated
2992 annofn = None
2993 if self.params.get('writeannotations', False):
2994 annofn = self.prepare_filename(info_dict, 'annotation')
2995 if annofn:
2996 if not self._ensure_dir_exists(encodeFilename(annofn)):
2997 return
2998 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
2999 self.to_screen('[info] Video annotations are already present')
3000 elif not info_dict.get('annotations'):
3001 self.report_warning('There are no annotations to write.')
3002 else:
3003 try:
3004 self.to_screen('[info] Writing video annotations to: ' + annofn)
3005 with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
3006 annofile.write(info_dict['annotations'])
3007 except (KeyError, TypeError):
3008 self.report_warning('There are no annotations to write.')
3009 except OSError:
3010 self.report_error('Cannot write annotations file: ' + annofn)
3011 return
3012
3013 # Write internet shortcut files
3014 def _write_link_file(link_type):
3015 url = try_get(info_dict['webpage_url'], iri_to_uri)
3016 if not url:
3017 self.report_warning(
3018 f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
3019 return True
3020 linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
3021 if not self._ensure_dir_exists(encodeFilename(linkfn)):
3022 return False
3023 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
3024 self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
3025 return True
3026 try:
3027 self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
3028 with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
3029 newline='\r\n' if link_type == 'url' else '\n') as linkfile:
3030 template_vars = {'url': url}
3031 if link_type == 'desktop':
3032 template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
3033 linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
3034 except OSError:
3035 self.report_error(f'Cannot write internet shortcut {linkfn}')
3036 return False
3037 return True
3038
3039 write_links = {
3040 'url': self.params.get('writeurllink'),
3041 'webloc': self.params.get('writewebloclink'),
3042 'desktop': self.params.get('writedesktoplink'),
3043 }
3044 if self.params.get('writelink'):
3045 link_type = ('webloc' if sys.platform == 'darwin'
3046 else 'desktop' if sys.platform.startswith('linux')
3047 else 'url')
3048 write_links[link_type] = True
3049
3050 if any(should_write and not _write_link_file(link_type)
3051 for link_type, should_write in write_links.items()):
3052 return
3053
3054 def replace_info_dict(new_info):
3055 nonlocal info_dict
3056 if new_info == info_dict:
3057 return
3058 info_dict.clear()
3059 info_dict.update(new_info)
3060
3061 new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
3062 replace_info_dict(new_info)
3063
3064 if self.params.get('skip_download'):
3065 info_dict['filepath'] = temp_filename
3066 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3067 info_dict['__files_to_move'] = files_to_move
3068 replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
3069 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
3070 else:
3071 # Download
3072 info_dict.setdefault('__postprocessors', [])
3073 try:
3074
3075 def existing_video_file(*filepaths):
3076 ext = info_dict.get('ext')
3077 converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
3078 file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
3079 default_overwrite=False)
3080 if file:
3081 info_dict['ext'] = os.path.splitext(file)[1][1:]
3082 return file
3083
3084 fd, success = None, True
3085 if info_dict.get('protocol') or info_dict.get('url'):
3086 fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
3087 if fd is not FFmpegFD and (
3088 info_dict.get('section_start') or info_dict.get('section_end')):
3089 msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
3090 else 'You have requested downloading the video partially, but ffmpeg is not installed')
3091 self.report_error(f'{msg}. Aborting')
3092 return
3093
3094 if info_dict.get('requested_formats') is not None:
3095 requested_formats = info_dict['requested_formats']
3096 old_ext = info_dict['ext']
3097 if self.params.get('merge_output_format') is None:
3098 if (info_dict['ext'] == 'webm'
3099 and info_dict.get('thumbnails')
3100 # check with type instead of pp_key, __name__, or isinstance
3101 # since we don't want any custom PPs to trigger this
3102 and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])): # noqa: E721
3103 info_dict['ext'] = 'mkv'
3104 self.report_warning(
3105 'webm doesn\'t support embedding a thumbnail, mkv will be used')
3106 new_ext = info_dict['ext']
3107
3108 def correct_ext(filename, ext=new_ext):
3109 if filename == '-':
3110 return filename
3111 filename_real_ext = os.path.splitext(filename)[1][1:]
3112 filename_wo_ext = (
3113 os.path.splitext(filename)[0]
3114 if filename_real_ext in (old_ext, new_ext)
3115 else filename)
3116 return f'{filename_wo_ext}.{ext}'
3117
3118 # Ensure filename always has a correct extension for successful merge
3119 full_filename = correct_ext(full_filename)
3120 temp_filename = correct_ext(temp_filename)
3121 dl_filename = existing_video_file(full_filename, temp_filename)
3122 info_dict['__real_download'] = False
3123
3124 merger = FFmpegMergerPP(self)
3125 downloaded = []
3126 if dl_filename is not None:
3127 self.report_file_already_downloaded(dl_filename)
3128 elif fd:
3129 for f in requested_formats if fd != FFmpegFD else []:
3130 f['filepath'] = fname = prepend_extension(
3131 correct_ext(temp_filename, info_dict['ext']),
3132 'f%s' % f['format_id'], info_dict['ext'])
3133 downloaded.append(fname)
3134 info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
3135 success, real_download = self.dl(temp_filename, info_dict)
3136 info_dict['__real_download'] = real_download
3137 else:
3138 if self.params.get('allow_unplayable_formats'):
3139 self.report_warning(
3140 'You have requested merging of multiple formats '
3141 'while also allowing unplayable formats to be downloaded. '
3142 'The formats won\'t be merged to prevent data corruption.')
3143 elif not merger.available:
3144 msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
3145 if not self.params.get('ignoreerrors'):
3146 self.report_error(f'{msg}. Aborting due to --abort-on-error')
3147 return
3148 self.report_warning(f'{msg}. The formats won\'t be merged')
3149
3150 if temp_filename == '-':
3151 reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
3152 else 'but the formats are incompatible for simultaneous download' if merger.available
3153 else 'but ffmpeg is not installed')
3154 self.report_warning(
3155 f'You have requested downloading multiple formats to stdout {reason}. '
3156 'The formats will be streamed one after the other')
3157 fname = temp_filename
3158 for f in requested_formats:
3159 new_info = dict(info_dict)
3160 del new_info['requested_formats']
3161 new_info.update(f)
3162 if temp_filename != '-':
3163 fname = prepend_extension(
3164 correct_ext(temp_filename, new_info['ext']),
3165 'f%s' % f['format_id'], new_info['ext'])
3166 if not self._ensure_dir_exists(fname):
3167 return
3168 f['filepath'] = fname
3169 downloaded.append(fname)
3170 partial_success, real_download = self.dl(fname, new_info)
3171 info_dict['__real_download'] = info_dict['__real_download'] or real_download
3172 success = success and partial_success
3173
3174 if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
3175 info_dict['__postprocessors'].append(merger)
3176 info_dict['__files_to_merge'] = downloaded
3177 # Even if nothing was downloaded, the merge itself only happens now
3178 info_dict['__real_download'] = True
3179 else:
3180 for file in downloaded:
3181 files_to_move[file] = None
3182 else:
3183 # Just a single file
3184 dl_filename = existing_video_file(full_filename, temp_filename)
3185 if dl_filename is None or dl_filename == temp_filename:
3186 # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
3187 # So we should try to resume the download
3188 success, real_download = self.dl(temp_filename, info_dict)
3189 info_dict['__real_download'] = real_download
3190 else:
3191 self.report_file_already_downloaded(dl_filename)
3192
3193 dl_filename = dl_filename or temp_filename
3194 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3195
3196 except network_exceptions as err:
3197 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
3198 return
3199 except OSError as err:
3200 raise UnavailableVideoError(err)
3201 except (ContentTooShortError, ) as err:
3202 self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
3203 return
3204
3205 self._raise_pending_errors(info_dict)
3206 if success and full_filename != '-':
3207
3208 def fixup():
3209 do_fixup = True
3210 fixup_policy = self.params.get('fixup')
3211 vid = info_dict['id']
3212
3213 if fixup_policy in ('ignore', 'never'):
3214 return
3215 elif fixup_policy == 'warn':
3216 do_fixup = 'warn'
3217 elif fixup_policy != 'force':
3218 assert fixup_policy in ('detect_or_warn', None)
3219 if not info_dict.get('__real_download'):
3220 do_fixup = False
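# Summary of the 'fixup' policies decoded above: 'never'/'ignore' skip fixups,
# 'warn' only reports, 'detect_or_warn' (the default) fixes freshly-downloaded
# files when ffmpeg is available, and 'force' always attempts the fix.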
3221
3222 def ffmpeg_fixup(cndn, msg, cls):
3223 if not (do_fixup and cndn):
3224 return
3225 elif do_fixup == 'warn':
3226 self.report_warning(f'{vid}: {msg}')
3227 return
3228 pp = cls(self)
3229 if pp.available:
3230 info_dict['__postprocessors'].append(pp)
3231 else:
3232 self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
3233
3234 stretched_ratio = info_dict.get('stretched_ratio')
3235 ffmpeg_fixup(stretched_ratio not in (1, None),
3236 f'Non-uniform pixel ratio {stretched_ratio}',
3237 FFmpegFixupStretchedPP)
3238
3239 downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
3240 downloader = downloader.FD_NAME if downloader else None
3241
3242 ext = info_dict.get('ext')
3243 postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
3244 isinstance(pp, FFmpegVideoConvertorPP)
3245 and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
3246 ) for pp in self._pps['post_process'])
3247
3248 if not postprocessed_by_ffmpeg:
3249 ffmpeg_fixup(ext == 'm4a' and info_dict.get('container') == 'm4a_dash',
3250 'writing DASH m4a. Only some players support this container',
3251 FFmpegFixupM4aPP)
3252 ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
3253 or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
3254 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
3255 FFmpegFixupM3u8PP)
3256 ffmpeg_fixup(info_dict.get('is_live') and downloader == 'DashSegmentsFD',
3257 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)
3258
3259 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
3260 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)
3261
3262 fixup()
3263 try:
3264 replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
3265 except PostProcessingError as err:
3266 self.report_error('Postprocessing: %s' % str(err))
3267 return
3268 try:
3269 for ph in self._post_hooks:
3270 ph(info_dict['filepath'])
3271 except Exception as err:
3272 self.report_error('post hooks: %s' % str(err))
3273 return
3274 info_dict['__write_download_archive'] = True
3275
3276 assert info_dict is original_infodict # Make sure the info_dict was modified in-place
3277 if self.params.get('force_write_download_archive'):
3278 info_dict['__write_download_archive'] = True
3279 check_max_downloads()
3280
3281 def __download_wrapper(self, func):
3282 @functools.wraps(func)
3283 def wrapper(*args, **kwargs):
3284 try:
3285 res = func(*args, **kwargs)
3286 except UnavailableVideoError as e:
3287 self.report_error(e)
3288 except DownloadCancelled as e:
3289 self.to_screen(f'[info] {e}')
3290 if not self.params.get('break_per_url'):
3291 raise
3292 self._num_downloads = 0
3293 else:
3294 if self.params.get('dump_single_json', False):
3295 self.post_extract(res)
3296 self.to_stdout(json.dumps(self.sanitize_info(res)))
3297 return wrapper
3298
3299 def download(self, url_list):
3300 """Download a given list of URLs."""
3301 url_list = variadic(url_list) # Passing a single URL is a common mistake
3302 outtmpl = self.params['outtmpl']['default']
3303 if (len(url_list) > 1
3304 and outtmpl != '-'
3305 and '%' not in outtmpl
3306 and self.params.get('max_downloads') != 1):
3307 raise SameFileError(outtmpl)
3308
3309 for url in url_list:
3310 self.__download_wrapper(self.extract_info)(
3311 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
3312
3313 return self._download_retcode
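# A minimal embedding sketch (params are illustrative, not exhaustive):
#   from yt_dlp import YoutubeDL
#   with YoutubeDL({'outtmpl': '%(title)s.%(ext)s'}) as ydl:
#       retcode = ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])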
3314
3315 def download_with_info_file(self, info_filename):
3316 with contextlib.closing(fileinput.FileInput(
3317 [info_filename], mode='r',
3318 openhook=fileinput.hook_encoded('utf-8'))) as f:
3319 # FileInput doesn't have a read method, so we can't call json.load
3320 info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
3321 try:
3322 self.__download_wrapper(self.process_ie_result)(info, download=True)
3323 except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
3324 if not isinstance(e, EntryNotInPlaylist):
3325 self.to_stderr('\r')
3326 webpage_url = info.get('webpage_url')
3327 if webpage_url is not None:
3328 self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
3329 return self.download([webpage_url])
3330 else:
3331 raise
3332 return self._download_retcode
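# Counterpart of --load-info-json: a sketch, assuming an info.json previously
# produced with --write-info-json:
#   ydl.download_with_info_file('video.info.json')
# On failure it falls back to re-extracting from 'webpage_url' when available.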
3333
3334 @staticmethod
3335 def sanitize_info(info_dict, remove_private_keys=False):
3336 ''' Sanitize the infodict for converting to json '''
3337 if info_dict is None:
3338 return info_dict
3339 info_dict.setdefault('epoch', int(time.time()))
3340 info_dict.setdefault('_type', 'video')
3341 info_dict.setdefault('_version', {
3342 'version': __version__,
3343 'current_git_head': current_git_head(),
3344 'release_git_head': RELEASE_GIT_HEAD,
3345 'repository': REPOSITORY,
3346 })
3347
3348 if remove_private_keys:
3349 reject = lambda k, v: v is None or k.startswith('__') or k in {
3350 'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
3351 'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber',
3352 }
3353 else:
3354 reject = lambda k, v: False
3355
3356 def filter_fn(obj):
3357 if isinstance(obj, dict):
3358 return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
3359 elif isinstance(obj, (list, tuple, set, LazyList)):
3360 return list(map(filter_fn, obj))
3361 elif obj is None or isinstance(obj, (str, int, float, bool)):
3362 return obj
3363 else:
3364 return repr(obj)
3365
3366 return filter_fn(info_dict)
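# e.g. filter_fn turns a LazyList of formats into a plain list and any
# non-JSON-serializable value (datetime, Request, ...) into its repr(), so
# json.dumps(YoutubeDL.sanitize_info(info)) should not raise TypeError.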
3367
3368 @staticmethod
3369 def filter_requested_info(info_dict, actually_filter=True):
3370 ''' Alias of sanitize_info for backward compatibility '''
3371 return YoutubeDL.sanitize_info(info_dict, actually_filter)
3372
3373 def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
3374 for filename in set(filter(None, files_to_delete)):
3375 if msg:
3376 self.to_screen(msg % filename)
3377 try:
3378 os.remove(filename)
3379 except OSError:
3380 self.report_warning(f'Unable to delete file {filename}')
3381 if filename in info.get('__files_to_move', []): # NB: Delete even if None
3382 del info['__files_to_move'][filename]
3383
3384 @staticmethod
3385 def post_extract(info_dict):
3386 def actual_post_extract(info_dict):
3387 if info_dict.get('_type') in ('playlist', 'multi_video'):
3388 for video_dict in info_dict.get('entries', {}):
3389 actual_post_extract(video_dict or {})
3390 return
3391
3392 post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
3393 info_dict.update(post_extractor())
3394
3395 actual_post_extract(info_dict or {})
3396
3397 def run_pp(self, pp, infodict):
3398 files_to_delete = []
3399 if '__files_to_move' not in infodict:
3400 infodict['__files_to_move'] = {}
3401 try:
3402 files_to_delete, infodict = pp.run(infodict)
3403 except PostProcessingError as e:
3404 # Must be True and not 'only_download'
3405 if self.params.get('ignoreerrors') is True:
3406 self.report_error(e)
3407 return infodict
3408 raise
3409
3410 if not files_to_delete:
3411 return infodict
3412 if self.params.get('keepvideo', False):
3413 for f in files_to_delete:
3414 infodict['__files_to_move'].setdefault(f, '')
3415 else:
3416 self._delete_downloaded_files(
3417 *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
3418 return infodict
3419
3420 def run_all_pps(self, key, info, *, additional_pps=None):
3421 self._forceprint(key, info)
3422 for pp in (additional_pps or []) + self._pps[key]:
3423 info = self.run_pp(pp, info)
3424 return info
3425
3426 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
3427 info = dict(ie_info)
3428 info['__files_to_move'] = files_to_move or {}
3429 try:
3430 info = self.run_all_pps(key, info)
3431 except PostProcessingError as err:
3432 msg = f'Preprocessing: {err}'
3433 info.setdefault('__pending_error', msg)
3434 self.report_error(msg, is_error=False)
3435 return info, info.pop('__files_to_move', None)
3436
3437 def post_process(self, filename, info, files_to_move=None):
3438 """Run all the postprocessors on the given file."""
3439 info['filepath'] = filename
3440 info['__files_to_move'] = files_to_move or {}
3441 info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
3442 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
3443 del info['__files_to_move']
3444 return self.run_all_pps('after_move', info)
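# PP hook order as used in this file (see POSTPROCESS_WHEN for the full list):
# pre_process -> before_dl -> (download) -> post_process -> after_move,
# with 'after_video' running once all requested formats of a video are done.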
3445
3446 def _make_archive_id(self, info_dict):
3447 video_id = info_dict.get('id')
3448 if not video_id:
3449 return
3450 # Future-proof against any change in case,
3451 # and keep backwards compatibility with prior versions
3452 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
3453 if extractor is None:
3454 url = str_or_none(info_dict.get('url'))
3455 if not url:
3456 return
3457 # Try to find matching extractor for the URL and take its ie_key
3458 for ie_key, ie in self._ies.items():
3459 if ie.suitable(url):
3460 extractor = ie_key
3461 break
3462 else:
3463 return
3464 return make_archive_id(extractor, video_id)
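# Archive IDs are '<extractor key in lowercase> <video id>' strings,
# e.g. 'youtube BaW_jenozKc' (see utils.make_archive_id for the exact format).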
3465
3466 def in_download_archive(self, info_dict):
3467 fn = self.params.get('download_archive')
3468 if fn is None:
3469 return False
3470
3471 vid_ids = [self._make_archive_id(info_dict)]
3472 vid_ids.extend(info_dict.get('_old_archive_ids') or [])
3473 return any(id_ in self.archive for id_ in vid_ids)
3474
3475 def record_download_archive(self, info_dict):
3476 fn = self.params.get('download_archive')
3477 if fn is None:
3478 return
3479 vid_id = self._make_archive_id(info_dict)
3480 assert vid_id
3481 self.write_debug(f'Adding to archive: {vid_id}')
3482 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
3483 archive_file.write(vid_id + '\n')
3484 self.archive.add(vid_id)
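# The archive itself is plain text with one such ID per line; writes go
# through locked_file so concurrent invocations don't corrupt it.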
3485
3486 @staticmethod
3487 def format_resolution(format, default='unknown'):
3488 if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
3489 return 'audio only'
3490 if format.get('resolution') is not None:
3491 return format['resolution']
3492 if format.get('width') and format.get('height'):
3493 return '%dx%d' % (format['width'], format['height'])
3494 elif format.get('height'):
3495 return '%sp' % format['height']
3496 elif format.get('width'):
3497 return '%dx?' % format['width']
3498 return default
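# e.g. {'width': 1920, 'height': 1080} -> '1920x1080'; {'height': 720} -> '720p';
# audio-only formats -> 'audio only'; with no usable fields, the given default.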
3499
3500 def _list_format_headers(self, *headers):
3501 if self.params.get('listformats_table', True) is not False:
3502 return [self._format_out(header, self.Styles.HEADERS) for header in headers]
3503 return headers
3504
3505 def _format_note(self, fdict):
3506 res = ''
3507 if fdict.get('ext') in ['f4f', 'f4m']:
3508 res += '(unsupported)'
3509 if fdict.get('language'):
3510 if res:
3511 res += ' '
3512 res += '[%s]' % fdict['language']
3513 if fdict.get('format_note') is not None:
3514 if res:
3515 res += ' '
3516 res += fdict['format_note']
3517 if fdict.get('tbr') is not None:
3518 if res:
3519 res += ', '
3520 res += '%4dk' % fdict['tbr']
3521 if fdict.get('container') is not None:
3522 if res:
3523 res += ', '
3524 res += '%s container' % fdict['container']
3525 if (fdict.get('vcodec') is not None
3526 and fdict.get('vcodec') != 'none'):
3527 if res:
3528 res += ', '
3529 res += fdict['vcodec']
3530 if fdict.get('vbr') is not None:
3531 res += '@'
3532 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
3533 res += 'video@'
3534 if fdict.get('vbr') is not None:
3535 res += '%4dk' % fdict['vbr']
3536 if fdict.get('fps') is not None:
3537 if res:
3538 res += ', '
3539 res += '%sfps' % fdict['fps']
3540 if fdict.get('acodec') is not None:
3541 if res:
3542 res += ', '
3543 if fdict['acodec'] == 'none':
3544 res += 'video only'
3545 else:
3546 res += '%-5s' % fdict['acodec']
3547 elif fdict.get('abr') is not None:
3548 if res:
3549 res += ', '
3550 res += 'audio'
3551 if fdict.get('abr') is not None:
3552 res += '@%3dk' % fdict['abr']
3553 if fdict.get('asr') is not None:
3554 res += ' (%5dHz)' % fdict['asr']
3555 if fdict.get('filesize') is not None:
3556 if res:
3557 res += ', '
3558 res += format_bytes(fdict['filesize'])
3559 elif fdict.get('filesize_approx') is not None:
3560 if res:
3561 res += ', '
3562 res += '~' + format_bytes(fdict['filesize_approx'])
3563 return res
3564
3565 def render_formats_table(self, info_dict):
3566 if not info_dict.get('formats') and not info_dict.get('url'):
3567 return None
3568
3569 formats = info_dict.get('formats', [info_dict])
3570 if self.params.get('listformats_table', True) is False:
3571 table = [
3572 [
3573 format_field(f, 'format_id'),
3574 format_field(f, 'ext'),
3575 self.format_resolution(f),
3576 self._format_note(f)
3577 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3578 return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
3579
3580 def simplified_codec(f, field):
3581 assert field in ('acodec', 'vcodec')
3582 codec = f.get(field, 'unknown')
3583 if not codec:
3584 return 'unknown'
3585 elif codec != 'none':
3586 return '.'.join(codec.split('.')[:4])
3587
3588 if field == 'vcodec' and f.get('acodec') == 'none':
3589 return 'images'
3590 elif field == 'acodec' and f.get('vcodec') == 'none':
3591 return ''
3592 return self._format_out('audio only' if field == 'vcodec' else 'video only',
3593 self.Styles.SUPPRESS)
3594
3595 delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
3596 table = [
3597 [
3598 self._format_out(format_field(f, 'format_id'), self.Styles.ID),
3599 format_field(f, 'ext'),
3600 format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
3601 format_field(f, 'fps', '\t%d', func=round),
3602 format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
3603 format_field(f, 'audio_channels', '\t%s'),
3604 delim,
3605 format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
3606 format_field(f, 'tbr', '\t%dk', func=round),
3607 shorten_protocol_name(f.get('protocol', '')),
3608 delim,
3609 simplified_codec(f, 'vcodec'),
3610 format_field(f, 'vbr', '\t%dk', func=round),
3611 simplified_codec(f, 'acodec'),
3612 format_field(f, 'abr', '\t%dk', func=round),
3613 format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
3614 join_nonempty(
3615 self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
3616 format_field(f, 'language', '[%s]'),
3617 join_nonempty(format_field(f, 'format_note'),
3618 format_field(f, 'container', ignore=(None, f.get('ext'))),
3619 delim=', '),
3620 delim=' '),
3621 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3622 header_line = self._list_format_headers(
3623 'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', 'CH', delim, '\tFILESIZE', '\tTBR', 'PROTO',
3624 delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')
3625
3626 return render_table(
3627 header_line, table, hide_empty=True,
3628 delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
3629
3630 def render_thumbnails_table(self, info_dict):
3631 thumbnails = list(info_dict.get('thumbnails') or [])
3632 if not thumbnails:
3633 return None
3634 return render_table(
3635 self._list_format_headers('ID', 'Width', 'Height', 'URL'),
3636 [[t.get('id'), t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])
3637
3638 def render_subtitles_table(self, video_id, subtitles):
3639 def _row(lang, formats):
3640 exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
3641 if len(set(names)) == 1:
3642 names = [] if names[0] == 'unknown' else names[:1]
3643 return [lang, ', '.join(names), ', '.join(exts)]
3644
3645 if not subtitles:
3646 return None
3647 return render_table(
3648 self._list_format_headers('Language', 'Name', 'Formats'),
3649 [_row(lang, formats) for lang, formats in subtitles.items()],
3650 hide_empty=True)
3651
3652 def __list_table(self, video_id, name, func, *args):
3653 table = func(*args)
3654 if not table:
3655 self.to_screen(f'{video_id} has no {name}')
3656 return
3657 self.to_screen(f'[info] Available {name} for {video_id}:')
3658 self.to_stdout(table)
3659
3660 def list_formats(self, info_dict):
3661 self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)
3662
3663 def list_thumbnails(self, info_dict):
3664 self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)
3665
3666 def list_subtitles(self, video_id, subtitles, name='subtitles'):
3667 self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
3668
3669 def urlopen(self, req):
3670 """ Start an HTTP download """
3671 if isinstance(req, str):
3672 req = sanitized_Request(req)
3673 return self._opener.open(req, timeout=self._socket_timeout)
3674
3675 def print_debug_header(self):
3676 if not self.params.get('verbose'):
3677 return
3678
3679 # These imports can be slow, so import them only as needed
3680 from .extractor.extractors import _LAZY_LOADER
3681 from .extractor.extractors import _PLUGIN_CLASSES as plugin_extractors
3682
3683 def get_encoding(stream):
3684 ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
3685 if not supports_terminal_sequences(stream):
3686 from .utils import WINDOWS_VT_MODE # Must be imported locally
3687 ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
3688 return ret
3689
3690 encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
3691 locale.getpreferredencoding(),
3692 sys.getfilesystemencoding(),
3693 self.get_encoding(),
3694 ', '.join(
3695 f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
3696 if stream is not None and key != 'console')
3697 )
3698
3699 logger = self.params.get('logger')
3700 if logger:
3701 write_debug = lambda msg: logger.debug(f'[debug] {msg}')
3702 write_debug(encoding_str)
3703 else:
3704 write_string(f'[debug] {encoding_str}\n', encoding=None)
3705 write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')
3706
3707 source = detect_variant()
3708 if VARIANT not in (None, 'pip'):
3709 source += '*'
3710 write_debug(join_nonempty(
3711 f'{"yt-dlp" if REPOSITORY == "yt-dlp/yt-dlp" else REPOSITORY} version',
3712 __version__,
3713 f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
3714 '' if source == 'unknown' else f'({source})',
3715 delim=' '))
3716 if not _LAZY_LOADER:
3717 if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
3718 write_debug('Lazy loading extractors is forcibly disabled')
3719 else:
3720 write_debug('Lazy loading extractors is disabled')
3721 if plugin_extractors or plugin_postprocessors:
3722 write_debug('Plugins: %s' % [
3723 '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
3724 for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
3725 if self.params['compat_opts']:
3726 write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
3727
3728 if current_git_head():
3729 write_debug(f'Git HEAD: {current_git_head()}')
3730 write_debug(system_identifier())
3731
3732 exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
3733 ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
3734 if ffmpeg_features:
3735 exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))
3736
3737 exe_versions['rtmpdump'] = rtmpdump_version()
3738 exe_versions['phantomjs'] = PhantomJSwrapper._version()
3739 exe_str = ', '.join(
3740 f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
3741 ) or 'none'
3742 write_debug('exe versions: %s' % exe_str)
3743
3744 from .compat.compat_utils import get_package_info
3745 from .dependencies import available_dependencies
3746
3747 write_debug('Optional libraries: %s' % (', '.join(sorted({
3748 join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
3749 })) or 'none'))
3750
3751 self._setup_opener()
3752 proxy_map = {}
3753 for handler in self._opener.handlers:
3754 if hasattr(handler, 'proxies'):
3755 proxy_map.update(handler.proxies)
3756 write_debug(f'Proxy map: {proxy_map}')
3757
3758 # Not implemented
3759 if False and self.params.get('call_home'):
3760 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
3761 write_debug('Public IP address: %s' % ipaddr)
3762 latest_version = self.urlopen(
3763 'https://yt-dl.org/latest/version').read().decode()
3764 if version_tuple(latest_version) > version_tuple(__version__):
3765 self.report_warning(
3766 'You are using an outdated version (newest version: %s)! '
3767 'See https://yt-dl.org/update if you need help updating.' %
3768 latest_version)
3769
3770 def _setup_opener(self):
3771 if hasattr(self, '_opener'):
3772 return
3773 timeout_val = self.params.get('socket_timeout')
3774 self._socket_timeout = 20 if timeout_val is None else float(timeout_val)
3775
3776 opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
3777 opts_cookiefile = self.params.get('cookiefile')
3778 opts_proxy = self.params.get('proxy')
3779
3780 self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
3781
3782 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
3783 if opts_proxy is not None:
3784 if opts_proxy == '':
3785 proxies = {}
3786 else:
3787 proxies = {'http': opts_proxy, 'https': opts_proxy}
3788 else:
3789 proxies = urllib.request.getproxies()
3790 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
3791 if 'http' in proxies and 'https' not in proxies:
3792 proxies['https'] = proxies['http']
3793 proxy_handler = PerRequestProxyHandler(proxies)
3794
3795 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
3796 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3797 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
3798 redirect_handler = YoutubeDLRedirectHandler()
3799 data_handler = urllib.request.DataHandler()
3800
3801 # When passing our own FileHandler instance, build_opener won't add the
3802 # default FileHandler, which lets us disable the file protocol; it
3803 # can be used for malicious purposes (see
3804 # https://github.com/ytdl-org/youtube-dl/issues/8227)
3805 file_handler = urllib.request.FileHandler()
3806
3807 def file_open(*args, **kwargs):
3808 raise urllib.error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
3809 file_handler.file_open = file_open
3810
3811 opener = urllib.request.build_opener(
3812 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
3813
3814 # Delete the default user-agent header, which would otherwise apply in
3815 # cases where our custom HTTP handler doesn't come into play
3816 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
3817 opener.addheaders = []
3818 self._opener = opener
3819
3820 def encode(self, s):
3821 if isinstance(s, bytes):
3822 return s # Already encoded
3823
3824 try:
3825 return s.encode(self.get_encoding())
3826 except UnicodeEncodeError as err:
3827 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3828 raise
3829
3830 def get_encoding(self):
3831 encoding = self.params.get('encoding')
3832 if encoding is None:
3833 encoding = preferredencoding()
3834 return encoding
3835
3836 def _write_info_json(self, label, ie_result, infofn, overwrite=None):
3837 ''' Write infojson and return True = written, 'exists' = already exists, False = skip, None = error '''
3838 if overwrite is None:
3839 overwrite = self.params.get('overwrites', True)
3840 if not self.params.get('writeinfojson'):
3841 return False
3842 elif not infofn:
3843 self.write_debug(f'Skipping writing {label} infojson')
3844 return False
3845 elif not self._ensure_dir_exists(infofn):
3846 return None
3847 elif not overwrite and os.path.exists(infofn):
3848 self.to_screen(f'[info] {label.title()} metadata is already present')
3849 return 'exists'
3850
3851 self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
3852 try:
3853 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
3854 return True
3855 except OSError:
3856 self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
3857 return None
3858
3859 def _write_description(self, label, ie_result, descfn):
3860 ''' Write description and return True = written, False = skip, None = error '''
3861 if not self.params.get('writedescription'):
3862 return False
3863 elif not descfn:
3864 self.write_debug(f'Skipping writing {label} description')
3865 return False
3866 elif not self._ensure_dir_exists(descfn):
3867 return None
3868 elif not self.params.get('overwrites', True) and os.path.exists(descfn):
3869 self.to_screen(f'[info] {label.title()} description is already present')
3870 elif ie_result.get('description') is None:
3871 self.report_warning(f'There\'s no {label} description to write')
3872 return False
3873 else:
3874 try:
3875 self.to_screen(f'[info] Writing {label} description to: {descfn}')
3876 with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
3877 descfile.write(ie_result['description'])
3878 except OSError:
3879 self.report_error(f'Cannot write {label} description file {descfn}')
3880 return None
3881 return True
3882
3883 def _write_subtitles(self, info_dict, filename):
3884 ''' Write subtitles to file and return a list of (sub_filename, final_sub_filename), or None on error '''
3885 ret = []
3886 subtitles = info_dict.get('requested_subtitles')
3887 if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
3888 # Subtitle download errors are already handled by the relevant IE,
3889 # so this silently continues when used with an IE lacking subtitle support
3890 return ret
3891
3892 sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
3893 if not sub_filename_base:
3894 self.to_screen('[info] Skipping writing video subtitles')
3895 return ret
3896 for sub_lang, sub_info in subtitles.items():
3897 sub_format = sub_info['ext']
3898 sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
3899 sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
3900 existing_sub = self.existing_file((sub_filename_final, sub_filename))
3901 if existing_sub:
3902 self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
3903 sub_info['filepath'] = existing_sub
3904 ret.append((existing_sub, sub_filename_final))
3905 continue
3906
3907 self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
3908 if sub_info.get('data') is not None:
3909 try:
3910 # Use newline='' to prevent conversion of newline characters
3911 # See https://github.com/ytdl-org/youtube-dl/issues/10268
3912 with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
3913 subfile.write(sub_info['data'])
3914 sub_info['filepath'] = sub_filename
3915 ret.append((sub_filename, sub_filename_final))
3916 continue
3917 except OSError:
3918 self.report_error(f'Cannot write video subtitles file {sub_filename}')
3919 return None
3920
3921 try:
3922 sub_copy = sub_info.copy()
3923 sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
3924 self.dl(sub_filename, sub_copy, subtitle=True)
3925 sub_info['filepath'] = sub_filename
3926 ret.append((sub_filename, sub_filename_final))
3927 except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
3928 msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
3929 if self.params.get('ignoreerrors') is not True: # False or 'only_download'
3930 if not self.params.get('ignoreerrors'):
3931 self.report_error(msg)
3932 raise DownloadError(msg)
3933 self.report_warning(msg)
3934 return ret
3935
3936 def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
3937 ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
3938 write_all = self.params.get('write_all_thumbnails', False)
3939 thumbnails, ret = [], []
3940 if write_all or self.params.get('writethumbnail', False):
3941 thumbnails = info_dict.get('thumbnails') or []
3942 multiple = write_all and len(thumbnails) > 1
3943
3944 if thumb_filename_base is None:
3945 thumb_filename_base = filename
3946 if thumbnails and not thumb_filename_base:
3947 self.write_debug(f'Skipping writing {label} thumbnail')
3948 return ret
3949
3950 for idx, t in list(enumerate(thumbnails))[::-1]:
3951 thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
3952 thumb_display_id = f'{label} thumbnail {t["id"]}'
3953 thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
3954 thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
3955
3956 existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
3957 if existing_thumb:
3958 self.to_screen('[info] %s is already present' % (
3959 thumb_display_id if multiple else f'{label} thumbnail').capitalize())
3960 t['filepath'] = existing_thumb
3961 ret.append((existing_thumb, thumb_filename_final))
3962 else:
3963 self.to_screen(f'[info] Downloading {thumb_display_id} ...')
3964 try:
3965 uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
3966 self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
3967 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
3968 shutil.copyfileobj(uf, thumbf)
3969 ret.append((thumb_filename, thumb_filename_final))
3970 t['filepath'] = thumb_filename
3971 except network_exceptions as err:
3972 thumbnails.pop(idx)
3973 self.report_warning(f'Unable to download {thumb_display_id}: {err}')
3974 if ret and not write_all:
3975 break
3976 return ret