import collections
import contextlib
import datetime
import errno
import fileinput
import functools
import io
import itertools
import json
import locale
import operator
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import time
import tokenize
import traceback
import unicodedata
import urllib.request
from string import ascii_letters

from .cache import Cache
from .compat import compat_os_name, compat_shlex_quote
from .cookies import load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.common import UnsupportedURLIE
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .postprocessor import _PLUGIN_CLASSES as plugin_postprocessors
from .postprocessor import (
    EmbedThumbnailPP,
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    FFmpegVideoConvertorPP,
    MoveFilesAfterDownloadPP,
    get_postprocessor,
)
from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
from .update import REPOSITORY, current_git_head, detect_variant
from .utils import (
    DEFAULT_OUTTMPL,
    IDENTITY,
    LINK_TEMPLATES,
    MEDIA_EXTENSIONS,
    NO_DEFAULT,
    NUMBER_RE,
    OUTTMPL_TYPES,
    POSTPROCESS_WHEN,
    STR_FORMAT_RE_TMPL,
    STR_FORMAT_TYPES,
    ContentTooShortError,
    DateRange,
    DownloadCancelled,
    DownloadError,
    EntryNotInPlaylist,
    ExistingVideoReached,
    ExtractorError,
    GeoRestrictedError,
    HEADRequest,
    ISO3166Utils,
    LazyList,
    MaxDownloadsReached,
    Namespace,
    PagedList,
    PerRequestProxyHandler,
    PlaylistEntries,
    Popen,
    PostProcessingError,
    ReExtractInfo,
    RejectedVideoReached,
    SameFileError,
    UnavailableVideoError,
    UserNotLive,
    YoutubeDLCookieProcessor,
    YoutubeDLHandler,
    YoutubeDLRedirectHandler,
    age_restricted,
    args_to_str,
    bug_reports_message,
    date_from_str,
    deprecation_warning,
    determine_ext,
    determine_protocol,
    encode_compat_str,
    encodeFilename,
    error_to_compat_str,
    escapeHTML,
    expand_path,
    filter_dict,
    float_or_none,
    format_bytes,
    format_decimal_suffix,
    format_field,
    formatSeconds,
    get_compatible_ext,
    get_domain,
    int_or_none,
    iri_to_uri,
    is_path_like,
    join_nonempty,
    locked_file,
    make_archive_id,
    make_dir,
    make_HTTPS_handler,
    merge_headers,
    network_exceptions,
    number_of_digits,
    orderedSet,
    orderedSet_from_options,
    parse_filesize,
    preferredencoding,
    prepend_extension,
    register_socks_protocols,
    remove_terminal_sequences,
    render_table,
    replace_extension,
    sanitize_filename,
    sanitize_path,
    sanitize_url,
    sanitized_Request,
    std_headers,
    str_or_none,
    strftime_or_none,
    subtitles_filename,
    supports_terminal_sequences,
    system_identifier,
    timetuple_from_msec,
    to_high_limit_path,
    traverse_obj,
    try_call,
    try_get,
    url_basename,
    variadic,
    version_tuple,
    windows_enable_vt_mode,
    write_json_file,
    write_string,
)
from .version import RELEASE_GIT_HEAD, VARIANT, __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL:
    """YoutubeDL class.

    YoutubeDL objects are responsible for downloading the actual video file
    and writing it to disk if the user has requested it, among other tasks.
    In most cases there should be one per program. Since, given a video URL,
    the downloader does not know how to extract all the needed information
    (a task that InfoExtractors perform), it has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge of the InfoExtractors
    that are added to it, so this is a "mutual registration".

    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A dict with keys WHEN mapped to a list of templates to
                       print to stdout. The allowed keys are video or any of the
                       items in utils.POSTPROCESS_WHEN.
                       For compatibility, a single list is also accepted
    print_to_file:     A dict with keys WHEN (same as forceprint) mapped to
                       a list of tuples with (template, filename)
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or list_thumbnails is used
    format:            Video format code. See "FORMAT SELECTION" for more details.
                       You can also pass a function. The function takes 'ctx' as
                       argument and returns the formats to download.
                       See "build_format_selector" for an implementation
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental)
    format_sort:       A list of fields by which to sort the video formats.
                       See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    prefer_free_formats: Whether to prefer video formats with free containers
                       over non-free ones of same quality.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file
    check_formats:     Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none),
                       'selected' (check selected formats),
                       or None (check only if requested by extractor)
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py)
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       For compatibility with youtube-dl, a single string can also be used
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be Windows compatible
    ignoreerrors:      Do not stop on download/postprocessing errors.
                       Can be 'only_download' to ignore only download errors.
                       Default is 'only_download' for CLI, but False for API
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped
    allowed_extractors: List of regexes to match against extractor names that are allowed
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False
                       For compatibility with youtube-dl,
                       "nooverwrites" may also be used instead
    playlist_items:    Specific indices of playlist to download.
    playlistrandom:    Download playlist items in random order.
    lazy_playlist:     Process playlist entries as they are received.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Print everything to stderr instead of stdout.
    consoletitle:      Display progress in console window's titlebar.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video metadata to a .info.json file
    clean_infojson:    Remove private fields from the infojson
    getcomments:       Extract video comments. This will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc
                       also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. The language can be prefixed with a "-" to
                       exclude it from the requested languages, e.g. ['all', '-live_chat']
    keepvideo:         Keep the video file after post-processing
    daterange:         A DateRange object; download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Unsuitable videos for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  A set, or the name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
    break_per_url:     Whether break_on_reject and break_on_existing
                       should act on each input URL as opposed to for the entire queue
    cookiefile:        File name or text stream from where cookies should be read and dumped to
    cookiesfrombrowser: A tuple containing the name of the browser, the profile
                       name/path from where cookies are loaded, the name of the keyring,
                       and the container name, e.g. ('chrome', ) or
                       ('vivaldi', 'default', 'BASICTEXT') or ('firefox', 'default', None, 'Meta')
    legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
                       support RFC 5746 secure renegotiation
    nocheckcertificate: Do not verify SSL certificates
    client_certificate: Path to client certificate file in PEM format. May include the private key
    client_certificate_key: Path to private key file for client certificate
    client_certificate_password: Password for client certificate private key, if encrypted.
                       If not provided and the key is encrypted, yt-dlp will ask interactively
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       (Only supported by some extractors)
    http_headers:      A dictionary of custom headers to be used for all requests
    proxy:             URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic:Print out sent and received HTTP traffic
    default_search:    Prepend this string if an input url is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Whether to resolve and process url_results further
                       * False:     Always process (default)
                       * True:      Never process
                       * 'in_playlist': Do not process inside playlist/multi_video
                       * 'discard': Always process, but don't return the result
                                    from inside playlist/multi_video
                       * 'discard_in_playlist': Same as "discard", but only for
                                    playlists (not multi_video)
    wait_for_video:    If given, wait for scheduled streams to become available.
                       The value should be a tuple containing the range
                       (min_secs, max_secs) to wait between retries
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Allowed values are
                               the entries of utils.POSTPROCESS_WHEN
                               Assumed to be 'post_process' if not given
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading", or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
    postprocessor_hooks: A list of functions that get called on postprocessing
                       progress, with a dictionary with the entries
                       * status: One of "started", "processing", or "finished".
                                 Check this first and ignore unknown values.
                       * postprocessor: Name of the postprocessor
                       * info_dict: The extracted info_dict

                       Progress hooks are guaranteed to be called at least twice
                       (with status "started" and "finished") if the processing is successful.
    merge_output_format: "/" separated list of extensions to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                         about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval:Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called for every video with the signature
                       (info_dict, *, incomplete: bool) -> Optional[str]
                       For backward compatibility with youtube-dl, the signature
                       (info_dict) -> Optional[str] is also allowed.
                       - If it returns a message, the video is ignored.
                       - If it returns None, the video is downloaded.
                       - If it returns utils.NO_DEFAULT, the user is interactively
                         asked whether to download the video.
                       match_filter_func in utils.py is one example of this.
    no_color:          Do not emit color codes in output.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-1 alpha-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat, format-sort,
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
                       Refer to __init__.py for their implementation
    progress_template: Dictionary of templates for progress outputs.
                       Allowed keys are 'download', 'postprocess',
                       'download-title' (console title) and 'postprocess-title'.
                       The template is mapped on a dictionary with keys 'progress' and 'info'
    retry_sleep_functions: Dictionary of functions that take the number of attempts
                       as argument and return the time to sleep in seconds.
                       Allowed keys are 'http', 'fragment', 'file_access'
    download_ranges:   A callback function that gets called for every video with
                       the signature (info_dict, ydl) -> Iterable[Section].
                       Only the returned sections will be downloaded.
                       Each Section is a dict with the following keys:
                       * start_time: Start time of the section in seconds
                       * end_time: End time of the section in seconds
                       * title: Section title (Optional)
                       * index: Section number (Optional)
    force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
    noprogress:        Do not print the progress bar
    live_from_start:   Whether to download livestream videos from the start

    The following parameters are not used by YoutubeDL itself; they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP
                       For compatibility with youtube-dl, a single list of args
                       can also be used

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists to different formats at
                       discontinuities such as ad breaks (default: False)
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       E.g. {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched:      Mark videos watched (even with --simulate). Only for YouTube

    The following options are deprecated and may be removed in the future:

    force_generic_extractor: Force downloader to use the generic extractor
                       - Use allowed_extractors = ['generic', 'default']
    playliststart:     - Use playlist_items
                       Playlist item to start at.
    playlistend:       - Use playlist_items
                       Playlist item to end at.
    playlistreverse:   - Use playlist_items
                       Download playlist items in reverse order.
    forceurl:          - Use forceprint
                       Force printing final URL.
    forcetitle:        - Use forceprint
                       Force printing title.
    forceid:           - Use forceprint
                       Force printing ID.
    forcethumbnail:    - Use forceprint
                       Force printing thumbnail URL.
    forcedescription:  - Use forceprint
                       Force printing description.
    forcefilename:     - Use forceprint
                       Force printing final filename.
    forceduration:     - Use forceprint
                       Force printing duration.
    allsubtitles:      - Use subtitleslangs = ['all']
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    include_ads:       - Doesn't work
                       Download ads as well
    call_home:         - Not implemented
                       Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging.
    post_hooks:        - Register a custom postprocessor
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.
    prefer_ffmpeg:     - avconv support is deprecated
                       If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: - Use extractor_args
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
    """

    _NUMERIC_FIELDS = {
        'width', 'height', 'asr', 'audio_channels', 'fps',
        'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
    }

    _format_fields = {
        # NB: Keep in sync with the docstring of extractor/common.py
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
        'preference', 'language', 'language_preference', 'quality', 'source_preference',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
    }
    _format_selection_exts = {
        'audio': set(MEDIA_EXTENSIONS.common_audio),
        'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
        'storyboards': set(MEDIA_EXTENSIONS.storyboards),
    }

    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        if params is None:
            params = {}
        self.params = params
        self._ies = {}
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._num_videos = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)

        windows_enable_vt_mode()
        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            out=stdout,
            error=sys.stderr,
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
        )
        self._allow_colors = Namespace(**{
            type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
            for type_, stream in self._out_files.items_ if type_ != 'console'
        })

        # The code is left like this to be reused for future deprecations
        MIN_SUPPORTED, MIN_RECOMMENDED = (3, 7), (3, 7)
        current_version = sys.version_info[:2]
        if current_version < MIN_RECOMMENDED:
            msg = ('Support for Python version %d.%d has been deprecated. '
                   'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details.'
                   '\n You will no longer receive updates on this version')
            if current_version < MIN_SUPPORTED:
                msg = 'Python version %d.%d is no longer supported'
            self.deprecation_warning(
                f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                ' If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        if self.params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = shutil.get_terminal_size().columns
                width_args = [] if width is None else ['-w', str(width)]
                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        if auto_init and auto_init != 'no_verbose_header':
            self.print_debug_header()

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecated_feature(msg)

        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        if self.params.get('simulate') is None and any((
            self.params.get('list_thumbnails'),
            self.params.get('listformats'),
            self.params.get('listsubtitles'),
        )):
            self.params['simulate'] = 'list_only'

        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}

        if auto_init:
            self.add_default_info_extractors()

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))

        # Set http_headers defaults according to std_headers
        self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))

        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                when=when)

        self._setup_opener()
        register_socks_protocols()

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            archive = set()
            if fn is None:
                return archive
            elif not is_path_like(fn):
                return fn

            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
            return archive

        self.archive = preload_download_archive(self.params.get('download_archive'))
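
    # Sketch of the 'postprocessors' entry shape consumed by the loop in
    # __init__ above ('EmbedThumbnail' is a real key from
    # yt_dlp/postprocessor/__init__.py; the rest is illustrative):
    #
    #   YoutubeDL({'postprocessors': [
    #       {'key': 'EmbedThumbnail', 'when': 'post_process'},
    #   ]})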

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key. It will try to get one from
        the _ies list; if there is no instance, it will create a new one and
        add it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        all_ies = {ie.IE_NAME.lower(): ie for ie in gen_extractor_classes()}
        all_ies['end'] = UnsupportedURLIE()
        try:
            ie_names = orderedSet_from_options(
                self.params.get('allowed_extractors', ['default']), {
                    'all': list(all_ies),
                    'default': [name for name, ie in all_ies.items() if ie._ENABLED],
                }, use_regex=True)
        except re.error as e:
            raise ValueError(f'Wrong regex for allowed_extractors: {e.pattern}')
        for name in ie_names:
            self.add_info_extractor(all_ies[name])
        self.write_debug(f'Loaded {len(ie_names)} extractors')

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
            for pp in pps:
                pp.add_progress_hook(ph)
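
    # Sketch of a user-supplied hook callable matching the class docstring
    # above (the names are hypothetical; not part of this module):
    #
    #   def my_progress_hook(d):
    #       if d['status'] == 'finished':  # check status first, ignore unknown values
    #           print('Done downloading', d['info_dict'].get('id'))
    #
    #   ydl.add_progress_hook(my_progress_hook)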

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. '
                                     'Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. '
                                     'Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)

    def to_screen(self, message, skip_eol=False, quiet=None, only_once=False):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            return
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            return
        self._write_string(
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen, only_once=only_once)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)

    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
            return
        self._write_string(code, self._out_files.console)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        else:
            self._send_console_code(f'\033]0;{message}\007')

    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[22;0t')  # Save the title on stack

    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[23;0t')  # Restore the title from stack

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None, is_error=True):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        @param tb          If given, is additional traceback information
        @param is_error    Whether to raise error according to ignoreerrors
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not is_error:
            return
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    Styles = Namespace(
        HEADERS='yellow',
        EMPHASIS='light blue',
        FILENAME='green',
        ID='green',
        DELIM='blue',
        ERROR='red',
        WARNING='yellow',
        SUPPRESS='light black',
    )

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        text = str(text)
        if test_encoding:
            original_text = text
            # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
                text = fallback
        return format_text(text, f) if allow_colors else text if fallback is None else fallback

    def _format_out(self, *args, **kwargs):
        return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)

    def _format_screen(self, *args, **kwargs):
        return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)

    def _format_err(self, *args, **kwargs):
        return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message, *, stacklevel=0):
        deprecation_warning(
            message, stacklevel=stacklevel + 1, printer=self.report_error, is_error=False)

    def deprecated_feature(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'Deprecated Feature: {message}')
        self.to_stderr(f'{self._format_err("Deprecated Feature:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, *args, **kwargs):
        '''
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

    def write_debug(self, message, only_once=False):
        '''Log debug message, or print message to stderr'''
        if not self.params.get('verbose', False):
            return
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']

    def _parse_outtmpl(self):
        sanitize = IDENTITY
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict), '"paths" parameter must be a dictionary'
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$'
        # correspondingly that is not what we want since we need to keep
        # '%%' intact for template dict substitution step. Working around
        # with boundary-alike separator hack.
        sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. E.g. for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err
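
    # Sketch: validate_outtmpl() returns None for a valid template and the
    # offending ValueError otherwise, so a caller can fail fast:
    #
    #   err = YoutubeDL.validate_outtmpl('%(title)s.%(ext)s')
    #   if err is not None:
    #       raise SystemExit(f'Invalid output template: {err}')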

    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)
        return info_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed
        """

        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int, slice or "{field, ...}"
        FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}
        FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {
            'inner': FIELD_INNER_RE,
            'field': rf'\w*(?:\.{FIELD_INNER_RE})*'
        }
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?x)
            (?P<negate>-)?
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<remaining>
                (?P<alternate>(?<!\\),[^|&)]+)?
                (?:&(?P<replacement>.*?))?
                (?:\|(?P<default>.*?))?
            )$''')

        def _traverse_infodict(fields):
            fields = [f for x in re.split(r'\.({.+?})\.?', fields)
                      for f in ([x] if x.startswith('{') else x.split('.'))]
            for i in (0, -1):
                if fields and not fields[i]:
                    fields.pop(i)

            for i, f in enumerate(fields):
                if not f.startswith('{'):
                    continue
                assert f.endswith('}'), f'No closing brace for {f} in {fields}'
                fields[i] = {k: k.split('.') for k in f[1:-1].split(',')}

            return traverse_obj(info_dict, fields, is_user_input=True, traverse_string=True)

        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
            if sanitize and value == '':
                value = None
            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']
                else NO_DEFAULT))

        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            return repr(obj)

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            initial_field = mobj.group('fields') if mobj else ''
            value, replacement, default = None, None, na
            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                replacement = mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
                else:
                    break

            fmt = outer_mobj.group('format')
            if fmt == 's' and value is not None and key in field_size_compat_map.keys():
                fmt = f'0{field_size_compat_map[key]:d}d'

            value = default if value is None else value if replacement is None else replacement

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
            if fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(
                    value, default=_dumpjson_default,
                    indent=4 if '#' in flags else None, ensure_ascii='+' not in flags), str_fmt
            elif fmt[-1] == 'h':  # html
                value, fmt = escapeHTML(str(value)), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode() % str(value).encode()
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                    value), str_fmt
            elif fmt[-1] == 'D':  # decimal suffix
                num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
                value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
                                              factor=1024 if '#' in flags else 1000)
            elif fmt[-1] == 'S':  # filename sanitization
                value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
            elif fmt[-1] == 'c':
                if value:
                    value = str(value)[0]
                else:
                    fmt = str_fmt
            elif fmt[-1] not in 'rs':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'

            if sanitize:
                if fmt[-1] == 'r':
                    # If value is an object, sanitize might convert it to a string
                    # So we convert it to repr first
                    value, fmt = repr(value), str_fmt
                if fmt[-1] in 'csr':
                    value = sanitizer(initial_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT

    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict
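
    # Sketch of template evaluation (the info_dict values are hypothetical):
    #
    #   ydl.evaluate_outtmpl('%(title)s-%(id)s.%(ext)s',
    #                        {'title': 'a', 'id': 'b', 'ext': 'mp4'})
    #   # -> 'a-b.mp4'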

    def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
        assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
        if outtmpl is None:
            outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
        try:
            outtmpl = self._outtmpl_expandpath(outtmpl)
            filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
            if not filename:
                return None

            if tmpl_type in ('', 'temp'):
                final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
                if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
                    filename = replace_extension(filename, ext, final_ext)
            elif tmpl_type:
                force_ext = OUTTMPL_TYPES[tmpl_type]
                if force_ext:
                    filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                no_ext, *ext = filename.rsplit('.', 2)
                filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

    def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
        """Generate the output filename"""
        if outtmpl:
            assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
            dir_type = None
        filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
        if not filename and dir_type not in ('', 'temp'):
            return ''

        if warn:
            if not self.params.get('paths'):
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
        if filename == '-' or not filename:
            return filename

        return self.get_output_path(dir_type, filename)
1358
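# Editor's note -- illustrative behavior sketch with hypothetical values: with the
# template '%(title)s.%(ext)s' and --paths home:/media, prepare_filename() evaluates
# the template to 'Example.mp4' for {'title': 'Example', 'ext': 'mp4'} and
# get_output_path() joins it into '/media/Example.mp4'. As warned above, a filename
# of '-' (stdout) or an absolute path in the template bypasses --paths.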
1359 def _match_entry(self, info_dict, incomplete=False, silent=False):
1360 """Returns None if the file should be downloaded"""
1361 _type = info_dict.get('_type', 'video')
1362 assert incomplete or _type == 'video', 'Only video result can be considered complete'
1363
1364 video_title = info_dict.get('title', info_dict.get('id', 'entry'))
1365
1366 def check_filter():
1367 if _type in ('playlist', 'multi_video'):
1368 return
1369 elif _type in ('url', 'url_transparent') and not try_call(
1370 lambda: self.get_info_extractor(info_dict['ie_key']).is_single_video(info_dict['url'])):
1371 return
1372
1373 if 'title' in info_dict:
1374 # This can happen when we're just evaluating the playlist
1375 title = info_dict['title']
1376 matchtitle = self.params.get('matchtitle', False)
1377 if matchtitle:
1378 if not re.search(matchtitle, title, re.IGNORECASE):
1379 return '"' + title + '" title did not match pattern "' + matchtitle + '"'
1380 rejecttitle = self.params.get('rejecttitle', False)
1381 if rejecttitle:
1382 if re.search(rejecttitle, title, re.IGNORECASE):
1383 return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
1384
1385 date = info_dict.get('upload_date')
1386 if date is not None:
1387 dateRange = self.params.get('daterange', DateRange())
1388 if date not in dateRange:
1389 return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
1390 view_count = info_dict.get('view_count')
1391 if view_count is not None:
1392 min_views = self.params.get('min_views')
1393 if min_views is not None and view_count < min_views:
1394 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
1395 max_views = self.params.get('max_views')
1396 if max_views is not None and view_count > max_views:
1397 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
1398 if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
1399 return 'Skipping "%s" because it is age restricted' % video_title
1400
1401 match_filter = self.params.get('match_filter')
1402 if match_filter is not None:
1403 try:
1404 ret = match_filter(info_dict, incomplete=incomplete)
1405 except TypeError:
1406 # For backward compatibility
1407 ret = None if incomplete else match_filter(info_dict)
1408 if ret is NO_DEFAULT:
1409 while True:
1410 filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
1411 reply = input(self._format_screen(
1412 f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
1413 if reply in {'y', ''}:
1414 return None
1415 elif reply == 'n':
1416 return f'Skipping {video_title}'
1417 elif ret is not None:
1418 return ret
1419 return None
1420
1421 if self.in_download_archive(info_dict):
1422 reason = '%s has already been recorded in the archive' % video_title
1423 break_opt, break_err = 'break_on_existing', ExistingVideoReached
1424 else:
1425 reason = check_filter()
1426 break_opt, break_err = 'break_on_reject', RejectedVideoReached
1427 if reason is not None:
1428 if not silent:
1429 self.to_screen('[download] ' + reason)
1430 if self.params.get(break_opt, False):
1431 raise break_err()
1432 return reason
1433
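# Editor's sketch of the match_filter contract consumed above (the filter function
# itself is hypothetical):
#
#   def my_filter(info_dict, *, incomplete):
#       if (info_dict.get('duration') or 0) < 60:
#           return 'Skipping short video'  # str -> skip, printed as the reason
#       return None                        # None -> proceed with the download
#
#   YoutubeDL({'match_filter': my_filter})
#
# Returning utils.NO_DEFAULT instead triggers the interactive "Download ...? (Y/n)"
# prompt implemented above.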
1434 @staticmethod
1435 def add_extra_info(info_dict, extra_info):
1436 '''Set the keys from extra_info in info dict if they are missing'''
1437 for key, value in extra_info.items():
1438 info_dict.setdefault(key, value)
1439
1440 def extract_info(self, url, download=True, ie_key=None, extra_info=None,
1441 process=True, force_generic_extractor=False):
1442 """
1443 Extract and return the information dictionary of the URL
1444
1445 Arguments:
1446 @param url URL to extract
1447
1448 Keyword arguments:
1449 @param download Whether to download videos
1450 @param process Whether to resolve all unresolved references (URLs, playlist items).
1451 Must be True for download to work
1452 @param ie_key Use only the extractor with this key
1453
1454 @param extra_info Dictionary containing the extra values to add to the info (For internal use only)
1455 @param force_generic_extractor Force using the generic extractor (Deprecated; use ie_key='Generic')
1456 """
1457
1458 if extra_info is None:
1459 extra_info = {}
1460
1461 if not ie_key and force_generic_extractor:
1462 ie_key = 'Generic'
1463
1464 if ie_key:
1465 ies = {ie_key: self._ies[ie_key]} if ie_key in self._ies else {}
1466 else:
1467 ies = self._ies
1468
1469 for key, ie in ies.items():
1470 if not ie.suitable(url):
1471 continue
1472
1473 if not ie.working():
1474 self.report_warning('The program functionality for this site has been marked as broken, '
1475 'and will probably not work.')
1476
1477 temp_id = ie.get_temp_id(url)
1478 if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': key}):
1479 self.to_screen(f'[{key}] {temp_id}: has already been recorded in the archive')
1480 if self.params.get('break_on_existing', False):
1481 raise ExistingVideoReached()
1482 break
1483 return self.__extract_info(url, self.get_info_extractor(key), download, extra_info, process)
1484 else:
1485 extractors_restricted = self.params.get('allowed_extractors') not in (None, ['default'])
1486 self.report_error(f'No suitable extractor{format_field(ie_key, None, " (%s)")} found for URL {url}',
1487 tb=False if extractors_restricted else None)
1488
1489 def _handle_extraction_exceptions(func):
1490 @functools.wraps(func)
1491 def wrapper(self, *args, **kwargs):
1492 while True:
1493 try:
1494 return func(self, *args, **kwargs)
1495 except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
1496 raise
1497 except ReExtractInfo as e:
1498 if e.expected:
1499 self.to_screen(f'{e}; Re-extracting data')
1500 else:
1501 self.to_stderr('\r')
1502 self.report_warning(f'{e}; Re-extracting data')
1503 continue
1504 except GeoRestrictedError as e:
1505 msg = e.msg
1506 if e.countries:
1507 msg += '\nThis video is available in %s.' % ', '.join(
1508 map(ISO3166Utils.short2full, e.countries))
1509 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around this.'
1510 self.report_error(msg)
1511 except ExtractorError as e: # An error we somewhat expected
1512 self.report_error(str(e), e.format_traceback())
1513 except Exception as e:
1514 if self.params.get('ignoreerrors'):
1515 self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
1516 else:
1517 raise
1518 break
1519 return wrapper
1520
1521 def _wait_for_video(self, ie_result={}):
1522 if (not self.params.get('wait_for_video')
1523 or ie_result.get('_type', 'video') != 'video'
1524 or ie_result.get('formats') or ie_result.get('url')):
1525 return
1526
1527 format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
1528 last_msg = ''
1529
1530 def progress(msg):
1531 nonlocal last_msg
1532 full_msg = f'{msg}\n'
1533 if not self.params.get('noprogress'):
1534 full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
1535 elif last_msg:
1536 return
1537 self.to_screen(full_msg, skip_eol=True)
1538 last_msg = msg
1539
1540 min_wait, max_wait = self.params.get('wait_for_video')
1541 diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
1542 if diff is None and ie_result.get('live_status') == 'is_upcoming':
1543 diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
1544 self.report_warning('Release time of video is not known')
1545 elif ie_result and (diff or 0) <= 0:
1546 self.report_warning('Video should already be available according to extracted info')
1547 diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
1548 self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')
1549
1550 wait_till = time.time() + diff
1551 try:
1552 while True:
1553 diff = wait_till - time.time()
1554 if diff <= 0:
1555 progress('')
1556 raise ReExtractInfo('[wait] Wait period ended', expected=True)
1557 progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
1558 time.sleep(1)
1559 except KeyboardInterrupt:
1560 progress('')
1561 raise ReExtractInfo('[wait] Interrupted by user', expected=True)
1562 except BaseException as e:
1563 if not isinstance(e, ReExtractInfo):
1564 self.to_screen('')
1565 raise
1566
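# Editor's note: 'wait_for_video' is a (min_seconds, max_seconds) tuple, e.g.
# YoutubeDL({'wait_for_video': (60, 3600)}). When the release time of an upcoming
# video is unknown, a wait within those bounds is picked at random; otherwise the
# computed wait is clamped to them, and ReExtractInfo then re-runs the extraction.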
1567 @_handle_extraction_exceptions
1568 def __extract_info(self, url, ie, download, extra_info, process):
1569 try:
1570 ie_result = ie.extract(url)
1571 except UserNotLive as e:
1572 if process:
1573 if self.params.get('wait_for_video'):
1574 self.report_warning(e)
1575 self._wait_for_video()
1576 raise
1577 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
1578 self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
1579 return
1580 if isinstance(ie_result, list):
1581 # Backwards compatibility: old IE result format
1582 ie_result = {
1583 '_type': 'compat_list',
1584 'entries': ie_result,
1585 }
1586 if extra_info.get('original_url'):
1587 ie_result.setdefault('original_url', extra_info['original_url'])
1588 self.add_default_extra_info(ie_result, ie, url)
1589 if process:
1590 self._wait_for_video(ie_result)
1591 return self.process_ie_result(ie_result, download, extra_info)
1592 else:
1593 return ie_result
1594
1595 def add_default_extra_info(self, ie_result, ie, url):
1596 if url is not None:
1597 self.add_extra_info(ie_result, {
1598 'webpage_url': url,
1599 'original_url': url,
1600 })
1601 webpage_url = ie_result.get('webpage_url')
1602 if webpage_url:
1603 self.add_extra_info(ie_result, {
1604 'webpage_url_basename': url_basename(webpage_url),
1605 'webpage_url_domain': get_domain(webpage_url),
1606 })
1607 if ie is not None:
1608 self.add_extra_info(ie_result, {
1609 'extractor': ie.IE_NAME,
1610 'extractor_key': ie.ie_key(),
1611 })
1612
1613 def process_ie_result(self, ie_result, download=True, extra_info=None):
1614 """
1615 Take the result of the ie (may be modified) and resolve all unresolved
1616 references (URLs, playlist items).
1617
1618 It will also download the videos if 'download' is True.
1619 Returns the resolved ie_result.
1620 """
1621 if extra_info is None:
1622 extra_info = {}
1623 result_type = ie_result.get('_type', 'video')
1624
1625 if result_type in ('url', 'url_transparent'):
1626 ie_result['url'] = sanitize_url(
1627 ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
1628 if ie_result.get('original_url'):
1629 extra_info.setdefault('original_url', ie_result['original_url'])
1630
1631 extract_flat = self.params.get('extract_flat', False)
1632 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1633 or extract_flat is True):
1634 info_copy = ie_result.copy()
1635 ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
1636 if ie and not ie_result.get('id'):
1637 info_copy['id'] = ie.get_temp_id(ie_result['url'])
1638 self.add_default_extra_info(info_copy, ie, ie_result['url'])
1639 self.add_extra_info(info_copy, extra_info)
1640 info_copy, _ = self.pre_process(info_copy)
1641 self._fill_common_fields(info_copy, False)
1642 self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
1643 self._raise_pending_errors(info_copy)
1644 if self.params.get('force_write_download_archive', False):
1645 self.record_download_archive(info_copy)
1646 return ie_result
1647
1648 if result_type == 'video':
1649 self.add_extra_info(ie_result, extra_info)
1650 ie_result = self.process_video_result(ie_result, download=download)
1651 self._raise_pending_errors(ie_result)
1652 additional_urls = (ie_result or {}).get('additional_urls')
1653 if additional_urls:
1654 # TODO: Improve MetadataParserPP to allow setting a list
1655 if isinstance(additional_urls, str):
1656 additional_urls = [additional_urls]
1657 self.to_screen(
1658 '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
1659 self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
1660 ie_result['additional_entries'] = [
1661 self.extract_info(
1662 url, download, extra_info=extra_info,
1663 force_generic_extractor=self.params.get('force_generic_extractor'))
1664 for url in additional_urls
1665 ]
1666 return ie_result
1667 elif result_type == 'url':
1668 # We have to add extra_info to the results because it may be
1669 # contained in a playlist
1670 return self.extract_info(
1671 ie_result['url'], download,
1672 ie_key=ie_result.get('ie_key'),
1673 extra_info=extra_info)
1674 elif result_type == 'url_transparent':
1675 # Use the information from the embedding page
1676 info = self.extract_info(
1677 ie_result['url'], ie_key=ie_result.get('ie_key'),
1678 extra_info=extra_info, download=False, process=False)
1679
1680 # extract_info may return None when ignoreerrors is enabled and
1681 # extraction failed with an error, don't crash and return early
1682 # in this case
1683 if not info:
1684 return info
1685
1686 exempted_fields = {'_type', 'url', 'ie_key'}
1687 if not ie_result.get('section_end') and ie_result.get('section_start') is None:
1688 # For video clips, the id etc of the clip extractor should be used
1689 exempted_fields |= {'id', 'extractor', 'extractor_key'}
1690
1691 new_result = info.copy()
1692 new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))
1693
1694 # Extracted info may not be a video result (i.e.
1695 # info.get('_type', 'video') != video) but rather an url or
1696 # url_transparent. In such cases outer metadata (from ie_result)
1697 # should be propagated to inner one (info). For this to happen
1698 # _type of info should be overridden with url_transparent. This
1699 # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
1700 if new_result.get('_type') == 'url':
1701 new_result['_type'] = 'url_transparent'
1702
1703 return self.process_ie_result(
1704 new_result, download=download, extra_info=extra_info)
1705 elif result_type in ('playlist', 'multi_video'):
1706 # Protect from infinite recursion due to recursively nested playlists
1707 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
1708 webpage_url = ie_result.get('webpage_url') # Playlists may not have webpage_url
1709 if webpage_url and webpage_url in self._playlist_urls:
1710 self.to_screen(
1711 '[download] Skipping already downloaded playlist: %s'
1712 % (ie_result.get('title') or ie_result.get('id')))
1713 return
1714
1715 self._playlist_level += 1
1716 self._playlist_urls.add(webpage_url)
1717 self._fill_common_fields(ie_result, False)
1718 self._sanitize_thumbnails(ie_result)
1719 try:
1720 return self.__process_playlist(ie_result, download)
1721 finally:
1722 self._playlist_level -= 1
1723 if not self._playlist_level:
1724 self._playlist_urls.clear()
1725 elif result_type == 'compat_list':
1726 self.report_warning(
1727 'Extractor %s returned a compat_list result. '
1728 'It needs to be updated.' % ie_result.get('extractor'))
1729
1730 def _fixup(r):
1731 self.add_extra_info(r, {
1732 'extractor': ie_result['extractor'],
1733 'webpage_url': ie_result['webpage_url'],
1734 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1735 'webpage_url_domain': get_domain(ie_result['webpage_url']),
1736 'extractor_key': ie_result['extractor_key'],
1737 })
1738 return r
1739 ie_result['entries'] = [
1740 self.process_ie_result(_fixup(r), download, extra_info)
1741 for r in ie_result['entries']
1742 ]
1743 return ie_result
1744 else:
1745 raise Exception('Invalid result type: %s' % result_type)
1746
1747 def _ensure_dir_exists(self, path):
1748 return make_dir(path, self.report_error)
1749
1750 @staticmethod
1751 def _playlist_infodict(ie_result, strict=False, **kwargs):
1752 info = {
1753 'playlist_count': ie_result.get('playlist_count'),
1754 'playlist': ie_result.get('title') or ie_result.get('id'),
1755 'playlist_id': ie_result.get('id'),
1756 'playlist_title': ie_result.get('title'),
1757 'playlist_uploader': ie_result.get('uploader'),
1758 'playlist_uploader_id': ie_result.get('uploader_id'),
1759 **kwargs,
1760 }
1761 if strict:
1762 return info
1763 if ie_result.get('webpage_url'):
1764 info.update({
1765 'webpage_url': ie_result['webpage_url'],
1766 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1767 'webpage_url_domain': get_domain(ie_result['webpage_url']),
1768 })
1769 return {
1770 **info,
1771 'playlist_index': 0,
1772 '__last_playlist_index': max(ie_result['requested_entries'] or (0, 0)),
1773 'extractor': ie_result['extractor'],
1774 'extractor_key': ie_result['extractor_key'],
1775 }
1776
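# Editor's illustrative example: for ie_result {'id': 'PL1', 'title': 'Mix'},
# _playlist_infodict(ie_result, strict=True, n_entries=3) returns
# {'playlist_count': None, 'playlist': 'Mix', 'playlist_id': 'PL1',
#  'playlist_title': 'Mix', 'playlist_uploader': None,
#  'playlist_uploader_id': None, 'n_entries': 3}.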
1777 def __process_playlist(self, ie_result, download):
1778 """Process each entry in the playlist"""
1779 assert ie_result['_type'] in ('playlist', 'multi_video')
1780
1781 common_info = self._playlist_infodict(ie_result, strict=True)
1782 title = common_info.get('playlist') or '<Untitled>'
1783 if self._match_entry(common_info, incomplete=True) is not None:
1784 return
1785 self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')
1786
1787 all_entries = PlaylistEntries(self, ie_result)
1788 entries = orderedSet(all_entries.get_requested_items(), lazy=True)
1789
1790 lazy = self.params.get('lazy_playlist')
1791 if lazy:
1792 resolved_entries, n_entries = [], 'N/A'
1793 ie_result['requested_entries'], ie_result['entries'] = None, None
1794 else:
1795 entries = resolved_entries = list(entries)
1796 n_entries = len(resolved_entries)
1797 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
1798 if not ie_result.get('playlist_count'):
1799 # Better to do this after potentially exhausting entries
1800 ie_result['playlist_count'] = all_entries.get_full_count()
1801
1802 extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
1803 ie_copy = collections.ChainMap(ie_result, extra)
1804
1805 _infojson_written = False
1806 write_playlist_files = self.params.get('allow_playlist_files', True)
1807 if write_playlist_files and self.params.get('list_thumbnails'):
1808 self.list_thumbnails(ie_result)
1809 if write_playlist_files and not self.params.get('simulate'):
1810 _infojson_written = self._write_info_json(
1811 'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
1812 if _infojson_written is None:
1813 return
1814 if self._write_description('playlist', ie_result,
1815 self.prepare_filename(ie_copy, 'pl_description')) is None:
1816 return
1817 # TODO: This should be passed to ThumbnailsConvertor if necessary
1818 self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))
1819
1820 if lazy:
1821 if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
1822 self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
1823 elif self.params.get('playlistreverse'):
1824 entries.reverse()
1825 elif self.params.get('playlistrandom'):
1826 random.shuffle(entries)
1827
1828 self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} items'
1829 f'{format_field(ie_result, "playlist_count", " of %s")}')
1830
1831 keep_resolved_entries = self.params.get('extract_flat') != 'discard'
1832 if self.params.get('extract_flat') == 'discard_in_playlist':
1833 keep_resolved_entries = ie_result['_type'] != 'playlist'
1834 if keep_resolved_entries:
1835 self.write_debug('The information of all playlist entries will be held in memory')
1836
1837 failures = 0
1838 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
1839 for i, (playlist_index, entry) in enumerate(entries):
1840 if lazy:
1841 resolved_entries.append((playlist_index, entry))
1842 if not entry:
1843 continue
1844
1845 entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
1846 if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
1847 playlist_index = ie_result['requested_entries'][i]
1848
1849 entry_copy = collections.ChainMap(entry, {
1850 **common_info,
1851 'n_entries': int_or_none(n_entries),
1852 'playlist_index': playlist_index,
1853 'playlist_autonumber': i + 1,
1854 })
1855
1856 if self._match_entry(entry_copy, incomplete=True) is not None:
1857 # For compatibility with youtube-dl. See https://github.com/yt-dlp/yt-dlp/issues/4369
1858 resolved_entries[i] = (playlist_index, NO_DEFAULT)
1859 continue
1860
1861 self.to_screen('[download] Downloading item %s of %s' % (
1862 self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
1863
1864 extra.update({
1865 'playlist_index': playlist_index,
1866 'playlist_autonumber': i + 1,
1867 })
1868 entry_result = self.__process_iterable_entry(entry, download, extra)
1869 if not entry_result:
1870 failures += 1
1871 if failures >= max_failures:
1872 self.report_error(
1873 f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
1874 break
1875 if keep_resolved_entries:
1876 resolved_entries[i] = (playlist_index, entry_result)
1877
1878 # Update with processed data
1879 ie_result['entries'] = [e for _, e in resolved_entries if e is not NO_DEFAULT]
1880 ie_result['requested_entries'] = [i for i, e in resolved_entries if e is not NO_DEFAULT]
1881 if ie_result['requested_entries'] == try_call(lambda: list(range(1, ie_result['playlist_count'] + 1))):
1882 # Do not set for full playlist
1883 ie_result.pop('requested_entries')
1884
1885 # Write the updated info to json
1886 if _infojson_written is True and self._write_info_json(
1887 'updated playlist', ie_result,
1888 self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
1889 return
1890
1891 ie_result = self.run_all_pps('playlist', ie_result)
1892 self.to_screen(f'[download] Finished downloading playlist: {title}')
1893 return ie_result
1894
1895 @_handle_extraction_exceptions
1896 def __process_iterable_entry(self, entry, download, extra_info):
1897 return self.process_ie_result(
1898 entry, download=download, extra_info=extra_info)
1899
1900 def _build_format_filter(self, filter_spec):
1901 " Returns a function to filter the formats according to the filter_spec "
1902
1903 OPERATORS = {
1904 '<': operator.lt,
1905 '<=': operator.le,
1906 '>': operator.gt,
1907 '>=': operator.ge,
1908 '=': operator.eq,
1909 '!=': operator.ne,
1910 }
1911 operator_rex = re.compile(r'''(?x)\s*
1912 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
1913 (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1914 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
1915 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
1916 m = operator_rex.fullmatch(filter_spec)
1917 if m:
1918 try:
1919 comparison_value = int(m.group('value'))
1920 except ValueError:
1921 comparison_value = parse_filesize(m.group('value'))
1922 if comparison_value is None:
1923 comparison_value = parse_filesize(m.group('value') + 'B')
1924 if comparison_value is None:
1925 raise ValueError(
1926 'Invalid value %r in format specification %r' % (
1927 m.group('value'), filter_spec))
1928 op = OPERATORS[m.group('op')]
1929
1930 if not m:
1931 STR_OPERATORS = {
1932 '=': operator.eq,
1933 '^=': lambda attr, value: attr.startswith(value),
1934 '$=': lambda attr, value: attr.endswith(value),
1935 '*=': lambda attr, value: value in attr,
1936 '~=': lambda attr, value: value.search(attr) is not None
1937 }
1938 str_operator_rex = re.compile(r'''(?x)\s*
1939 (?P<key>[a-zA-Z0-9._-]+)\s*
1940 (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
1941 (?P<quote>["'])?
1942 (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
1943 (?(quote)(?P=quote))\s*
1944 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
1945 m = str_operator_rex.fullmatch(filter_spec)
1946 if m:
1947 if m.group('op') == '~=':
1948 comparison_value = re.compile(m.group('value'))
1949 else:
1950 comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
1951 str_op = STR_OPERATORS[m.group('op')]
1952 if m.group('negation'):
1953 op = lambda attr, value: not str_op(attr, value)
1954 else:
1955 op = str_op
1956
1957 if not m:
1958 raise SyntaxError('Invalid filter specification %r' % filter_spec)
1959
1960 def _filter(f):
1961 actual_value = f.get(m.group('key'))
1962 if actual_value is None:
1963 return m.group('none_inclusive')
1964 return op(actual_value, comparison_value)
1965 return _filter
1966
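# Editor's illustrative examples of filter specs accepted above:
#   'height<=720'           numeric comparison via OPERATORS
#   'filesize>100M'         value parsed with parse_filesize()
#   'ext=mp4'               string equality via STR_OPERATORS
#   'format_note~=premium'  regex search; prefixing the operator with '!' negates it
#   'height<=?720'          a '?' after the operator also keeps formats where the field is None
# e.g. self._build_format_filter('height<=720')({'height': 480}) evaluates to True.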
1967 def _check_formats(self, formats):
1968 for f in formats:
1969 self.to_screen('[info] Testing format %s' % f['format_id'])
1970 path = self.get_output_path('temp')
1971 if not self._ensure_dir_exists(f'{path}/'):
1972 continue
1973 temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
1974 temp_file.close()
1975 try:
1976 success, _ = self.dl(temp_file.name, f, test=True)
1977 except (DownloadError, OSError, ValueError) + network_exceptions:
1978 success = False
1979 finally:
1980 if os.path.exists(temp_file.name):
1981 try:
1982 os.remove(temp_file.name)
1983 except OSError:
1984 self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
1985 if success:
1986 yield f
1987 else:
1988 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
1989
1990 def _default_format_spec(self, info_dict, download=True):
1991
1992 def can_merge():
1993 merger = FFmpegMergerPP(self)
1994 return merger.available and merger.can_merge()
1995
1996 prefer_best = (
1997 not self.params.get('simulate')
1998 and download
1999 and (
2000 not can_merge()
2001 or info_dict.get('is_live') and not self.params.get('live_from_start')
2002 or self.params['outtmpl']['default'] == '-'))
2003 compat = (
2004 prefer_best
2005 or self.params.get('allow_multiple_audio_streams', False)
2006 or 'format-spec' in self.params['compat_opts'])
2007
2008 return (
2009 'best/bestvideo+bestaudio' if prefer_best
2010 else 'bestvideo*+bestaudio/best' if not compat
2011 else 'bestvideo+bestaudio/best')
2012
2013 def build_format_selector(self, format_spec):
2014 def syntax_error(note, start):
2015 message = (
2016 'Invalid format specification: '
2017 '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
2018 return SyntaxError(message)
2019
2020 PICKFIRST = 'PICKFIRST'
2021 MERGE = 'MERGE'
2022 SINGLE = 'SINGLE'
2023 GROUP = 'GROUP'
2024 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
2025
2026 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
2027 'video': self.params.get('allow_multiple_video_streams', False)}
2028
2029 check_formats = self.params.get('check_formats') == 'selected'
2030
2031 def _parse_filter(tokens):
2032 filter_parts = []
2033 for type, string, start, _, _ in tokens:
2034 if type == tokenize.OP and string == ']':
2035 return ''.join(filter_parts)
2036 else:
2037 filter_parts.append(string)
2038
2039 def _remove_unused_ops(tokens):
2040 # Remove operators that we don't use and join them with the surrounding strings.
2041 # E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
2042 ALLOWED_OPS = ('/', '+', ',', '(', ')')
2043 last_string, last_start, last_end, last_line = None, None, None, None
2044 for type, string, start, end, line in tokens:
2045 if type == tokenize.OP and string == '[':
2046 if last_string:
2047 yield tokenize.NAME, last_string, last_start, last_end, last_line
2048 last_string = None
2049 yield type, string, start, end, line
2050 # everything inside brackets will be handled by _parse_filter
2051 for type, string, start, end, line in tokens:
2052 yield type, string, start, end, line
2053 if type == tokenize.OP and string == ']':
2054 break
2055 elif type == tokenize.OP and string in ALLOWED_OPS:
2056 if last_string:
2057 yield tokenize.NAME, last_string, last_start, last_end, last_line
2058 last_string = None
2059 yield type, string, start, end, line
2060 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
2061 if not last_string:
2062 last_string = string
2063 last_start = start
2064 last_end = end
2065 else:
2066 last_string += string
2067 if last_string:
2068 yield tokenize.NAME, last_string, last_start, last_end, last_line
2069
2070 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
2071 selectors = []
2072 current_selector = None
2073 for type, string, start, _, _ in tokens:
2074 # ENCODING is only defined in python 3.x
2075 if type == getattr(tokenize, 'ENCODING', None):
2076 continue
2077 elif type in [tokenize.NAME, tokenize.NUMBER]:
2078 current_selector = FormatSelector(SINGLE, string, [])
2079 elif type == tokenize.OP:
2080 if string == ')':
2081 if not inside_group:
2082 # ')' will be handled by the parentheses group
2083 tokens.restore_last_token()
2084 break
2085 elif inside_merge and string in ['/', ',']:
2086 tokens.restore_last_token()
2087 break
2088 elif inside_choice and string == ',':
2089 tokens.restore_last_token()
2090 break
2091 elif string == ',':
2092 if not current_selector:
2093 raise syntax_error('"," must follow a format selector', start)
2094 selectors.append(current_selector)
2095 current_selector = None
2096 elif string == '/':
2097 if not current_selector:
2098 raise syntax_error('"/" must follow a format selector', start)
2099 first_choice = current_selector
2100 second_choice = _parse_format_selection(tokens, inside_choice=True)
2101 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
2102 elif string == '[':
2103 if not current_selector:
2104 current_selector = FormatSelector(SINGLE, 'best', [])
2105 format_filter = _parse_filter(tokens)
2106 current_selector.filters.append(format_filter)
2107 elif string == '(':
2108 if current_selector:
2109 raise syntax_error('Unexpected "("', start)
2110 group = _parse_format_selection(tokens, inside_group=True)
2111 current_selector = FormatSelector(GROUP, group, [])
2112 elif string == '+':
2113 if not current_selector:
2114 raise syntax_error('Unexpected "+"', start)
2115 selector_1 = current_selector
2116 selector_2 = _parse_format_selection(tokens, inside_merge=True)
2117 if not selector_2:
2118 raise syntax_error('Expected a selector', start)
2119 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
2120 else:
2121 raise syntax_error(f'Operator not recognized: "{string}"', start)
2122 elif type == tokenize.ENDMARKER:
2123 break
2124 if current_selector:
2125 selectors.append(current_selector)
2126 return selectors
2127
2128 def _merge(formats_pair):
2129 format_1, format_2 = formats_pair
2130
2131 formats_info = []
2132 formats_info.extend(format_1.get('requested_formats', (format_1,)))
2133 formats_info.extend(format_2.get('requested_formats', (format_2,)))
2134
2135 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
2136 get_no_more = {'video': False, 'audio': False}
2137 for (i, fmt_info) in enumerate(formats_info):
2138 if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
2139 formats_info.pop(i)
2140 continue
2141 for aud_vid in ['audio', 'video']:
2142 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
2143 if get_no_more[aud_vid]:
2144 formats_info.pop(i)
2145 break
2146 get_no_more[aud_vid] = True
2147
2148 if len(formats_info) == 1:
2149 return formats_info[0]
2150
2151 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
2152 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
2153
2154 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
2155 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
2156
2157 output_ext = get_compatible_ext(
2158 vcodecs=[f.get('vcodec') for f in video_fmts],
2159 acodecs=[f.get('acodec') for f in audio_fmts],
2160 vexts=[f['ext'] for f in video_fmts],
2161 aexts=[f['ext'] for f in audio_fmts],
2162 preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
2163 or self.params.get('prefer_free_formats') and ('webm', 'mkv')))
2164
2165 filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
2166
2167 new_dict = {
2168 'requested_formats': formats_info,
2169 'format': '+'.join(filtered('format')),
2170 'format_id': '+'.join(filtered('format_id')),
2171 'ext': output_ext,
2172 'protocol': '+'.join(map(determine_protocol, formats_info)),
2173 'language': '+'.join(orderedSet(filtered('language'))) or None,
2174 'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
2175 'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
2176 'tbr': sum(filtered('tbr', 'vbr', 'abr')),
2177 }
2178
2179 if the_only_video:
2180 new_dict.update({
2181 'width': the_only_video.get('width'),
2182 'height': the_only_video.get('height'),
2183 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
2184 'fps': the_only_video.get('fps'),
2185 'dynamic_range': the_only_video.get('dynamic_range'),
2186 'vcodec': the_only_video.get('vcodec'),
2187 'vbr': the_only_video.get('vbr'),
2188 'stretched_ratio': the_only_video.get('stretched_ratio'),
2189 'aspect_ratio': the_only_video.get('aspect_ratio'),
2190 })
2191
2192 if the_only_audio:
2193 new_dict.update({
2194 'acodec': the_only_audio.get('acodec'),
2195 'abr': the_only_audio.get('abr'),
2196 'asr': the_only_audio.get('asr'),
2197 'audio_channels': the_only_audio.get('audio_channels')
2198 })
2199
2200 return new_dict
2201
2202 def _check_formats(formats):
2203 if not check_formats:
2204 yield from formats
2205 return
2206 yield from self._check_formats(formats)
2207
2208 def _build_selector_function(selector):
2209 if isinstance(selector, list): # ,
2210 fs = [_build_selector_function(s) for s in selector]
2211
2212 def selector_function(ctx):
2213 for f in fs:
2214 yield from f(ctx)
2215 return selector_function
2216
2217 elif selector.type == GROUP: # ()
2218 selector_function = _build_selector_function(selector.selector)
2219
2220 elif selector.type == PICKFIRST: # /
2221 fs = [_build_selector_function(s) for s in selector.selector]
2222
2223 def selector_function(ctx):
2224 for f in fs:
2225 picked_formats = list(f(ctx))
2226 if picked_formats:
2227 return picked_formats
2228 return []
2229
2230 elif selector.type == MERGE: # +
2231 selector_1, selector_2 = map(_build_selector_function, selector.selector)
2232
2233 def selector_function(ctx):
2234 for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
2235 yield _merge(pair)
2236
2237 elif selector.type == SINGLE: # atom
2238 format_spec = selector.selector or 'best'
2239
2240 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
2241 if format_spec == 'all':
2242 def selector_function(ctx):
2243 yield from _check_formats(ctx['formats'][::-1])
2244 elif format_spec == 'mergeall':
2245 def selector_function(ctx):
2246 formats = list(_check_formats(
2247 f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
2248 if not formats:
2249 return
2250 merged_format = formats[-1]
2251 for f in formats[-2::-1]:
2252 merged_format = _merge((merged_format, f))
2253 yield merged_format
2254
2255 else:
2256 format_fallback, separate_fallback, format_reverse, format_idx = False, None, True, 1
2257 mobj = re.match(
2258 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
2259 format_spec)
2260 if mobj is not None:
2261 format_idx = int_or_none(mobj.group('n'), default=1)
2262 format_reverse = mobj.group('bw')[0] == 'b'
2263 format_type = (mobj.group('type') or [None])[0]
2264 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
2265 format_modified = mobj.group('mod') is not None
2266
2267 format_fallback = not format_type and not format_modified # for b, w
2268 _filter_f = (
2269 (lambda f: f.get('%scodec' % format_type) != 'none')
2270 if format_type and format_modified # bv*, ba*, wv*, wa*
2271 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
2272 if format_type # bv, ba, wv, wa
2273 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
2274 if not format_modified # b, w
2275 else lambda f: True) # b*, w*
2276 filter_f = lambda f: _filter_f(f) and (
2277 f.get('vcodec') != 'none' or f.get('acodec') != 'none')
2278 else:
2279 if format_spec in self._format_selection_exts['audio']:
2280 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
2281 elif format_spec in self._format_selection_exts['video']:
2282 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
2283 separate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
2284 elif format_spec in self._format_selection_exts['storyboards']:
2285 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
2286 else:
2287 filter_f = lambda f: f.get('format_id') == format_spec # id
2288
2289 def selector_function(ctx):
2290 formats = list(ctx['formats'])
2291 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
2292 if not matches:
2293 if format_fallback and ctx['incomplete_formats']:
2294 # for extractors with incomplete formats (audio-only (soundcloud)
2295 # or video-only (imgur)), best/worst will fall back to the
2296 # best/worst {video,audio}-only format
2297 matches = formats
2298 elif separate_fallback and not ctx['has_merged_format']:
2299 # for compatibility with youtube-dl when there is no pre-merged format
2300 matches = list(filter(separate_fallback, formats))
2301 matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
2302 try:
2303 yield matches[format_idx - 1]
2304 except LazyList.IndexError:
2305 return
2306
2307 filters = [self._build_format_filter(f) for f in selector.filters]
2308
2309 def final_selector(ctx):
2310 ctx_copy = dict(ctx)
2311 for _filter in filters:
2312 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
2313 return selector_function(ctx_copy)
2314 return final_selector
2315
2316 stream = io.BytesIO(format_spec.encode())
2317 try:
2318 tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
2319 except tokenize.TokenError:
2320 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
2321
2322 class TokenIterator:
2323 def __init__(self, tokens):
2324 self.tokens = tokens
2325 self.counter = 0
2326
2327 def __iter__(self):
2328 return self
2329
2330 def __next__(self):
2331 if self.counter >= len(self.tokens):
2332 raise StopIteration()
2333 value = self.tokens[self.counter]
2334 self.counter += 1
2335 return value
2336
2337 next = __next__
2338
2339 def restore_last_token(self):
2340 self.counter -= 1
2341
2342 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
2343 return _build_selector_function(parsed_selector)
2344
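# Editor's sketch of the selector grammar parsed above (illustrative only):
#   'best'                    a SINGLE atom
#   'bv*+ba/b'                MERGE ('+') of two atoms, with PICKFIRST ('/') falling back to 'b'
#   '(mp4,webm)[height<600]'  a GROUP of ','-separated alternatives with a format filter
#
#   selector = ydl.build_format_selector('bv*+ba/b')
#   chosen = list(selector({'formats': formats, 'has_merged_format': False,
#                           'incomplete_formats': False}))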
2345 def _calc_headers(self, info_dict):
2346 res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})
2347
2348 cookies = self._calc_cookies(info_dict['url'])
2349 if cookies:
2350 res['Cookie'] = cookies
2351
2352 if 'X-Forwarded-For' not in res:
2353 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
2354 if x_forwarded_for_ip:
2355 res['X-Forwarded-For'] = x_forwarded_for_ip
2356
2357 return res
2358
2359 def _calc_cookies(self, url):
2360 pr = sanitized_Request(url)
2361 self.cookiejar.add_cookie_header(pr)
2362 return pr.get_header('Cookie')
2363
2364 def _sort_thumbnails(self, thumbnails):
2365 thumbnails.sort(key=lambda t: (
2366 t.get('preference') if t.get('preference') is not None else -1,
2367 t.get('width') if t.get('width') is not None else -1,
2368 t.get('height') if t.get('height') is not None else -1,
2369 t.get('id') if t.get('id') is not None else '',
2370 t.get('url')))
2371
2372 def _sanitize_thumbnails(self, info_dict):
2373 thumbnails = info_dict.get('thumbnails')
2374 if thumbnails is None:
2375 thumbnail = info_dict.get('thumbnail')
2376 if thumbnail:
2377 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
2378 if not thumbnails:
2379 return
2380
2381 def check_thumbnails(thumbnails):
2382 for t in thumbnails:
2383 self.to_screen(f'[info] Testing thumbnail {t["id"]}')
2384 try:
2385 self.urlopen(HEADRequest(t['url']))
2386 except network_exceptions as err:
2387 self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
2388 continue
2389 yield t
2390
2391 self._sort_thumbnails(thumbnails)
2392 for i, t in enumerate(thumbnails):
2393 if t.get('id') is None:
2394 t['id'] = '%d' % i
2395 if t.get('width') and t.get('height'):
2396 t['resolution'] = '%dx%d' % (t['width'], t['height'])
2397 t['url'] = sanitize_url(t['url'])
2398
2399 if self.params.get('check_formats') is True:
2400 info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
2401 else:
2402 info_dict['thumbnails'] = thumbnails
2403
2404 def _fill_common_fields(self, info_dict, final=True):
2405 # TODO: move sanitization here
2406 if final:
2407 title = info_dict.get('title', NO_DEFAULT)
2408 if title is NO_DEFAULT:
2409 raise ExtractorError('Missing "title" field in extractor result',
2410 video_id=info_dict['id'], ie=info_dict['extractor'])
2411 info_dict['fulltitle'] = title
2412 if not title:
2413 if title == '':
2414 self.write_debug('Extractor gave empty title. Creating a generic title')
2415 else:
2416 self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
2417 info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'
2418
2419 if info_dict.get('duration') is not None:
2420 info_dict['duration_string'] = formatSeconds(info_dict['duration'])
2421
2422 for ts_key, date_key in (
2423 ('timestamp', 'upload_date'),
2424 ('release_timestamp', 'release_date'),
2425 ('modified_timestamp', 'modified_date'),
2426 ):
2427 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
2428 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2429 # see http://bugs.python.org/issue1646728)
2430 with contextlib.suppress(ValueError, OverflowError, OSError):
2431 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
2432 info_dict[date_key] = upload_date.strftime('%Y%m%d')
2433
2434 live_keys = ('is_live', 'was_live')
2435 live_status = info_dict.get('live_status')
2436 if live_status is None:
2437 for key in live_keys:
2438 if info_dict.get(key) is False:
2439 continue
2440 if info_dict.get(key):
2441 live_status = key
2442 break
2443 if all(info_dict.get(key) is False for key in live_keys):
2444 live_status = 'not_live'
2445 if live_status:
2446 info_dict['live_status'] = live_status
2447 for key in live_keys:
2448 if info_dict.get(key) is None:
2449 info_dict[key] = (live_status == key)
2450 if live_status == 'post_live':
2451 info_dict['was_live'] = True
2452
2453 # Auto generate title fields corresponding to the *_number fields when missing
2454 # in order to always have clean titles. This is very common for TV series.
2455 for field in ('chapter', 'season', 'episode'):
2456 if final and info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
2457 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
2458
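# Editor's worked example of the conversions above:
#   datetime.datetime.utcfromtimestamp(1671580800).strftime('%Y%m%d') -> '20221221'
# and, for the auto-generated titles, episode_number=3 with no 'episode' field
# yields info_dict['episode'] = 'Episode 3'.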
2459 def _raise_pending_errors(self, info):
2460 err = info.pop('__pending_error', None)
2461 if err:
2462 self.report_error(err, tb=False)
2463
2464 def process_video_result(self, info_dict, download=True):
2465 assert info_dict.get('_type', 'video') == 'video'
2466 self._num_videos += 1
2467
2468 if 'id' not in info_dict:
2469 raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
2470 elif not info_dict.get('id'):
2471 raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])
2472
2473 def report_force_conversion(field, field_not, conversion):
2474 self.report_warning(
2475 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2476 % (field, field_not, conversion))
2477
2478 def sanitize_string_field(info, string_field):
2479 field = info.get(string_field)
2480 if field is None or isinstance(field, str):
2481 return
2482 report_force_conversion(string_field, 'a string', 'string')
2483 info[string_field] = str(field)
2484
2485 def sanitize_numeric_fields(info):
2486 for numeric_field in self._NUMERIC_FIELDS:
2487 field = info.get(numeric_field)
2488 if field is None or isinstance(field, (int, float)):
2489 continue
2490 report_force_conversion(numeric_field, 'numeric', 'int')
2491 info[numeric_field] = int_or_none(field)
2492
2493 sanitize_string_field(info_dict, 'id')
2494 sanitize_numeric_fields(info_dict)
2495 if info_dict.get('section_end') and info_dict.get('section_start') is not None:
2496 info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
2497 if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
2498 self.report_warning('"duration" field is negative; there is an error in the extractor')
2499
2500 chapters = info_dict.get('chapters') or []
2501 if chapters and chapters[0].get('start_time'):
2502 chapters.insert(0, {'start_time': 0})
2503
2504 dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
2505 for idx, (prev, current, next_) in enumerate(zip(
2506 (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
2507 if current.get('start_time') is None:
2508 current['start_time'] = prev.get('end_time')
2509 if not current.get('end_time'):
2510 current['end_time'] = next_.get('start_time')
2511 if not current.get('title'):
2512 current['title'] = f'<Untitled Chapter {idx}>'
2513
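# Editor's worked example of the chapter normalization above: with duration 300
# and chapters [{'title': 'Intro'}, {'start_time': 60}], the loop fills in
#   [{'title': 'Intro', 'start_time': 0, 'end_time': 60},
#    {'start_time': 60, 'end_time': 300, 'title': '<Untitled Chapter 2>'}]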
2514 if 'playlist' not in info_dict:
2515 # It isn't part of a playlist
2516 info_dict['playlist'] = None
2517 info_dict['playlist_index'] = None
2518
2519 self._sanitize_thumbnails(info_dict)
2520
2521 thumbnail = info_dict.get('thumbnail')
2522 thumbnails = info_dict.get('thumbnails')
2523 if thumbnail:
2524 info_dict['thumbnail'] = sanitize_url(thumbnail)
2525 elif thumbnails:
2526 info_dict['thumbnail'] = thumbnails[-1]['url']
2527
2528 if info_dict.get('display_id') is None and 'id' in info_dict:
2529 info_dict['display_id'] = info_dict['id']
2530
2531 self._fill_common_fields(info_dict)
2532
2533 for cc_kind in ('subtitles', 'automatic_captions'):
2534 cc = info_dict.get(cc_kind)
2535 if cc:
2536 for _, subtitle in cc.items():
2537 for subtitle_format in subtitle:
2538 if subtitle_format.get('url'):
2539 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
2540 if subtitle_format.get('ext') is None:
2541 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
2542
2543 automatic_captions = info_dict.get('automatic_captions')
2544 subtitles = info_dict.get('subtitles')
2545
2546 info_dict['requested_subtitles'] = self.process_subtitles(
2547 info_dict['id'], subtitles, automatic_captions)
2548
2549 formats = self._get_formats(info_dict)
2550
2551 # or None ensures --clean-infojson removes it
2552 info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
2553 if not self.params.get('allow_unplayable_formats'):
2554 formats = [f for f in formats if not f.get('has_drm')]
2555
2556 if formats and all(f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
2557 self.report_warning(
2558 f'{"This video is DRM protected and " if info_dict["_has_drm"] else ""}'
2559 'only images are available for download. Use --list-formats to see them'.capitalize())
2560
2561 get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
2562 if not get_from_start:
2563 info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
2564 if info_dict.get('is_live') and formats:
2565 formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
2566 if get_from_start and not formats:
2567 self.raise_no_formats(info_dict, msg=(
2568 '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
2569 'If you want to download from the current time, use --no-live-from-start'))
2570
2571 def is_wellformed(f):
2572 url = f.get('url')
2573 if not url:
2574 self.report_warning(
2575 '"url" field is missing or empty - skipping format, '
2576 'there is an error in extractor')
2577 return False
2578 if isinstance(url, bytes):
2579 sanitize_string_field(f, 'url')
2580 return True
2581
2582 # Filter out malformed formats for better extraction robustness
2583 formats = list(filter(is_wellformed, formats or []))
2584
2585 if not formats:
2586 self.raise_no_formats(info_dict)
2587
2588 formats_dict = {}
2589
2590 # We check that all the formats have the format and format_id fields
2591 for i, format in enumerate(formats):
2592 sanitize_string_field(format, 'format_id')
2593 sanitize_numeric_fields(format)
2594 format['url'] = sanitize_url(format['url'])
2595 if not format.get('format_id'):
2596 format['format_id'] = str(i)
2597 else:
2598 # Sanitize format_id from characters used in format selector expression
2599 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
2600 format_id = format['format_id']
2601 if format_id not in formats_dict:
2602 formats_dict[format_id] = []
2603 formats_dict[format_id].append(format)
2604
2605 # Make sure all formats have unique format_id
2606 common_exts = set(itertools.chain(*self._format_selection_exts.values()))
2607 for format_id, ambiguous_formats in formats_dict.items():
2608 ambiguous_id = len(ambiguous_formats) > 1
2609 for i, format in enumerate(ambiguous_formats):
2610 if ambiguous_id:
2611 format['format_id'] = '%s-%d' % (format_id, i)
2612 if format.get('ext') is None:
2613 format['ext'] = determine_ext(format['url']).lower()
2614 # Ensure there is no conflict between id and ext in format selection
2615 # See https://github.com/yt-dlp/yt-dlp/issues/1282
2616 if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
2617 format['format_id'] = 'f%s' % format['format_id']
2618
2619 for i, format in enumerate(formats):
2620 if format.get('format') is None:
2621 format['format'] = '{id} - {res}{note}'.format(
2622 id=format['format_id'],
2623 res=self.format_resolution(format),
2624 note=format_field(format, 'format_note', ' (%s)'),
2625 )
2626 if format.get('protocol') is None:
2627 format['protocol'] = determine_protocol(format)
2628 if format.get('resolution') is None:
2629 format['resolution'] = self.format_resolution(format, default=None)
2630 if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
2631 format['dynamic_range'] = 'SDR'
2632 if format.get('aspect_ratio') is None:
2633 format['aspect_ratio'] = try_call(lambda: round(format['width'] / format['height'], 2))
2634 if (info_dict.get('duration') and format.get('tbr')
2635 and not format.get('filesize') and not format.get('filesize_approx')):
2636 format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
2637
2638 # Add HTTP headers, so that external programs can use them from the
2639 # json output
2640 full_format_info = info_dict.copy()
2641 full_format_info.update(format)
2642 format['http_headers'] = self._calc_headers(full_format_info)
2643 # Remove private housekeeping stuff
2644 if '__x_forwarded_for_ip' in info_dict:
2645 del info_dict['__x_forwarded_for_ip']
2646
2647 if self.params.get('check_formats') is True:
2648 formats = LazyList(self._check_formats(formats[::-1]), reverse=True)
2649
2650 if not formats or formats[0] is not info_dict:
2651 # only set the 'formats' fields if the original info_dict list them
2652 # otherwise we end up with a circular reference, the first (and unique)
2653 # element in the 'formats' field in info_dict is info_dict itself,
2654 # which can't be exported to json
2655 info_dict['formats'] = formats
2656
2657 info_dict, _ = self.pre_process(info_dict)
2658
2659 if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
2660 return info_dict
2661
2662 self.post_extract(info_dict)
2663 info_dict, _ = self.pre_process(info_dict, 'after_filter')
2664
2665 # The pre-processors may have modified the formats
2666 formats = self._get_formats(info_dict)
2667
2668 list_only = self.params.get('simulate') == 'list_only'
2669 interactive_format_selection = not list_only and self.format_selector == '-'
2670 if self.params.get('list_thumbnails'):
2671 self.list_thumbnails(info_dict)
2672 if self.params.get('listsubtitles'):
2673 if 'automatic_captions' in info_dict:
2674 self.list_subtitles(
2675 info_dict['id'], automatic_captions, 'automatic captions')
2676 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
2677 if self.params.get('listformats') or interactive_format_selection:
2678 self.list_formats(info_dict)
2679 if list_only:
2680 # Without this printing, -F --print-json will not work
2681 self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
2682 return info_dict
2683
2684 format_selector = self.format_selector
2685 if format_selector is None:
2686 req_format = self._default_format_spec(info_dict, download=download)
2687 self.write_debug('Default format spec: %s' % req_format)
2688 format_selector = self.build_format_selector(req_format)
2689
2690 while True:
2691 if interactive_format_selection:
2692 req_format = input(
2693 self._format_screen('\nEnter format selector: ', self.Styles.EMPHASIS))
2694 try:
2695 format_selector = self.build_format_selector(req_format)
2696 except SyntaxError as err:
2697 self.report_error(err, tb=False, is_error=False)
2698 continue
2699
2700 formats_to_download = list(format_selector({
2701 'formats': formats,
2702 'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
2703 'incomplete_formats': (
2704 # All formats are video-only or
2705 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
2706 # all formats are audio-only
2707 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
2708 }))
2709 if interactive_format_selection and not formats_to_download:
2710 self.report_error('Requested format is not available', tb=False, is_error=False)
2711 continue
2712 break
2713
2714 if not formats_to_download:
2715 if not self.params.get('ignore_no_formats_error'):
2716 raise ExtractorError(
2717 'Requested format is not available. Use --list-formats for a list of available formats',
2718 expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
2719 self.report_warning('Requested format is not available')
2720 # Process what we can, even without any available formats.
2721 formats_to_download = [{}]
2722
2723 requested_ranges = tuple(self.params.get('download_ranges', lambda *_: [{}])(info_dict, self))
2724 best_format, downloaded_formats = formats_to_download[-1], []
2725 if download:
2726 if best_format and requested_ranges:
2727 def to_screen(*msg):
2728 self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')
2729
2730 to_screen(f'Downloading {len(formats_to_download)} format(s):',
2731 (f['format_id'] for f in formats_to_download))
2732 if requested_ranges != ({}, ):
2733 to_screen(f'Downloading {len(requested_ranges)} time ranges:',
2734 (f'{c["start_time"]:.1f}-{c["end_time"]:.1f}' for c in requested_ranges))
2735 max_downloads_reached = False
2736
2737 for fmt, chapter in itertools.product(formats_to_download, requested_ranges):
2738 new_info = self._copy_infodict(info_dict)
2739 new_info.update(fmt)
2740 offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
2741 end_time = offset + min(chapter.get('end_time', duration), duration)
2742 if chapter or offset:
2743 new_info.update({
2744 'section_start': offset + chapter.get('start_time', 0),
2745 # duration may not be accurate, so allow deviations of <1 sec
2746 'section_end': end_time if end_time <= offset + duration + 1 else None,
2747 'section_title': chapter.get('title'),
2748 'section_number': chapter.get('index'),
2749 })
2750 downloaded_formats.append(new_info)
2751 try:
2752 self.process_info(new_info)
2753 except MaxDownloadsReached:
2754 max_downloads_reached = True
2755 self._raise_pending_errors(new_info)
2756 # Remove copied info
2757 for key, val in tuple(new_info.items()):
2758 if info_dict.get(key) == val:
2759 new_info.pop(key)
2760 if max_downloads_reached:
2761 break
2762
2763 write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
2764 assert write_archive.issubset({True, False, 'ignore'})
2765 if True in write_archive and False not in write_archive:
2766 self.record_download_archive(info_dict)
2767
2768 info_dict['requested_downloads'] = downloaded_formats
2769 info_dict = self.run_all_pps('after_video', info_dict)
2770 if max_downloads_reached:
2771 raise MaxDownloadsReached()
2772
2773 # We update the info dict with the selected best-quality format (for backwards compatibility)
2774 info_dict.update(best_format)
2775 return info_dict
2776
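# A minimal sketch of how section downloads are requested (illustrative only;
# the URL is a placeholder and download_range_func comes from yt_dlp.utils):
#
#   from yt_dlp import YoutubeDL
#   from yt_dlp.utils import download_range_func
#
#   with YoutubeDL({'download_ranges': download_range_func([], [(10, 20)])}) as ydl:
#       ydl.download(['https://example.com/some-video'])
#
# Each requested range becomes one (format, chapter) pair in the product loop
# above and is downloaded with section_start/section_end set accordingly.
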
2777 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
2778 """Select the requested subtitles and their format"""
2779 available_subs, normal_sub_langs = {}, []
2780 if normal_subtitles and self.params.get('writesubtitles'):
2781 available_subs.update(normal_subtitles)
2782 normal_sub_langs = tuple(normal_subtitles.keys())
2783 if automatic_captions and self.params.get('writeautomaticsub'):
2784 for lang, cap_info in automatic_captions.items():
2785 if lang not in available_subs:
2786 available_subs[lang] = cap_info
2787
2788 if not available_subs or (
2789 not self.params.get('writesubtitles')
2790 and not self.params.get('writeautomaticsub')):
2791 return None
2792
2793 all_sub_langs = tuple(available_subs.keys())
2794 if self.params.get('allsubtitles', False):
2795 requested_langs = all_sub_langs
2796 elif self.params.get('subtitleslangs', False):
2797 try:
2798 requested_langs = orderedSet_from_options(
2799 self.params.get('subtitleslangs'), {'all': all_sub_langs}, use_regex=True)
2800 except re.error as e:
2801 raise ValueError(f'Wrong regex for subtitleslangs: {e.pattern}')
2802 elif normal_sub_langs:
2803 requested_langs = ['en'] if 'en' in normal_sub_langs else normal_sub_langs[:1]
2804 else:
2805 requested_langs = ['en'] if 'en' in all_sub_langs else all_sub_langs[:1]
2806 if requested_langs:
2807 self.to_screen(f'[info] {video_id}: Downloading subtitles: {", ".join(requested_langs)}')
2808
2809 formats_query = self.params.get('subtitlesformat', 'best')
2810 formats_preference = formats_query.split('/') if formats_query else []
2811 subs = {}
2812 for lang in requested_langs:
2813 formats = available_subs.get(lang)
2814 if formats is None:
2815 self.report_warning(f'{lang} subtitles not available for {video_id}')
2816 continue
2817 for ext in formats_preference:
2818 if ext == 'best':
2819 f = formats[-1]
2820 break
2821 matches = list(filter(lambda f: f['ext'] == ext, formats))
2822 if matches:
2823 f = matches[-1]
2824 break
2825 else:
2826 f = formats[-1]
2827 self.report_warning(
2828 'No subtitle format found matching "%s" for language %s, '
2829 'using %s' % (formats_query, lang, f['ext']))
2830 subs[lang] = f
2831 return subs
2832
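# A selection sketch (illustrative; the URL is a placeholder):
#
#   with YoutubeDL({'writesubtitles': True, 'writeautomaticsub': True,
#                   'subtitleslangs': ['en.*', '-live_chat']}) as ydl:
#       ydl.download(['https://example.com/some-video'])
#
# Here 'en.*' is treated as a regex and the '-' prefix as an exclusion by the
# orderedSet_from_options(..., use_regex=True) call above; without
# 'subtitleslangs', the method falls back to 'en' or the first available language.
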
2833 def _forceprint(self, key, info_dict):
2834 if info_dict is None:
2835 return
2836 info_copy = info_dict.copy()
2837 info_copy['formats_table'] = self.render_formats_table(info_dict)
2838 info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
2839 info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
2840 info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))
2841
2842 def format_tmpl(tmpl):
2843 mobj = re.fullmatch(r'([\w.:,]|-\d|(?P<dict>{([\w.:,]|-\d)+}))+=?', tmpl)
2844 if not mobj:
2845 return tmpl
2846
2847 fmt = '%({})s'
2848 if tmpl.startswith('{'):
2849 tmpl = f'.{tmpl}'
2850 if tmpl.endswith('='):
2851 tmpl, fmt = tmpl[:-1], '{0} = %({0})#j'
2852 return '\n'.join(map(fmt.format, [tmpl] if mobj.group('dict') else tmpl.split(',')))
2853
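# What format_tmpl produces, derived from the regex above (illustrative):
#   'id,title'  -> '%(id)s\n%(title)s'    (comma-separated field list)
#   'title='    -> 'title = %(title)#j'   (trailing '=' gives a labelled JSON dump)
#   '%(title)s' -> unchanged              (anything else is a full output template)
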
2854 for tmpl in self.params['forceprint'].get(key, []):
2855 self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))
2856
2857 for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
2858 filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
2859 tmpl = format_tmpl(tmpl)
2860 self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
2861 if self._ensure_dir_exists(filename):
2862 with open(filename, 'a', encoding='utf-8') as f:
2863 f.write(self.evaluate_outtmpl(tmpl, info_copy) + '\n')
2864
2865 def __forced_printings(self, info_dict, filename, incomplete):
2866 def print_mandatory(field, actual_field=None):
2867 if actual_field is None:
2868 actual_field = field
2869 if (self.params.get('force%s' % field, False)
2870 and (not incomplete or info_dict.get(actual_field) is not None)):
2871 self.to_stdout(info_dict[actual_field])
2872
2873 def print_optional(field):
2874 if (self.params.get('force%s' % field, False)
2875 and info_dict.get(field) is not None):
2876 self.to_stdout(info_dict[field])
2877
2878 info_dict = info_dict.copy()
2879 if filename is not None:
2880 info_dict['filename'] = filename
2881 if info_dict.get('requested_formats') is not None:
2882 # For RTMP URLs, also include the playpath
2883 info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
2884 elif info_dict.get('url'):
2885 info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')
2886
2887 if (self.params.get('forcejson')
2888 or self.params['forceprint'].get('video')
2889 or self.params['print_to_file'].get('video')):
2890 self.post_extract(info_dict)
2891 self._forceprint('video', info_dict)
2892
2893 print_mandatory('title')
2894 print_mandatory('id')
2895 print_mandatory('url', 'urls')
2896 print_optional('thumbnail')
2897 print_optional('description')
2898 print_optional('filename')
2899 if self.params.get('forceduration') and info_dict.get('duration') is not None:
2900 self.to_stdout(formatSeconds(info_dict['duration']))
2901 print_mandatory('format')
2902
2903 if self.params.get('forcejson'):
2904 self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
2905
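# These forced printings back CLI options such as --print, --get-title and
# --dump-json. A rough sketch (illustrative; URL is a placeholder):
#
#   yt-dlp --print 'id,title' --print 'urls=' --skip-download URL
#
# 'id,title' prints one field per line and 'urls=' prints 'urls = <JSON>',
# both via format_tmpl() in _forceprint() above.
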
2906 def dl(self, name, info, subtitle=False, test=False):
2907 if not info.get('url'):
2908 self.raise_no_formats(info, True)
2909
2910 if test:
2911 verbose = self.params.get('verbose')
2912 params = {
2913 'test': True,
2914 'quiet': self.params.get('quiet') or not verbose,
2915 'verbose': verbose,
2916 'noprogress': not verbose,
2917 'nopart': True,
2918 'skip_unavailable_fragments': False,
2919 'keep_fragments': False,
2920 'overwrites': True,
2921 '_no_ytdl_file': True,
2922 }
2923 else:
2924 params = self.params
2925 fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
2926 if not test:
2927 for ph in self._progress_hooks:
2928 fd.add_progress_hook(ph)
2929 urls = '", "'.join(
2930 (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
2931 for f in info.get('requested_formats', []) or [info])
2932 self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')
2933
2934 # Note: Ideally, info should be deep-copied so that hooks cannot modify it,
2935 # but it may contain objects that are not deep-copyable
2936 new_info = self._copy_infodict(info)
2937 if new_info.get('http_headers') is None:
2938 new_info['http_headers'] = self._calc_headers(new_info)
2939 return fd.download(name, new_info, subtitle)
2940
2941 def existing_file(self, filepaths, *, default_overwrite=True):
2942 existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
2943 if existing_files and not self.params.get('overwrites', default_overwrite):
2944 return existing_files[0]
2945
2946 for file in existing_files:
2947 self.report_file_delete(file)
2948 os.remove(file)
2949 return None
2950
2951 def process_info(self, info_dict):
2952 """Process a single resolved IE result. (Modifies it in-place)"""
2953
2954 assert info_dict.get('_type', 'video') == 'video'
2955 original_infodict = info_dict
2956
2957 if 'format' not in info_dict and 'ext' in info_dict:
2958 info_dict['format'] = info_dict['ext']
2959
2960 if self._match_entry(info_dict) is not None:
2961 info_dict['__write_download_archive'] = 'ignore'
2962 return
2963
2964 # Does nothing under normal operation; kept for backward compatibility of process_info
2965 self.post_extract(info_dict)
2966 self._num_downloads += 1
2967
2968 # info_dict['_filename'] needs to be set for backward compatibility
2969 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2970 temp_filename = self.prepare_filename(info_dict, 'temp')
2971 files_to_move = {}
2972
2973 # Forced printings
2974 self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
2975
2976 def check_max_downloads():
2977 if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
2978 raise MaxDownloadsReached()
2979
2980 if self.params.get('simulate'):
2981 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
2982 check_max_downloads()
2983 return
2984
2985 if full_filename is None:
2986 return
2987 if not self._ensure_dir_exists(encodeFilename(full_filename)):
2988 return
2989 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
2990 return
2991
2992 if self._write_description('video', info_dict,
2993 self.prepare_filename(info_dict, 'description')) is None:
2994 return
2995
2996 sub_files = self._write_subtitles(info_dict, temp_filename)
2997 if sub_files is None:
2998 return
2999 files_to_move.update(dict(sub_files))
3000
3001 thumb_files = self._write_thumbnails(
3002 'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
3003 if thumb_files is None:
3004 return
3005 files_to_move.update(dict(thumb_files))
3006
3007 infofn = self.prepare_filename(info_dict, 'infojson')
3008 _infojson_written = self._write_info_json('video', info_dict, infofn)
3009 if _infojson_written:
3010 info_dict['infojson_filename'] = infofn
3011 # For backward compatibility, even though it was a private field
3012 info_dict['__infojson_filename'] = infofn
3013 elif _infojson_written is None:
3014 return
3015
3016 # Note: Annotations are deprecated
3017 annofn = None
3018 if self.params.get('writeannotations', False):
3019 annofn = self.prepare_filename(info_dict, 'annotation')
3020 if annofn:
3021 if not self._ensure_dir_exists(encodeFilename(annofn)):
3022 return
3023 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
3024 self.to_screen('[info] Video annotations are already present')
3025 elif not info_dict.get('annotations'):
3026 self.report_warning('There are no annotations to write.')
3027 else:
3028 try:
3029 self.to_screen('[info] Writing video annotations to: ' + annofn)
3030 with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
3031 annofile.write(info_dict['annotations'])
3032 except (KeyError, TypeError):
3033 self.report_warning('There are no annotations to write.')
3034 except OSError:
3035 self.report_error('Cannot write annotations file: ' + annofn)
3036 return
3037
3038 # Write internet shortcut files
3039 def _write_link_file(link_type):
3040 url = try_get(info_dict['webpage_url'], iri_to_uri)
3041 if not url:
3042 self.report_warning(
3043 f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
3044 return True
3045 linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
3046 if not self._ensure_dir_exists(encodeFilename(linkfn)):
3047 return False
3048 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
3049 self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
3050 return True
3051 try:
3052 self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
3053 with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
3054 newline='\r\n' if link_type == 'url' else '\n') as linkfile:
3055 template_vars = {'url': url}
3056 if link_type == 'desktop':
3057 template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
3058 linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
3059 except OSError:
3060 self.report_error(f'Cannot write internet shortcut {linkfn}')
3061 return False
3062 return True
3063
3064 write_links = {
3065 'url': self.params.get('writeurllink'),
3066 'webloc': self.params.get('writewebloclink'),
3067 'desktop': self.params.get('writedesktoplink'),
3068 }
3069 if self.params.get('writelink'):
3070 link_type = ('webloc' if sys.platform == 'darwin'
3071 else 'desktop' if sys.platform.startswith('linux')
3072 else 'url')
3073 write_links[link_type] = True
3074
3075 if any(should_write and not _write_link_file(link_type)
3076 for link_type, should_write in write_links.items()):
3077 return
3078
3079 def replace_info_dict(new_info):
3080 nonlocal info_dict
3081 if new_info == info_dict:
3082 return
3083 info_dict.clear()
3084 info_dict.update(new_info)
3085
3086 new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
3087 replace_info_dict(new_info)
3088
3089 if self.params.get('skip_download'):
3090 info_dict['filepath'] = temp_filename
3091 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3092 info_dict['__files_to_move'] = files_to_move
3093 replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
3094 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
3095 else:
3096 # Download
3097 info_dict.setdefault('__postprocessors', [])
3098 try:
3099
3100 def existing_video_file(*filepaths):
3101 ext = info_dict.get('ext')
3102 converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
3103 file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
3104 default_overwrite=False)
3105 if file:
3106 info_dict['ext'] = os.path.splitext(file)[1][1:]
3107 return file
3108
3109 fd, success = None, True
3110 if info_dict.get('protocol') or info_dict.get('url'):
3111 fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
3112 if fd is not FFmpegFD and (
3113 info_dict.get('section_start') or info_dict.get('section_end')):
3114 msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
3115 else 'You have requested downloading the video partially, but ffmpeg is not installed')
3116 self.report_error(f'{msg}. Aborting')
3117 return
3118
3119 if info_dict.get('requested_formats') is not None:
3120 requested_formats = info_dict['requested_formats']
3121 old_ext = info_dict['ext']
3122 if self.params.get('merge_output_format') is None:
3123 if (info_dict['ext'] == 'webm'
3124 and info_dict.get('thumbnails')
3125 # check with type instead of pp_key, __name__, or isinstance
3126 # since we don't want any custom PPs to trigger this
3127 and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])): # noqa: E721
3128 info_dict['ext'] = 'mkv'
3129 self.report_warning(
3130 'webm doesn\'t support embedding a thumbnail, mkv will be used')
3131 new_ext = info_dict['ext']
3132
3133 def correct_ext(filename, ext=new_ext):
3134 if filename == '-':
3135 return filename
3136 filename_real_ext = os.path.splitext(filename)[1][1:]
3137 filename_wo_ext = (
3138 os.path.splitext(filename)[0]
3139 if filename_real_ext in (old_ext, new_ext)
3140 else filename)
3141 return f'{filename_wo_ext}.{ext}'
3142
3143 # Ensure filename always has a correct extension for successful merge
3144 full_filename = correct_ext(full_filename)
3145 temp_filename = correct_ext(temp_filename)
3146 dl_filename = existing_video_file(full_filename, temp_filename)
3147 info_dict['__real_download'] = False
3148
3149 merger = FFmpegMergerPP(self)
3150 downloaded = []
3151 if dl_filename is not None:
3152 self.report_file_already_downloaded(dl_filename)
3153 elif fd:
3154 for f in requested_formats if fd != FFmpegFD else []:
3155 f['filepath'] = fname = prepend_extension(
3156 correct_ext(temp_filename, info_dict['ext']),
3157 'f%s' % f['format_id'], info_dict['ext'])
3158 downloaded.append(fname)
3159 info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
3160 success, real_download = self.dl(temp_filename, info_dict)
3161 info_dict['__real_download'] = real_download
3162 else:
3163 if self.params.get('allow_unplayable_formats'):
3164 self.report_warning(
3165 'You have requested merging of multiple formats '
3166 'while also allowing unplayable formats to be downloaded. '
3167 'The formats won\'t be merged to prevent data corruption.')
3168 elif not merger.available:
3169 msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
3170 if not self.params.get('ignoreerrors'):
3171 self.report_error(f'{msg}. Aborting due to --abort-on-error')
3172 return
3173 self.report_warning(f'{msg}. The formats won\'t be merged')
3174
3175 if temp_filename == '-':
3176 reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
3177 else 'but the formats are incompatible for simultaneous download' if merger.available
3178 else 'but ffmpeg is not installed')
3179 self.report_warning(
3180 f'You have requested downloading multiple formats to stdout {reason}. '
3181 'The formats will be streamed one after the other')
3182 fname = temp_filename
3183 for f in requested_formats:
3184 new_info = dict(info_dict)
3185 del new_info['requested_formats']
3186 new_info.update(f)
3187 if temp_filename != '-':
3188 fname = prepend_extension(
3189 correct_ext(temp_filename, new_info['ext']),
3190 'f%s' % f['format_id'], new_info['ext'])
3191 if not self._ensure_dir_exists(fname):
3192 return
3193 f['filepath'] = fname
3194 downloaded.append(fname)
3195 partial_success, real_download = self.dl(fname, new_info)
3196 info_dict['__real_download'] = info_dict['__real_download'] or real_download
3197 success = success and partial_success
3198
3199 if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
3200 info_dict['__postprocessors'].append(merger)
3201 info_dict['__files_to_merge'] = downloaded
3202 # Even if there were no downloads, the merge is happening only now
3203 info_dict['__real_download'] = True
3204 else:
3205 for file in downloaded:
3206 files_to_move[file] = None
3207 else:
3208 # Just a single file
3209 dl_filename = existing_video_file(full_filename, temp_filename)
3210 if dl_filename is None or dl_filename == temp_filename:
3211 # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
3212 # So we should try to resume the download
3213 success, real_download = self.dl(temp_filename, info_dict)
3214 info_dict['__real_download'] = real_download
3215 else:
3216 self.report_file_already_downloaded(dl_filename)
3217
3218 dl_filename = dl_filename or temp_filename
3219 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3220
3221 except network_exceptions as err:
3222 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
3223 return
3224 except OSError as err:
3225 raise UnavailableVideoError(err)
3226 except (ContentTooShortError, ) as err:
3227 self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
3228 return
3229
3230 self._raise_pending_errors(info_dict)
3231 if success and full_filename != '-':
3232
3233 def fixup():
3234 do_fixup = True
3235 fixup_policy = self.params.get('fixup')
3236 vid = info_dict['id']
3237
3238 if fixup_policy in ('ignore', 'never'):
3239 return
3240 elif fixup_policy == 'warn':
3241 do_fixup = 'warn'
3242 elif fixup_policy != 'force':
3243 assert fixup_policy in ('detect_or_warn', None)
3244 if not info_dict.get('__real_download'):
3245 do_fixup = False
3246
3247 def ffmpeg_fixup(cndn, msg, cls):
3248 if not (do_fixup and cndn):
3249 return
3250 elif do_fixup == 'warn':
3251 self.report_warning(f'{vid}: {msg}')
3252 return
3253 pp = cls(self)
3254 if pp.available:
3255 info_dict['__postprocessors'].append(pp)
3256 else:
3257 self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
3258
3259 stretched_ratio = info_dict.get('stretched_ratio')
3260 ffmpeg_fixup(stretched_ratio not in (1, None),
3261 f'Non-uniform pixel ratio {stretched_ratio}',
3262 FFmpegFixupStretchedPP)
3263
3264 downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
3265 downloader = downloader.FD_NAME if downloader else None
3266
3267 ext = info_dict.get('ext')
3268 postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
3269 isinstance(pp, FFmpegVideoConvertorPP)
3270 and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
3271 ) for pp in self._pps['post_process'])
3272
3273 if not postprocessed_by_ffmpeg:
3274 ffmpeg_fixup(ext == 'm4a' and info_dict.get('container') == 'm4a_dash',
3275 'writing DASH m4a. Only some players support this container',
3276 FFmpegFixupM4aPP)
3277 ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
3278 or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
3279 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
3280 FFmpegFixupM3u8PP)
3281 ffmpeg_fixup(info_dict.get('is_live') and downloader == 'DashSegmentsFD',
3282 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)
3283
3284 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
3285 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)
3286
3287 fixup()
3288 try:
3289 replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
3290 except PostProcessingError as err:
3291 self.report_error('Postprocessing: %s' % str(err))
3292 return
3293 try:
3294 for ph in self._post_hooks:
3295 ph(info_dict['filepath'])
3296 except Exception as err:
3297 self.report_error('post hooks: %s' % str(err))
3298 return
3299 info_dict['__write_download_archive'] = True
3300
3301 assert info_dict is original_infodict # Make sure the info_dict was modified in-place
3302 if self.params.get('force_write_download_archive'):
3303 info_dict['__write_download_archive'] = True
3304 check_max_downloads()
3305
3306 def __download_wrapper(self, func):
3307 @functools.wraps(func)
3308 def wrapper(*args, **kwargs):
3309 try:
3310 res = func(*args, **kwargs)
3311 except UnavailableVideoError as e:
3312 self.report_error(e)
3313 except DownloadCancelled as e:
3314 self.to_screen(f'[info] {e}')
3315 if not self.params.get('break_per_url'):
3316 raise
3317 self._num_downloads = 0
3318 else:
3319 if self.params.get('dump_single_json', False):
3320 self.post_extract(res)
3321 self.to_stdout(json.dumps(self.sanitize_info(res)))
3322 return wrapper
3323
3324 def download(self, url_list):
3325 """Download a given list of URLs."""
3326 url_list = variadic(url_list) # Passing a single URL is a common mistake
3327 outtmpl = self.params['outtmpl']['default']
3328 if (len(url_list) > 1
3329 and outtmpl != '-'
3330 and '%' not in outtmpl
3331 and self.params.get('max_downloads') != 1):
3332 raise SameFileError(outtmpl)
3333
3334 for url in url_list:
3335 self.__download_wrapper(self.extract_info)(
3336 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
3337
3338 return self._download_retcode
3339
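# A typical embedding sketch (illustrative; the URL is a placeholder):
#
#   from yt_dlp import YoutubeDL
#
#   with YoutubeDL({'outtmpl': '%(title)s.%(ext)s'}) as ydl:
#       retcode = ydl.download(['https://example.com/some-video'])
#
# A single URL string also works, since url_list is normalized by variadic() above.
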
3340 def download_with_info_file(self, info_filename):
3341 with contextlib.closing(fileinput.FileInput(
3342 [info_filename], mode='r',
3343 openhook=fileinput.hook_encoded('utf-8'))) as f:
3344 # FileInput doesn't have a read method, so we can't call json.load
3345 info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
3346 try:
3347 self.__download_wrapper(self.process_ie_result)(info, download=True)
3348 except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
3349 if not isinstance(e, EntryNotInPlaylist):
3350 self.to_stderr('\r')
3351 webpage_url = info.get('webpage_url')
3352 if webpage_url is not None:
3353 self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
3354 return self.download([webpage_url])
3355 else:
3356 raise
3357 return self._download_retcode
3358
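# This backs --load-info-json; a rough CLI equivalent (illustrative):
#
#   yt-dlp --write-info-json --skip-download URL   # produce the .info.json
#   yt-dlp --load-info-json VIDEO.info.json        # download from it later
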
3359 @staticmethod
3360 def sanitize_info(info_dict, remove_private_keys=False):
3361 ''' Sanitize the infodict for conversion to JSON '''
3362 if info_dict is None:
3363 return info_dict
3364 info_dict.setdefault('epoch', int(time.time()))
3365 info_dict.setdefault('_type', 'video')
3366 info_dict.setdefault('_version', {
3367 'version': __version__,
3368 'current_git_head': current_git_head(),
3369 'release_git_head': RELEASE_GIT_HEAD,
3370 'repository': REPOSITORY,
3371 })
3372
3373 if remove_private_keys:
3374 reject = lambda k, v: v is None or k.startswith('__') or k in {
3375 'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
3376 'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber',
3377 }
3378 else:
3379 reject = lambda k, v: False
3380
3381 def filter_fn(obj):
3382 if isinstance(obj, dict):
3383 return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
3384 elif isinstance(obj, (list, tuple, set, LazyList)):
3385 return list(map(filter_fn, obj))
3386 elif obj is None or isinstance(obj, (str, int, float, bool)):
3387 return obj
3388 else:
3389 return repr(obj)
3390
3391 return filter_fn(info_dict)
3392
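# A round-trip sketch (illustrative):
#
#   info = ydl.extract_info(url, download=False)
#   clean = YoutubeDL.sanitize_info(info, remove_private_keys=True)
#   json.dumps(clean)  # safe: non-serializable values were replaced by repr()
#
# With remove_private_keys=True, None values, '__'-prefixed keys and the
# requested_*/filepath-style bookkeeping keys listed above are dropped.
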
3393 @staticmethod
3394 def filter_requested_info(info_dict, actually_filter=True):
3395 ''' Alias of sanitize_info for backward compatibility '''
3396 return YoutubeDL.sanitize_info(info_dict, actually_filter)
3397
3398 def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
3399 for filename in set(filter(None, files_to_delete)):
3400 if msg:
3401 self.to_screen(msg % filename)
3402 try:
3403 os.remove(filename)
3404 except OSError:
3405 self.report_warning(f'Unable to delete file {filename}')
3406 if filename in info.get('__files_to_move', []): # NB: Delete even if None
3407 del info['__files_to_move'][filename]
3408
3409 @staticmethod
3410 def post_extract(info_dict):
3411 def actual_post_extract(info_dict):
3412 if info_dict.get('_type') in ('playlist', 'multi_video'):
3413 for video_dict in info_dict.get('entries', {}):
3414 actual_post_extract(video_dict or {})
3415 return
3416
3417 post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
3418 info_dict.update(post_extractor())
3419
3420 actual_post_extract(info_dict or {})
3421
3422 def run_pp(self, pp, infodict):
3423 files_to_delete = []
3424 if '__files_to_move' not in infodict:
3425 infodict['__files_to_move'] = {}
3426 try:
3427 files_to_delete, infodict = pp.run(infodict)
3428 except PostProcessingError as e:
3429 # Must be True and not 'only_download'
3430 if self.params.get('ignoreerrors') is True:
3431 self.report_error(e)
3432 return infodict
3433 raise
3434
3435 if not files_to_delete:
3436 return infodict
3437 if self.params.get('keepvideo', False):
3438 for f in files_to_delete:
3439 infodict['__files_to_move'].setdefault(f, '')
3440 else:
3441 self._delete_downloaded_files(
3442 *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
3443 return infodict
3444
3445 def run_all_pps(self, key, info, *, additional_pps=None):
3446 self._forceprint(key, info)
3447 for pp in (additional_pps or []) + self._pps[key]:
3448 info = self.run_pp(pp, info)
3449 return info
3450
3451 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
3452 info = dict(ie_info)
3453 info['__files_to_move'] = files_to_move or {}
3454 try:
3455 info = self.run_all_pps(key, info)
3456 except PostProcessingError as err:
3457 msg = f'Preprocessing: {err}'
3458 info.setdefault('__pending_error', msg)
3459 self.report_error(msg, is_error=False)
3460 return info, info.pop('__files_to_move', None)
3461
3462 def post_process(self, filename, info, files_to_move=None):
3463 """Run all the postprocessors on the given file."""
3464 info['filepath'] = filename
3465 info['__files_to_move'] = files_to_move or {}
3466 info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
3467 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
3468 del info['__files_to_move']
3469 return self.run_all_pps('after_move', info)
3470
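# The 'key' arguments select a hook point; the ones used in this file include
# 'pre_process', 'before_dl', 'post_process', 'after_move' and 'after_video'.
# A sketch of attaching a custom postprocessor (MyCustomPP is a placeholder):
#
#   ydl.add_post_processor(MyCustomPP(), when='after_move')
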
3471 def _make_archive_id(self, info_dict):
3472 video_id = info_dict.get('id')
3473 if not video_id:
3474 return
3475 # Future-proof against any change in case,
3476 # and for backwards compatibility with prior versions
3477 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
3478 if extractor is None:
3479 url = str_or_none(info_dict.get('url'))
3480 if not url:
3481 return
3482 # Try to find matching extractor for the URL and take its ie_key
3483 for ie_key, ie in self._ies.items():
3484 if ie.suitable(url):
3485 extractor = ie_key
3486 break
3487 else:
3488 return
3489 return make_archive_id(extractor, video_id)
3490
3491 def in_download_archive(self, info_dict):
3492 if not self.archive:
3493 return False
3494
3495 vid_ids = [self._make_archive_id(info_dict)]
3496 vid_ids.extend(info_dict.get('_old_archive_ids') or [])
3497 return any(id_ in self.archive for id_ in vid_ids)
3498
3499 def record_download_archive(self, info_dict):
3500 fn = self.params.get('download_archive')
3501 if fn is None:
3502 return
3503 vid_id = self._make_archive_id(info_dict)
3504 assert vid_id
3505
3506 self.write_debug(f'Adding to archive: {vid_id}')
3507 if is_path_like(fn):
3508 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
3509 archive_file.write(vid_id + '\n')
3510 self.archive.add(vid_id)
3511
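# Archive entries are plain '<lowercased extractor key> <video id>' lines as
# produced by make_archive_id(), e.g. (illustrative):
#
#   youtube BaW_jenozKc
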
3512 @staticmethod
3513 def format_resolution(format, default='unknown'):
3514 if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
3515 return 'audio only'
3516 if format.get('resolution') is not None:
3517 return format['resolution']
3518 if format.get('width') and format.get('height'):
3519 return '%dx%d' % (format['width'], format['height'])
3520 elif format.get('height'):
3521 return '%sp' % format['height']
3522 elif format.get('width'):
3523 return '%dx?' % format['width']
3524 return default
3525
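# Examples of the fallback chain above (illustrative):
#   {'width': 1920, 'height': 1080}           -> '1920x1080'
#   {'height': 720}                           -> '720p'
#   {'width': 640}                            -> '640x?'
#   {'vcodec': 'none', 'acodec': 'mp4a.40.2'} -> 'audio only'
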
3526 def _list_format_headers(self, *headers):
3527 if self.params.get('listformats_table', True) is not False:
3528 return [self._format_out(header, self.Styles.HEADERS) for header in headers]
3529 return headers
3530
3531 def _format_note(self, fdict):
3532 res = ''
3533 if fdict.get('ext') in ['f4f', 'f4m']:
3534 res += '(unsupported)'
3535 if fdict.get('language'):
3536 if res:
3537 res += ' '
3538 res += '[%s]' % fdict['language']
3539 if fdict.get('format_note') is not None:
3540 if res:
3541 res += ' '
3542 res += fdict['format_note']
3543 if fdict.get('tbr') is not None:
3544 if res:
3545 res += ', '
3546 res += '%4dk' % fdict['tbr']
3547 if fdict.get('container') is not None:
3548 if res:
3549 res += ', '
3550 res += '%s container' % fdict['container']
3551 if (fdict.get('vcodec') is not None
3552 and fdict.get('vcodec') != 'none'):
3553 if res:
3554 res += ', '
3555 res += fdict['vcodec']
3556 if fdict.get('vbr') is not None:
3557 res += '@'
3558 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
3559 res += 'video@'
3560 if fdict.get('vbr') is not None:
3561 res += '%4dk' % fdict['vbr']
3562 if fdict.get('fps') is not None:
3563 if res:
3564 res += ', '
3565 res += '%sfps' % fdict['fps']
3566 if fdict.get('acodec') is not None:
3567 if res:
3568 res += ', '
3569 if fdict['acodec'] == 'none':
3570 res += 'video only'
3571 else:
3572 res += '%-5s' % fdict['acodec']
3573 elif fdict.get('abr') is not None:
3574 if res:
3575 res += ', '
3576 res += 'audio'
3577 if fdict.get('abr') is not None:
3578 res += '@%3dk' % fdict['abr']
3579 if fdict.get('asr') is not None:
3580 res += ' (%5dHz)' % fdict['asr']
3581 if fdict.get('filesize') is not None:
3582 if res:
3583 res += ', '
3584 res += format_bytes(fdict['filesize'])
3585 elif fdict.get('filesize_approx') is not None:
3586 if res:
3587 res += ', '
3588 res += '~' + format_bytes(fdict['filesize_approx'])
3589 return res
3590
3591 def _get_formats(self, info_dict):
3592 if info_dict.get('formats') is None:
3593 if info_dict.get('url') and info_dict.get('_type', 'video') == 'video':
3594 return [info_dict]
3595 return []
3596 return info_dict['formats']
3597
3598 def render_formats_table(self, info_dict):
3599 formats = self._get_formats(info_dict)
3600 if not formats:
3601 return
3602 if self.params.get('listformats_table', True) is False:
3603 table = [
3604 [
3605 format_field(f, 'format_id'),
3606 format_field(f, 'ext'),
3607 self.format_resolution(f),
3608 self._format_note(f)
3609 ] for f in formats if (f.get('preference') or 0) >= -1000]
3610 return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
3611
3612 def simplified_codec(f, field):
3613 assert field in ('acodec', 'vcodec')
3614 codec = f.get(field, 'unknown')
3615 if not codec:
3616 return 'unknown'
3617 elif codec != 'none':
3618 return '.'.join(codec.split('.')[:4])
3619
3620 if field == 'vcodec' and f.get('acodec') == 'none':
3621 return 'images'
3622 elif field == 'acodec' and f.get('vcodec') == 'none':
3623 return ''
3624 return self._format_out('audio only' if field == 'vcodec' else 'video only',
3625 self.Styles.SUPPRESS)
3626
3627 delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
3628 table = [
3629 [
3630 self._format_out(format_field(f, 'format_id'), self.Styles.ID),
3631 format_field(f, 'ext'),
3632 format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
3633 format_field(f, 'fps', '\t%d', func=round),
3634 format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
3635 format_field(f, 'audio_channels', '\t%s'),
3636 delim,
3637 format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
3638 format_field(f, 'tbr', '\t%dk', func=round),
3639 shorten_protocol_name(f.get('protocol', '')),
3640 delim,
3641 simplified_codec(f, 'vcodec'),
3642 format_field(f, 'vbr', '\t%dk', func=round),
3643 simplified_codec(f, 'acodec'),
3644 format_field(f, 'abr', '\t%dk', func=round),
3645 format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
3646 join_nonempty(
3647 self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
3648 format_field(f, 'language', '[%s]'),
3649 join_nonempty(format_field(f, 'format_note'),
3650 format_field(f, 'container', ignore=(None, f.get('ext'))),
3651 delim=', '),
3652 delim=' '),
3653 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3654 header_line = self._list_format_headers(
3655 'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', 'CH', delim, '\tFILESIZE', '\tTBR', 'PROTO',
3656 delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')
3657
3658 return render_table(
3659 header_line, table, hide_empty=True,
3660 delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
3661
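# A listing sketch (illustrative; the URL is a placeholder). This table is what
# --list-formats prints:
#
#   with YoutubeDL({'listformats': True}) as ydl:
#       ydl.extract_info('https://example.com/some-video', download=False)
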
3662 def render_thumbnails_table(self, info_dict):
3663 thumbnails = list(info_dict.get('thumbnails') or [])
3664 if not thumbnails:
3665 return None
3666 return render_table(
3667 self._list_format_headers('ID', 'Width', 'Height', 'URL'),
3668 [[t.get('id'), t.get('width') or 'unknown', t.get('height') or 'unknown', t['url']] for t in thumbnails])
3669
3670 def render_subtitles_table(self, video_id, subtitles):
3671 def _row(lang, formats):
3672 exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
3673 if len(set(names)) == 1:
3674 names = [] if names[0] == 'unknown' else names[:1]
3675 return [lang, ', '.join(names), ', '.join(exts)]
3676
3677 if not subtitles:
3678 return None
3679 return render_table(
3680 self._list_format_headers('Language', 'Name', 'Formats'),
3681 [_row(lang, formats) for lang, formats in subtitles.items()],
3682 hide_empty=True)
3683
3684 def __list_table(self, video_id, name, func, *args):
3685 table = func(*args)
3686 if not table:
3687 self.to_screen(f'{video_id} has no {name}')
3688 return
3689 self.to_screen(f'[info] Available {name} for {video_id}:')
3690 self.to_stdout(table)
3691
3692 def list_formats(self, info_dict):
3693 self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)
3694
3695 def list_thumbnails(self, info_dict):
3696 self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)
3697
3698 def list_subtitles(self, video_id, subtitles, name='subtitles'):
3699 self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
3700
3701 def urlopen(self, req):
3702 """ Start an HTTP download """
3703 if isinstance(req, str):
3704 req = sanitized_Request(req)
3705 return self._opener.open(req, timeout=self._socket_timeout)
3706
3707 def print_debug_header(self):
3708 if not self.params.get('verbose'):
3709 return
3710
3711 from . import _IN_CLI # Must be delayed import
3712
3713 # These imports can be slow, so import them only when needed
3714 from .extractor.extractors import _LAZY_LOADER
3715 from .extractor.extractors import _PLUGIN_CLASSES as plugin_extractors
3716
3717 def get_encoding(stream):
3718 ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
3719 if not supports_terminal_sequences(stream):
3720 from .utils import WINDOWS_VT_MODE # Must be imported locally
3721 ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
3722 return ret
3723
3724 encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
3725 locale.getpreferredencoding(),
3726 sys.getfilesystemencoding(),
3727 self.get_encoding(),
3728 ', '.join(
3729 f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
3730 if stream is not None and key != 'console')
3731 )
3732
3733 logger = self.params.get('logger')
3734 if logger:
3735 write_debug = lambda msg: logger.debug(f'[debug] {msg}')
3736 write_debug(encoding_str)
3737 else:
3738 write_string(f'[debug] {encoding_str}\n', encoding=None)
3739 write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')
3740
3741 source = detect_variant()
3742 if VARIANT not in (None, 'pip'):
3743 source += '*'
3744 write_debug(join_nonempty(
3745 f'{"yt-dlp" if REPOSITORY == "yt-dlp/yt-dlp" else REPOSITORY} version',
3746 __version__,
3747 f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
3748 '' if source == 'unknown' else f'({source})',
3749 '' if _IN_CLI else 'API',
3750 delim=' '))
3751
3752 if not _IN_CLI:
3753 write_debug(f'params: {self.params}')
3754
3755 if not _LAZY_LOADER:
3756 if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
3757 write_debug('Lazy loading extractors is forcibly disabled')
3758 else:
3759 write_debug('Lazy loading extractors is disabled')
3760 if plugin_extractors or plugin_postprocessors:
3761 write_debug('Plugins: %s' % [
3762 '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
3763 for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
3764 if self.params['compat_opts']:
3765 write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
3766
3767 if current_git_head():
3768 write_debug(f'Git HEAD: {current_git_head()}')
3769 write_debug(system_identifier())
3770
3771 exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
3772 ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
3773 if ffmpeg_features:
3774 exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))
3775
3776 exe_versions['rtmpdump'] = rtmpdump_version()
3777 exe_versions['phantomjs'] = PhantomJSwrapper._version()
3778 exe_str = ', '.join(
3779 f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
3780 ) or 'none'
3781 write_debug('exe versions: %s' % exe_str)
3782
3783 from .compat.compat_utils import get_package_info
3784 from .dependencies import available_dependencies
3785
3786 write_debug('Optional libraries: %s' % (', '.join(sorted({
3787 join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
3788 })) or 'none'))
3789
3790 self._setup_opener()
3791 proxy_map = {}
3792 for handler in self._opener.handlers:
3793 if hasattr(handler, 'proxies'):
3794 proxy_map.update(handler.proxies)
3795 write_debug(f'Proxy map: {proxy_map}')
3796
3797 # Not implemented
3798 if False and self.params.get('call_home'):
3799 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
3800 write_debug('Public IP address: %s' % ipaddr)
3801 latest_version = self.urlopen(
3802 'https://yt-dl.org/latest/version').read().decode()
3803 if version_tuple(latest_version) > version_tuple(__version__):
3804 self.report_warning(
3805 'You are using an outdated version (newest version: %s)! '
3806 'See https://yt-dl.org/update if you need help updating.' %
3807 latest_version)
3808
3809 def _setup_opener(self):
3810 if hasattr(self, '_opener'):
3811 return
3812 timeout_val = self.params.get('socket_timeout')
3813 self._socket_timeout = 20 if timeout_val is None else float(timeout_val)
3814
3815 opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
3816 opts_cookiefile = self.params.get('cookiefile')
3817 opts_proxy = self.params.get('proxy')
3818
3819 self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
3820
3821 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
3822 if opts_proxy is not None:
3823 if opts_proxy == '':
3824 proxies = {}
3825 else:
3826 proxies = {'http': opts_proxy, 'https': opts_proxy}
3827 else:
3828 proxies = urllib.request.getproxies()
3829 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
3830 if 'http' in proxies and 'https' not in proxies:
3831 proxies['https'] = proxies['http']
3832 proxy_handler = PerRequestProxyHandler(proxies)
3833
3834 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
3835 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3836 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
3837 redirect_handler = YoutubeDLRedirectHandler()
3838 data_handler = urllib.request.DataHandler()
3839
3840 # When passing our own FileHandler instance, build_opener won't add the
3841 # default FileHandler and allows us to disable the file protocol, which
3842 # can be used for malicious purposes (see
3843 # https://github.com/ytdl-org/youtube-dl/issues/8227)
3844 file_handler = urllib.request.FileHandler()
3845
3846 def file_open(*args, **kwargs):
3847 raise urllib.error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
3848 file_handler.file_open = file_open
3849
3850 opener = urllib.request.build_opener(
3851 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
3852
3853 # Delete the default user-agent header, which would otherwise apply in
3854 # cases where our custom HTTP handler doesn't come into play
3855 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
3856 opener.addheaders = []
3857 self._opener = opener
3858
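# Proxy behaviour sketch (illustrative): {'proxy': 'socks5://127.0.0.1:9050'}
# routes both http and https through that proxy; {'proxy': ''} disables
# proxying; with no 'proxy' key, the *_PROXY environment variables are
# honoured via urllib.request.getproxies().
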
3859 def encode(self, s):
3860 if isinstance(s, bytes):
3861 return s # Already encoded
3862
3863 try:
3864 return s.encode(self.get_encoding())
3865 except UnicodeEncodeError as err:
3866 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3867 raise
3868
3869 def get_encoding(self):
3870 encoding = self.params.get('encoding')
3871 if encoding is None:
3872 encoding = preferredencoding()
3873 return encoding
3874
3875 def _write_info_json(self, label, ie_result, infofn, overwrite=None):
3876 ''' Write infojson and return True = written, 'exists' = already exists, False = skip, None = error '''
3877 if overwrite is None:
3878 overwrite = self.params.get('overwrites', True)
3879 if not self.params.get('writeinfojson'):
3880 return False
3881 elif not infofn:
3882 self.write_debug(f'Skipping writing {label} infojson')
3883 return False
3884 elif not self._ensure_dir_exists(infofn):
3885 return None
3886 elif not overwrite and os.path.exists(infofn):
3887 self.to_screen(f'[info] {label.title()} metadata is already present')
3888 return 'exists'
3889
3890 self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
3891 try:
3892 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
3893 return True
3894 except OSError:
3895 self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
3896 return None
3897
3898 def _write_description(self, label, ie_result, descfn):
3899 ''' Write description and return True = written, False = skip, None = error '''
3900 if not self.params.get('writedescription'):
3901 return False
3902 elif not descfn:
3903 self.write_debug(f'Skipping writing {label} description')
3904 return False
3905 elif not self._ensure_dir_exists(descfn):
3906 return None
3907 elif not self.params.get('overwrites', True) and os.path.exists(descfn):
3908 self.to_screen(f'[info] {label.title()} description is already present')
3909 elif ie_result.get('description') is None:
3910 self.report_warning(f'There\'s no {label} description to write')
3911 return False
3912 else:
3913 try:
3914 self.to_screen(f'[info] Writing {label} description to: {descfn}')
3915 with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
3916 descfile.write(ie_result['description'])
3917 except OSError:
3918 self.report_error(f'Cannot write {label} description file {descfn}')
3919 return None
3920 return True
3921
3922 def _write_subtitles(self, info_dict, filename):
3923 ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
3924 ret = []
3925 subtitles = info_dict.get('requested_subtitles')
3926 if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
3927 # Subtitle download errors are already handled by the relevant IE,
3928 # so this silently continues when used with an IE that doesn't support subtitles
3929 return ret
3930
3931 sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
3932 if not sub_filename_base:
3933 self.to_screen('[info] Skipping writing video subtitles')
3934 return ret
3935 for sub_lang, sub_info in subtitles.items():
3936 sub_format = sub_info['ext']
3937 sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
3938 sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
3939 existing_sub = self.existing_file((sub_filename_final, sub_filename))
3940 if existing_sub:
3941 self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
3942 sub_info['filepath'] = existing_sub
3943 ret.append((existing_sub, sub_filename_final))
3944 continue
3945
3946 self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
3947 if sub_info.get('data') is not None:
3948 try:
3949 # Use newline='' to prevent conversion of newline characters
3950 # See https://github.com/ytdl-org/youtube-dl/issues/10268
3951 with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
3952 subfile.write(sub_info['data'])
3953 sub_info['filepath'] = sub_filename
3954 ret.append((sub_filename, sub_filename_final))
3955 continue
3956 except OSError:
3957 self.report_error(f'Cannot write video subtitles file {sub_filename}')
3958 return None
3959
3960 try:
3961 sub_copy = sub_info.copy()
3962 sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
3963 self.dl(sub_filename, sub_copy, subtitle=True)
3964 sub_info['filepath'] = sub_filename
3965 ret.append((sub_filename, sub_filename_final))
3966 except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
3967 msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
3968 if self.params.get('ignoreerrors') is not True: # False or 'only_download'
3969 if not self.params.get('ignoreerrors'):
3970 self.report_error(msg)
3971 raise DownloadError(msg)
3972 self.report_warning(msg)
3973 return ret
3974
3975 def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
3976 ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
3977 write_all = self.params.get('write_all_thumbnails', False)
3978 thumbnails, ret = [], []
3979 if write_all or self.params.get('writethumbnail', False):
3980 thumbnails = info_dict.get('thumbnails') or []
3981 multiple = write_all and len(thumbnails) > 1
3982
3983 if thumb_filename_base is None:
3984 thumb_filename_base = filename
3985 if thumbnails and not thumb_filename_base:
3986 self.write_debug(f'Skipping writing {label} thumbnail')
3987 return ret
3988
3989 for idx, t in list(enumerate(thumbnails))[::-1]:
3990 thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
3991 thumb_display_id = f'{label} thumbnail {t["id"]}'
3992 thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
3993 thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
3994
3995 existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
3996 if existing_thumb:
3997 self.to_screen('[info] %s is already present' % (
3998 thumb_display_id if multiple else f'{label} thumbnail').capitalize())
3999 t['filepath'] = existing_thumb
4000 ret.append((existing_thumb, thumb_filename_final))
4001 else:
4002 self.to_screen(f'[info] Downloading {thumb_display_id} ...')
4003 try:
4004 uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
4005 self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
4006 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
4007 shutil.copyfileobj(uf, thumbf)
4008 ret.append((thumb_filename, thumb_filename_final))
4009 t['filepath'] = thumb_filename
4010 except network_exceptions as err:
4011 thumbnails.pop(idx)
4012 self.report_warning(f'Unable to download {thumb_display_id}: {err}')
4013 if ret and not write_all:
4014 break
4015 return ret