import collections
import contextlib
import datetime
import errno
import fileinput
import functools
import io
import itertools
import json
import locale
import operator
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import time
import tokenize
import traceback
import unicodedata
import urllib.request
from string import ascii_letters

from .cache import Cache
from .compat import compat_os_name, compat_shlex_quote
from .cookies import load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.common import UnsupportedURLIE
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .postprocessor import _PLUGIN_CLASSES as plugin_postprocessors
from .postprocessor import (
    EmbedThumbnailPP,
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    FFmpegVideoConvertorPP,
    MoveFilesAfterDownloadPP,
    get_postprocessor,
)
from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
from .update import REPOSITORY, current_git_head, detect_variant
from .utils import (
    DEFAULT_OUTTMPL,
    IDENTITY,
    LINK_TEMPLATES,
    MEDIA_EXTENSIONS,
    NO_DEFAULT,
    NUMBER_RE,
    OUTTMPL_TYPES,
    POSTPROCESS_WHEN,
    STR_FORMAT_RE_TMPL,
    STR_FORMAT_TYPES,
    ContentTooShortError,
    DateRange,
    DownloadCancelled,
    DownloadError,
    EntryNotInPlaylist,
    ExistingVideoReached,
    ExtractorError,
    GeoRestrictedError,
    HEADRequest,
    ISO3166Utils,
    LazyList,
    MaxDownloadsReached,
    Namespace,
    PagedList,
    PerRequestProxyHandler,
    PlaylistEntries,
    Popen,
    PostProcessingError,
    ReExtractInfo,
    RejectedVideoReached,
    SameFileError,
    UnavailableVideoError,
    UserNotLive,
    YoutubeDLCookieProcessor,
    YoutubeDLHandler,
    YoutubeDLRedirectHandler,
    age_restricted,
    args_to_str,
    bug_reports_message,
    date_from_str,
    deprecation_warning,
    determine_ext,
    determine_protocol,
    encode_compat_str,
    encodeFilename,
    error_to_compat_str,
    escapeHTML,
    expand_path,
    filter_dict,
    float_or_none,
    format_bytes,
    format_decimal_suffix,
    format_field,
    formatSeconds,
    get_compatible_ext,
    get_domain,
    int_or_none,
    iri_to_uri,
    is_path_like,
    join_nonempty,
    locked_file,
    make_archive_id,
    make_dir,
    make_HTTPS_handler,
    merge_headers,
    network_exceptions,
    number_of_digits,
    orderedSet,
    orderedSet_from_options,
    parse_filesize,
    preferredencoding,
    prepend_extension,
    register_socks_protocols,
    remove_terminal_sequences,
    render_table,
    replace_extension,
    sanitize_filename,
    sanitize_path,
    sanitize_url,
    sanitized_Request,
    std_headers,
    str_or_none,
    strftime_or_none,
    subtitles_filename,
    supports_terminal_sequences,
    system_identifier,
    timetuple_from_msec,
    to_high_limit_path,
    traverse_obj,
    try_call,
    try_get,
    url_basename,
    variadic,
    version_tuple,
    windows_enable_vt_mode,
    write_json_file,
    write_string,
)
from .version import RELEASE_GIT_HEAD, VARIANT, __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL:
    """YoutubeDL class.

    YoutubeDL objects are responsible for downloading the actual video file
    and writing it to disk if the user has requested it, among other tasks.
    In most cases there should be one per program. Given a video URL, the
    downloader does not know how to extract all the needed information
    (that is the InfoExtractors' task), so it has to pass the URL to one
    of them.

    For this, YoutubeDL objects have a method that allows InfoExtractors
    to be registered in a given order. When it is passed a URL, the
    YoutubeDL object hands it to the first InfoExtractor it finds that
    reports being able to handle it. The InfoExtractor extracts all the
    information about the video or videos the URL refers to, and YoutubeDL
    processes the extracted information, possibly using a File Downloader
    to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also registers
    itself as the downloader in charge of the InfoExtractors that are
    added to it, so this is a "mutual registration".

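    A minimal usage sketch (the URL and output template are illustrative
    placeholders):

        from yt_dlp import YoutubeDL

        with YoutubeDL({'outtmpl': '%(title)s [%(id)s].%(ext)s'}) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
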
    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A dict with keys WHEN mapped to a list of templates to
                       print to stdout. The allowed keys are video or any of the
                       items in utils.POSTPROCESS_WHEN.
                       For compatibility, a single list is also accepted
    print_to_file:     A dict with keys WHEN (same as forceprint) mapped to
                       a list of tuples with (template, filename)
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or list_thumbnails is used
    format:            Video format code. See "FORMAT SELECTION" for more details.
                       You can also pass a function. The function takes 'ctx' as
                       argument and returns the formats to download.
                       See "build_format_selector" for an implementation
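                       E.g. a sketch of a callable selector that yields the
                       best format already containing both audio and video
                       (the function name is illustrative):

                           def best_combined(ctx):
                               # ctx['formats'] is sorted worst to best
                               for f in reversed(ctx['formats']):
                                   if f.get('vcodec') != 'none' and f.get('acodec') != 'none':
                                       yield f
                                       break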
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental)
    format_sort:       A list of fields by which to sort the video formats.
                       See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    prefer_free_formats: Whether to prefer video formats with free containers
                       over non-free ones of the same quality.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file
    check_formats:     Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none),
                       'selected' (check selected formats),
                       or None (check only if requested by extractor)
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py)
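                       E.g. (illustrative) {'home': '~/Videos', 'temp': '.tmp'}
                       keeps intermediate files in .tmp and final files in ~/Videos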
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       For compatibility with youtube-dl, a single string can also be used
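                       E.g. (an illustrative value) {'default': '%(title)s [%(id)s].%(ext)s'}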
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be windows compatible
    ignoreerrors:      Do not stop on download/postprocessing errors.
                       Can be 'only_download' to ignore only download errors.
                       Default is 'only_download' for CLI, but False for API
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped
    allowed_extractors: List of regexes to match against extractor names that are allowed
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False
                       For compatibility with youtube-dl,
                       "nooverwrites" may also be used instead
    playlist_items:    Specific indices of playlist to download.
    playlistrandom:    Download playlist items in random order.
    lazy_playlist:     Process playlist entries as they are received.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Print everything to stderr instead of stdout.
    consoletitle:      Display progress in console window's titlebar.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video metadata to a .info.json file
    clean_infojson:    Remove private fields from the infojson
    getcomments:       Extract video comments. This will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc
                       also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. The language can be prefixed with a "-" to
                       exclude it from the requested languages, e.g. ['all', '-live_chat']
    keepvideo:         Keep the video file after post-processing
    daterange:         A DateRange object, download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Unsuitable videos for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  A set, or the name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
    break_per_url:     Whether break_on_reject and break_on_existing
                       should act on each input URL as opposed to the entire queue
    cookiefile:        File name or text stream from where cookies should be read and dumped to
    cookiesfrombrowser: A tuple containing the name of the browser, the profile
                       name/path from where cookies are loaded, the name of the keyring,
                       and the container name, e.g. ('chrome', ) or
                       ('vivaldi', 'default', 'BASICTEXT') or ('firefox', 'default', None, 'Meta')
    legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
                       support RFC 5746 secure renegotiation
    nocheckcertificate: Do not verify SSL certificates
    client_certificate: Path to client certificate file in PEM format. May include the private key
    client_certificate_key: Path to private key file for client certificate
    client_certificate_password: Password for client certificate private key, if encrypted.
                       If not provided and the key is encrypted, yt-dlp will ask interactively
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       (Only supported by some extractors)
    http_headers:      A dictionary of custom headers to be used for all requests
    proxy:             URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic: Print out sent and received HTTP traffic
    default_search:    Prepend this string if an input url is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Whether to resolve and process url_results further
                       * False:     Always process (default)
                       * True:      Never process
                       * 'in_playlist': Do not process inside playlist/multi_video
                       * 'discard': Always process, but don't return the result
                                    from inside playlist/multi_video
                       * 'discard_in_playlist': Same as "discard", but only for
                                    playlists (not multi_video)
    wait_for_video:    If given, wait for scheduled streams to become available.
                       The value should be a tuple containing the range
                       (min_secs, max_secs) to wait between retries
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Allowed values are
                               the entries of utils.POSTPROCESS_WHEN
                               Assumed to be 'post_process' if not given
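                       E.g. (an illustrative entry) to extract audio using FFmpeg:
                       {'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3'}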
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading" or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
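                       E.g. a minimal sketch of such a hook:

                           def hook(d):
                               if d['status'] == 'finished':
                                   print('Finished downloading', d['filename'])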
    postprocessor_hooks: A list of functions that get called on postprocessing
                       progress, with a dictionary with the entries
                       * status: One of "started", "processing", or "finished".
                                 Check this first and ignore unknown values.
                       * postprocessor: Name of the postprocessor
                       * info_dict: The extracted info_dict

                       Progress hooks are guaranteed to be called at least twice
                       (with status "started" and "finished") if the processing is successful.
    merge_output_format: "/" separated list of extensions to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                                           about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called for every video with the signature
                       (info_dict, *, incomplete: bool) -> Optional[str]
                       For backward compatibility with youtube-dl, the signature
                       (info_dict) -> Optional[str] is also allowed.
                       - If it returns a message, the video is ignored.
                       - If it returns None, the video is downloaded.
                       - If it returns utils.NO_DEFAULT, the user is interactively
                         asked whether to download the video.
                       match_filter_func in utils.py is one example of this.
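                       E.g. a sketch that skips videos shorter than a minute
                       (the function name is illustrative):

                           def longer_than_a_minute(info_dict, *, incomplete):
                               duration = info_dict.get('duration')
                               if duration is not None and duration < 60:
                                   return 'The video is too short'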
    no_color:          Do not emit color codes in output.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat, format-sort,
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
                       Refer to __init__.py for their implementation
    progress_template: Dictionary of templates for progress outputs.
                       Allowed keys are 'download', 'postprocess',
                       'download-title' (console title) and 'postprocess-title'.
                       The template is mapped on a dictionary with keys 'progress' and 'info'
    retry_sleep_functions: Dictionary of functions that take the number of attempts
                       as argument and return the time to sleep in seconds.
                       Allowed keys are 'http', 'fragment', 'file_access'
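                       E.g. (illustrative) exponential backoff for HTTP errors:
                       {'http': lambda n: 2 ** n}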
    download_ranges:   A callback function that gets called for every video with
                       the signature (info_dict, ydl) -> Iterable[Section].
                       Only the returned sections will be downloaded.
                       Each Section is a dict with the following keys:
                       * start_time: Start time of the section in seconds
                       * end_time: End time of the section in seconds
                       * title: Section title (Optional)
                       * index: Section number (Optional)
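                       E.g. a sketch downloading only the first 30 seconds:
                       lambda info_dict, ydl: [{'start_time': 0, 'end_time': 30}]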
    force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
    noprogress:        Do not print the progress bar
    live_from_start:   Whether to download livestream videos from the start

    The following parameters are not used by YoutubeDL itself, they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP
                       For compatibility with youtube-dl, a single list of args
                       can also be used

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists to different formats at
                       discontinuities such as ad breaks (default: False)
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       E.g. {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched:      Mark videos watched (even with --simulate). Only for YouTube

    The following options are deprecated and may be removed in the future:

    force_generic_extractor: Force downloader to use the generic extractor
                       - Use allowed_extractors = ['generic', 'default']
    playliststart:     - Use playlist_items
                       Playlist item to start at.
    playlistend:       - Use playlist_items
                       Playlist item to end at.
    playlistreverse:   - Use playlist_items
                       Download playlist items in reverse order.
    forceurl:          - Use forceprint
                       Force printing final URL.
    forcetitle:        - Use forceprint
                       Force printing title.
    forceid:           - Use forceprint
                       Force printing ID.
    forcethumbnail:    - Use forceprint
                       Force printing thumbnail URL.
    forcedescription:  - Use forceprint
                       Force printing description.
    forcefilename:     - Use forceprint
                       Force printing final filename.
    forceduration:     - Use forceprint
                       Force printing duration.
    allsubtitles:      - Use subtitleslangs = ['all']
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    include_ads:       - Doesn't work
                       Download ads as well
    call_home:         - Not implemented
                       Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging.
    post_hooks:        - Register a custom postprocessor
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.
    prefer_ffmpeg:     - avconv support is deprecated
                       If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: - Use extractor_args
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
    """

    _NUMERIC_FIELDS = {
        'width', 'height', 'asr', 'audio_channels', 'fps',
        'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
    }

    _format_fields = {
        # NB: Keep in sync with the docstring of extractor/common.py
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
        'preference', 'language', 'language_preference', 'quality', 'source_preference',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
    }
    _format_selection_exts = {
        'audio': set(MEDIA_EXTENSIONS.common_audio),
        'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
        'storyboards': set(MEDIA_EXTENSIONS.storyboards),
    }

    def __init__(self, params=None, auto_init=True):
        """Create a YoutubeDL object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        if params is None:
            params = {}
        self.params = params
        self._ies = {}
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._num_videos = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)

        windows_enable_vt_mode()
        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            out=stdout,
            error=sys.stderr,
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
        )
        self._allow_colors = Namespace(**{
            type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
            for type_, stream in self._out_files.items_ if type_ != 'console'
        })

        # The code is left like this to be reused for future deprecations
        MIN_SUPPORTED, MIN_RECOMMENDED = (3, 7), (3, 7)
        current_version = sys.version_info[:2]
        if current_version < MIN_RECOMMENDED:
            msg = ('Support for Python version %d.%d has been deprecated. '
                   'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details.'
                   '\n                    You will no longer receive updates on this version')
            if current_version < MIN_SUPPORTED:
                msg = 'Python version %d.%d is no longer supported'
            self.deprecation_warning(
                f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                '         If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        if self.params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = shutil.get_terminal_size().columns
                width_args = [] if width is None else ['-w', str(width)]
                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise
        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        if auto_init and auto_init != 'no_verbose_header':
            self.print_debug_header()

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecated_feature(msg)

        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}

        if auto_init:
            self.add_default_info_extractors()

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))

        # Set http_headers defaults according to std_headers
        self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))

        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                when=when)

        self._setup_opener()
        register_socks_protocols()

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            archive = set()
            if fn is None:
                return archive
            elif not is_path_like(fn):
                return fn

            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
            return archive

        self.archive = preload_download_archive(self.params.get('download_archive'))

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key. It will try to get one from
        the _ies list; if there is no instance, it will create a new one and add
        it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        all_ies = {ie.IE_NAME.lower(): ie for ie in gen_extractor_classes()}
        all_ies['end'] = UnsupportedURLIE()
        try:
            ie_names = orderedSet_from_options(
                self.params.get('allowed_extractors', ['default']), {
                    'all': list(all_ies),
                    'default': [name for name, ie in all_ies.items() if ie._ENABLED],
                }, use_regex=True)
        except re.error as e:
            raise ValueError(f'Wrong regex for allowed_extractors: {e.pattern}')
        for name in ie_names:
            self.add_info_extractor(all_ies[name])
        self.write_debug(f'Loaded {len(ie_names)} extractors')

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
            for pp in pps:
                pp.add_progress_hook(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. '
                                     'Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. '
                                     'Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)

    def to_screen(self, message, skip_eol=False, quiet=None, only_once=False):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            return
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            return
        self._write_string(
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen, only_once=only_once)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)

    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
            return
        self._write_string(code, self._out_files.console)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        else:
            self._send_console_code(f'\033]0;{message}\007')

    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[22;0t')  # Save the title on stack

    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[23;0t')  # Restore the title from stack

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None, is_error=True):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may throw an exception or not
        when errors are found, after printing the message.

        @param tb         If given, is additional traceback information
        @param is_error   Whether to raise error according to ignoreerrors
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not is_error:
            return
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    Styles = Namespace(
        HEADERS='yellow',
        EMPHASIS='light blue',
        FILENAME='green',
        ID='green',
        DELIM='blue',
        ERROR='red',
        WARNING='yellow',
        SUPPRESS='light black',
    )

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        text = str(text)
        if test_encoding:
            original_text = text
            # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
                text = fallback
        return format_text(text, f) if allow_colors else text if fallback is None else fallback

    def _format_out(self, *args, **kwargs):
        return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)

    def _format_screen(self, *args, **kwargs):
        return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)

    def _format_err(self, *args, **kwargs):
        return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message, *, stacklevel=0):
        deprecation_warning(
            message, stacklevel=stacklevel + 1, printer=self.report_error, is_error=False)

    def deprecated_feature(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'Deprecated Feature: {message}')
        self.to_stderr(f'{self._format_err("Deprecated Feature:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, *args, **kwargs):
        '''
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

    def write_debug(self, message, only_once=False):
        '''Log debug message to the logger, or print it to stderr'''
        if not self.params.get('verbose', False):
            return
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']

    def _parse_outtmpl(self):
        sanitize = IDENTITY
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict), '"paths" parameter must be a dictionary'
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$'
        # correspondingly that is not what we want since we need to keep
        # '%%' intact for template dict substitution step. Working around
        # with boundary-alike separator hack.
        sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. E.g. for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err

    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)
        return info_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed
        """

        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int, slice or "{field, ...}"
        FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}
        FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {
            'inner': FIELD_INNER_RE,
            'field': rf'\w*(?:\.{FIELD_INNER_RE})*'
        }
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?x)
            (?P<negate>-)?
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<remaining>
                (?P<alternate>(?<!\\),[^|&)]+)?
                (?:&(?P<replacement>.*?))?
                (?:\|(?P<default>.*?))?
            )$''')
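
        # Illustrative examples of template keys this grammar accepts (a sketch;
        # see the README's "OUTPUT TEMPLATE" section for the authoritative syntax):
        #   %(title)s                plain field traversal
        #   %(duration>%H-%M-%S)s    strftime-style formatting after ">"
        #   %(playlist_index+10)05d  maths using "+"/"-"
        #   %(id,title)s             first alternate after "," when the field is missing
        #   %(is_live&LIVE|VOD)s     "&" replacement when the field has a value,
        #                            "|" default when it does not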

        def _traverse_infodict(fields):
            fields = [f for x in re.split(r'\.({.+?})\.?', fields)
                      for f in ([x] if x.startswith('{') else x.split('.'))]
            for i in (0, -1):
                if fields and not fields[i]:
                    fields.pop(i)

            for i, f in enumerate(fields):
                if not f.startswith('{'):
                    continue
                assert f.endswith('}'), f'No closing brace for {f} in {fields}'
                fields[i] = {k: k.split('.') for k in f[1:-1].split(',')}

            return traverse_obj(info_dict, fields, is_user_input=True, traverse_string=True)

        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
            if sanitize and value == '':
                value = None
            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']
                else NO_DEFAULT))

        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            return repr(obj)

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            initial_field = mobj.group('fields') if mobj else ''
            value, replacement, default = None, None, na
            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                replacement = mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
                else:
                    break

            fmt = outer_mobj.group('format')
            if fmt == 's' and value is not None and key in field_size_compat_map.keys():
                fmt = f'0{field_size_compat_map[key]:d}d'

            value = default if value is None else value if replacement is None else replacement

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
            if fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(
                    value, default=_dumpjson_default,
                    indent=4 if '#' in flags else None, ensure_ascii='+' not in flags), str_fmt
            elif fmt[-1] == 'h':  # html
                value, fmt = escapeHTML(str(value)), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode() % str(value).encode()
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                    value), str_fmt
            elif fmt[-1] == 'D':  # decimal suffix
                num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
                value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
                                              factor=1024 if '#' in flags else 1000)
            elif fmt[-1] == 'S':  # filename sanitization
                value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
            elif fmt[-1] == 'c':
                if value:
                    value = str(value)[0]
                else:
                    fmt = str_fmt
            elif fmt[-1] not in 'rs':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'

            if sanitize:
                if fmt[-1] == 'r':
                    # If value is an object, sanitize might convert it to a string
                    # So we convert it to repr first
                    value, fmt = repr(value), str_fmt
                if fmt[-1] in 'csr':
                    value = sanitizer(initial_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT

    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict

    def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
        assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
        if outtmpl is None:
            outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
        try:
            outtmpl = self._outtmpl_expandpath(outtmpl)
            filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
            if not filename:
                return None

            if tmpl_type in ('', 'temp'):
                final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
                if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
                    filename = replace_extension(filename, ext, final_ext)
            elif tmpl_type:
                force_ext = OUTTMPL_TYPES[tmpl_type]
                if force_ext:
                    filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                no_ext, *ext = filename.rsplit('.', 2)
                filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

    def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
        """Generate the output filename"""
        if outtmpl:
            assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
            dir_type = None
        filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
        if not filename and dir_type not in ('', 'temp'):
            return ''

        if warn:
            if not self.params.get('paths'):
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
        if filename == '-' or not filename:
            return filename

        return self.get_output_path(dir_type, filename)

1352 def _match_entry(self, info_dict, incomplete=False, silent=False):
1353 """ Returns None if the file should be downloaded """
1354
1355 video_title = info_dict.get('title', info_dict.get('id', 'entry'))
1356
1357 def check_filter():
1358 if 'title' in info_dict:
1359 # This can happen when we're just evaluating the playlist
1360 title = info_dict['title']
1361 matchtitle = self.params.get('matchtitle', False)
1362 if matchtitle:
1363 if not re.search(matchtitle, title, re.IGNORECASE):
1364 return '"' + title + '" title did not match pattern "' + matchtitle + '"'
1365 rejecttitle = self.params.get('rejecttitle', False)
1366 if rejecttitle:
1367 if re.search(rejecttitle, title, re.IGNORECASE):
1368 return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
1369 date = info_dict.get('upload_date')
1370 if date is not None:
1371 dateRange = self.params.get('daterange', DateRange())
1372 if date not in dateRange:
1373 return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
1374 view_count = info_dict.get('view_count')
1375 if view_count is not None:
1376 min_views = self.params.get('min_views')
1377 if min_views is not None and view_count < min_views:
1378 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
1379 max_views = self.params.get('max_views')
1380 if max_views is not None and view_count > max_views:
1381 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
1382 if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
1383 return 'Skipping "%s" because it is age restricted' % video_title
1384
1385 match_filter = self.params.get('match_filter')
1386 if match_filter is not None:
1387 try:
1388 ret = match_filter(info_dict, incomplete=incomplete)
1389 except TypeError:
1390 # For backward compatibility
1391 ret = None if incomplete else match_filter(info_dict)
1392 if ret is NO_DEFAULT:
1393 while True:
1394 filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
1395 reply = input(self._format_screen(
1396 f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
1397 if reply in {'y', ''}:
1398 return None
1399 elif reply == 'n':
1400 return f'Skipping {video_title}'
1401 elif ret is not None:
1402 return ret
1403 return None
1404
1405 if self.in_download_archive(info_dict):
1406 reason = '%s has already been recorded in the archive' % video_title
1407 break_opt, break_err = 'break_on_existing', ExistingVideoReached
1408 else:
1409 reason = check_filter()
1410 break_opt, break_err = 'break_on_reject', RejectedVideoReached
1411 if reason is not None:
1412 if not silent:
1413 self.to_screen('[download] ' + reason)
1414 if self.params.get(break_opt, False):
1415 raise break_err()
1416 return reason
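# Note: the download-archive check above takes precedence over the other
# filters, so --break-on-existing can trigger even for entries that a
# title/date/view-count filter would also have rejected.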
1417
1418 @staticmethod
1419 def add_extra_info(info_dict, extra_info):
1420 '''Set the keys from extra_info in info dict if they are missing'''
1421 for key, value in extra_info.items():
1422 info_dict.setdefault(key, value)
1423
1424 def extract_info(self, url, download=True, ie_key=None, extra_info=None,
1425 process=True, force_generic_extractor=False):
1426 """
1427 Extract and return the information dictionary of the URL
1428
1429 Arguments:
1430 @param url URL to extract
1431
1432 Keyword arguments:
1433 @param download Whether to download videos
1434 @param process Whether to resolve all unresolved references (URLs, playlist items).
1435 Must be True for download to work
1436 @param ie_key Use only the extractor with this key
1437
1438 @param extra_info Dictionary containing the extra values to add to the info (For internal use only)
1439 @param force_generic_extractor Force using the generic extractor (Deprecated; use ie_key='Generic')
1440 """
1441
1442 if extra_info is None:
1443 extra_info = {}
1444
1445 if not ie_key and force_generic_extractor:
1446 ie_key = 'Generic'
1447
1448 if ie_key:
1449 ies = {ie_key: self._ies[ie_key]} if ie_key in self._ies else {}
1450 else:
1451 ies = self._ies
1452
1453 for key, ie in ies.items():
1454 if not ie.suitable(url):
1455 continue
1456
1457 if not ie.working():
1458 self.report_warning('The program functionality for this site has been marked as broken, '
1459 'and will probably not work.')
1460
1461 temp_id = ie.get_temp_id(url)
1462 if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': key}):
1463 self.to_screen(f'[{key}] {temp_id}: has already been recorded in the archive')
1464 if self.params.get('break_on_existing', False):
1465 raise ExistingVideoReached()
1466 break
1467 return self.__extract_info(url, self.get_info_extractor(key), download, extra_info, process)
1468 else:
1469 extractors_restricted = self.params.get('allowed_extractors') not in (None, ['default'])
1470 self.report_error(f'No suitable extractor{format_field(ie_key, None, " (%s)")} found for URL {url}',
1471 tb=False if extractors_restricted else None)
1472
1473 def _handle_extraction_exceptions(func):
1474 @functools.wraps(func)
1475 def wrapper(self, *args, **kwargs):
1476 while True:
1477 try:
1478 return func(self, *args, **kwargs)
1479 except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
1480 raise
1481 except ReExtractInfo as e:
1482 if e.expected:
1483 self.to_screen(f'{e}; Re-extracting data')
1484 else:
1485 self.to_stderr('\r')
1486 self.report_warning(f'{e}; Re-extracting data')
1487 continue
1488 except GeoRestrictedError as e:
1489 msg = e.msg
1490 if e.countries:
1491 msg += '\nThis video is available in %s.' % ', '.join(
1492 map(ISO3166Utils.short2full, e.countries))
1493 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around this.'
1494 self.report_error(msg)
1495 except ExtractorError as e: # An error we somewhat expected
1496 self.report_error(str(e), e.format_traceback())
1497 except Exception as e:
1498 if self.params.get('ignoreerrors'):
1499 self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
1500 else:
1501 raise
1502 break
1503 return wrapper
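# Behaviour sketch of the decorator above: only ReExtractInfo loops back for
# another attempt. DownloadCancelled and the LazyList/PagedList index errors
# propagate unchanged; geo-restriction and extractor errors are reported and
# swallowed; any other exception is re-raised unless ignoreerrors is set.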
1504
1505 def _wait_for_video(self, ie_result={}):
1506 if (not self.params.get('wait_for_video')
1507 or ie_result.get('_type', 'video') != 'video'
1508 or ie_result.get('formats') or ie_result.get('url')):
1509 return
1510
1511 format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
1512 last_msg = ''
1513
1514 def progress(msg):
1515 nonlocal last_msg
1516 full_msg = f'{msg}\n'
1517 if not self.params.get('noprogress'):
1518 full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
1519 elif last_msg:
1520 return
1521 self.to_screen(full_msg, skip_eol=True)
1522 last_msg = msg
1523
1524 min_wait, max_wait = self.params.get('wait_for_video')
1525 diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
1526 if diff is None and ie_result.get('live_status') == 'is_upcoming':
1527 diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
1528 self.report_warning('Release time of video is not known')
1529 elif ie_result and (diff or 0) <= 0:
1530 self.report_warning('Video should already be available according to extracted info')
1531 diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
1532 self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')
1533
1534 wait_till = time.time() + diff
1535 try:
1536 while True:
1537 diff = wait_till - time.time()
1538 if diff <= 0:
1539 progress('')
1540 raise ReExtractInfo('[wait] Wait period ended', expected=True)
1541 progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
1542 time.sleep(1)
1543 except KeyboardInterrupt:
1544 progress('')
1545 raise ReExtractInfo('[wait] Interrupted by user', expected=True)
1546 except BaseException as e:
1547 if not isinstance(e, ReExtractInfo):
1548 self.to_screen('')
1549 raise
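# Sketch of the wait logic above: with --wait-for-video MIN[-MAX], the delay
# is release_timestamp - now clamped to [MIN, MAX]; when the release time is
# unknown for an upcoming live, a random duration between MIN and MAX is
# picked instead. Ctrl+C skips the wait via ReExtractInfo.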
1550
1551 @_handle_extraction_exceptions
1552 def __extract_info(self, url, ie, download, extra_info, process):
1553 try:
1554 ie_result = ie.extract(url)
1555 except UserNotLive as e:
1556 if process:
1557 if self.params.get('wait_for_video'):
1558 self.report_warning(e)
1559 self._wait_for_video()
1560 raise
1561 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
1562 self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
1563 return
1564 if isinstance(ie_result, list):
1565 # Backwards compatibility: old IE result format
1566 ie_result = {
1567 '_type': 'compat_list',
1568 'entries': ie_result,
1569 }
1570 if extra_info.get('original_url'):
1571 ie_result.setdefault('original_url', extra_info['original_url'])
1572 self.add_default_extra_info(ie_result, ie, url)
1573 if process:
1574 self._wait_for_video(ie_result)
1575 return self.process_ie_result(ie_result, download, extra_info)
1576 else:
1577 return ie_result
1578
1579 def add_default_extra_info(self, ie_result, ie, url):
1580 if url is not None:
1581 self.add_extra_info(ie_result, {
1582 'webpage_url': url,
1583 'original_url': url,
1584 })
1585 webpage_url = ie_result.get('webpage_url')
1586 if webpage_url:
1587 self.add_extra_info(ie_result, {
1588 'webpage_url_basename': url_basename(webpage_url),
1589 'webpage_url_domain': get_domain(webpage_url),
1590 })
1591 if ie is not None:
1592 self.add_extra_info(ie_result, {
1593 'extractor': ie.IE_NAME,
1594 'extractor_key': ie.ie_key(),
1595 })
1596
1597 def process_ie_result(self, ie_result, download=True, extra_info=None):
1598 """
1599 Take the result of the ie (may be modified) and resolve all unresolved
1600 references (URLs, playlist items).
1601
1602 It will also download the videos if 'download' is True.
1603 Returns the resolved ie_result.
1604 """
1605 if extra_info is None:
1606 extra_info = {}
1607 result_type = ie_result.get('_type', 'video')
1608
1609 if result_type in ('url', 'url_transparent'):
1610 ie_result['url'] = sanitize_url(
1611 ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
1612 if ie_result.get('original_url'):
1613 extra_info.setdefault('original_url', ie_result['original_url'])
1614
1615 extract_flat = self.params.get('extract_flat', False)
1616 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1617 or extract_flat is True):
1618 info_copy = ie_result.copy()
1619 ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
1620 if ie and not ie_result.get('id'):
1621 info_copy['id'] = ie.get_temp_id(ie_result['url'])
1622 self.add_default_extra_info(info_copy, ie, ie_result['url'])
1623 self.add_extra_info(info_copy, extra_info)
1624 info_copy, _ = self.pre_process(info_copy)
1625 self._fill_common_fields(info_copy, False)
1626 self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
1627 self._raise_pending_errors(info_copy)
1628 if self.params.get('force_write_download_archive', False):
1629 self.record_download_archive(info_copy)
1630 return ie_result
1631
1632 if result_type == 'video':
1633 self.add_extra_info(ie_result, extra_info)
1634 ie_result = self.process_video_result(ie_result, download=download)
1635 self._raise_pending_errors(ie_result)
1636 additional_urls = (ie_result or {}).get('additional_urls')
1637 if additional_urls:
1638 # TODO: Improve MetadataParserPP to allow setting a list
1639 if isinstance(additional_urls, str):
1640 additional_urls = [additional_urls]
1641 self.to_screen(
1642 '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
1643 self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
1644 ie_result['additional_entries'] = [
1645 self.extract_info(
1646 url, download, extra_info=extra_info,
1647 force_generic_extractor=self.params.get('force_generic_extractor'))
1648 for url in additional_urls
1649 ]
1650 return ie_result
1651 elif result_type == 'url':
1652 # We have to add extra_info to the results because it may be
1653 # contained in a playlist
1654 return self.extract_info(
1655 ie_result['url'], download,
1656 ie_key=ie_result.get('ie_key'),
1657 extra_info=extra_info)
1658 elif result_type == 'url_transparent':
1659 # Use the information from the embedding page
1660 info = self.extract_info(
1661 ie_result['url'], ie_key=ie_result.get('ie_key'),
1662 extra_info=extra_info, download=False, process=False)
1663
1664 # extract_info may return None when ignoreerrors is enabled and
1665 # extraction failed with an error; don't crash and return early
1666 # in this case
1667 if not info:
1668 return info
1669
1670 exempted_fields = {'_type', 'url', 'ie_key'}
1671 if not ie_result.get('section_end') and ie_result.get('section_start') is None:
1672 # For video clips, the id etc of the clip extractor should be used
1673 exempted_fields |= {'id', 'extractor', 'extractor_key'}
1674
1675 new_result = info.copy()
1676 new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))
1677
1678 # Extracted info may not be a video result (i.e.
1679 # info.get('_type', 'video') != 'video') but rather a url or
1680 # url_transparent. In such cases, the outer metadata (from ie_result)
1681 # should be propagated to the inner one (info). For this to happen,
1682 # the _type of info should be overridden with url_transparent. This
1683 # fixes the issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
1684 if new_result.get('_type') == 'url':
1685 new_result['_type'] = 'url_transparent'
1686
1687 return self.process_ie_result(
1688 new_result, download=download, extra_info=extra_info)
1689 elif result_type in ('playlist', 'multi_video'):
1690 # Protect from infinite recursion due to recursively nested playlists
1691 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
1692 webpage_url = ie_result.get('webpage_url') # Playlists may not have a webpage_url
1693 if webpage_url and webpage_url in self._playlist_urls:
1694 self.to_screen(
1695 '[download] Skipping already downloaded playlist: %s'
1696 % (ie_result.get('title') or ie_result.get('id')))
1697 return
1698
1699 self._playlist_level += 1
1700 self._playlist_urls.add(webpage_url)
1701 self._fill_common_fields(ie_result, False)
1702 self._sanitize_thumbnails(ie_result)
1703 try:
1704 return self.__process_playlist(ie_result, download)
1705 finally:
1706 self._playlist_level -= 1
1707 if not self._playlist_level:
1708 self._playlist_urls.clear()
1709 elif result_type == 'compat_list':
1710 self.report_warning(
1711 'Extractor %s returned a compat_list result. '
1712 'It needs to be updated.' % ie_result.get('extractor'))
1713
1714 def _fixup(r):
1715 self.add_extra_info(r, {
1716 'extractor': ie_result['extractor'],
1717 'webpage_url': ie_result['webpage_url'],
1718 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1719 'webpage_url_domain': get_domain(ie_result['webpage_url']),
1720 'extractor_key': ie_result['extractor_key'],
1721 })
1722 return r
1723 ie_result['entries'] = [
1724 self.process_ie_result(_fixup(r), download, extra_info)
1725 for r in ie_result['entries']
1726 ]
1727 return ie_result
1728 else:
1729 raise Exception('Invalid result type: %s' % result_type)
1730
1731 def _ensure_dir_exists(self, path):
1732 return make_dir(path, self.report_error)
1733
1734 @staticmethod
1735 def _playlist_infodict(ie_result, strict=False, **kwargs):
1736 info = {
1737 'playlist_count': ie_result.get('playlist_count'),
1738 'playlist': ie_result.get('title') or ie_result.get('id'),
1739 'playlist_id': ie_result.get('id'),
1740 'playlist_title': ie_result.get('title'),
1741 'playlist_uploader': ie_result.get('uploader'),
1742 'playlist_uploader_id': ie_result.get('uploader_id'),
1743 **kwargs,
1744 }
1745 if strict:
1746 return info
1747 if ie_result.get('webpage_url'):
1748 info.update({
1749 'webpage_url': ie_result['webpage_url'],
1750 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1751 'webpage_url_domain': get_domain(ie_result['webpage_url']),
1752 })
1753 return {
1754 **info,
1755 'playlist_index': 0,
1756 '__last_playlist_index': max(ie_result['requested_entries'] or (0, 0)),
1757 'extractor': ie_result['extractor'],
1758 'extractor_key': ie_result['extractor_key'],
1759 }
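# These keys back the playlist-related output-template fields such as
# '%(playlist_title)s' and '%(playlist_index)s'; 'playlist_index' and
# 'playlist_autonumber' are then overwritten per entry in __process_playlist.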
1760
1761 def __process_playlist(self, ie_result, download):
1762 """Process each entry in the playlist"""
1763 assert ie_result['_type'] in ('playlist', 'multi_video')
1764
1765 common_info = self._playlist_infodict(ie_result, strict=True)
1766 title = common_info.get('playlist') or '<Untitled>'
1767 if self._match_entry(common_info, incomplete=True) is not None:
1768 return
1769 self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')
1770
1771 all_entries = PlaylistEntries(self, ie_result)
1772 entries = orderedSet(all_entries.get_requested_items(), lazy=True)
1773
1774 lazy = self.params.get('lazy_playlist')
1775 if lazy:
1776 resolved_entries, n_entries = [], 'N/A'
1777 ie_result['requested_entries'], ie_result['entries'] = None, None
1778 else:
1779 entries = resolved_entries = list(entries)
1780 n_entries = len(resolved_entries)
1781 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
1782 if not ie_result.get('playlist_count'):
1783 # Better to do this after potentially exhausting entries
1784 ie_result['playlist_count'] = all_entries.get_full_count()
1785
1786 extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
1787 ie_copy = collections.ChainMap(ie_result, extra)
1788
1789 _infojson_written = False
1790 write_playlist_files = self.params.get('allow_playlist_files', True)
1791 if write_playlist_files and self.params.get('list_thumbnails'):
1792 self.list_thumbnails(ie_result)
1793 if write_playlist_files and not self.params.get('simulate'):
1794 _infojson_written = self._write_info_json(
1795 'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
1796 if _infojson_written is None:
1797 return
1798 if self._write_description('playlist', ie_result,
1799 self.prepare_filename(ie_copy, 'pl_description')) is None:
1800 return
1801 # TODO: This should be passed to ThumbnailsConvertor if necessary
1802 self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))
1803
1804 if lazy:
1805 if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
1806 self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
1807 elif self.params.get('playlistreverse'):
1808 entries.reverse()
1809 elif self.params.get('playlistrandom'):
1810 random.shuffle(entries)
1811
1812 self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} videos'
1813 f'{format_field(ie_result, "playlist_count", " of %s")}')
1814
1815 keep_resolved_entries = self.params.get('extract_flat') != 'discard'
1816 if self.params.get('extract_flat') == 'discard_in_playlist':
1817 keep_resolved_entries = ie_result['_type'] != 'playlist'
1818 if keep_resolved_entries:
1819 self.write_debug('The information of all playlist entries will be held in memory')
1820
1821 failures = 0
1822 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
1823 for i, (playlist_index, entry) in enumerate(entries):
1824 if lazy:
1825 resolved_entries.append((playlist_index, entry))
1826 if not entry:
1827 continue
1828
1829 entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
1830 if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
1831 playlist_index = ie_result['requested_entries'][i]
1832
1833 entry_copy = collections.ChainMap(entry, {
1834 **common_info,
1835 'n_entries': int_or_none(n_entries),
1836 'playlist_index': playlist_index,
1837 'playlist_autonumber': i + 1,
1838 })
1839
1840 if self._match_entry(entry_copy, incomplete=True) is not None:
1841 # For compatibility with youtube-dl. See https://github.com/yt-dlp/yt-dlp/issues/4369
1842 resolved_entries[i] = (playlist_index, NO_DEFAULT)
1843 continue
1844
1845 self.to_screen('[download] Downloading video %s of %s' % (
1846 self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
1847
1848 extra.update({
1849 'playlist_index': playlist_index,
1850 'playlist_autonumber': i + 1,
1851 })
1852 entry_result = self.__process_iterable_entry(entry, download, extra)
1853 if not entry_result:
1854 failures += 1
1855 if failures >= max_failures:
1856 self.report_error(
1857 f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
1858 break
1859 if keep_resolved_entries:
1860 resolved_entries[i] = (playlist_index, entry_result)
1861
1862 # Update with processed data
1863 ie_result['requested_entries'] = [i for i, e in resolved_entries if e is not NO_DEFAULT]
1864 ie_result['entries'] = [e for _, e in resolved_entries if e is not NO_DEFAULT]
1865
1866 # Write the updated info to json
1867 if _infojson_written is True and self._write_info_json(
1868 'updated playlist', ie_result,
1869 self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
1870 return
1871
1872 ie_result = self.run_all_pps('playlist', ie_result)
1873 self.to_screen(f'[download] Finished downloading playlist: {title}')
1874 return ie_result
1875
1876 @_handle_extraction_exceptions
1877 def __process_iterable_entry(self, entry, download, extra_info):
1878 return self.process_ie_result(
1879 entry, download=download, extra_info=extra_info)
1880
1881 def _build_format_filter(self, filter_spec):
1882 " Returns a function to filter the formats according to the filter_spec "
1883
1884 OPERATORS = {
1885 '<': operator.lt,
1886 '<=': operator.le,
1887 '>': operator.gt,
1888 '>=': operator.ge,
1889 '=': operator.eq,
1890 '!=': operator.ne,
1891 }
1892 operator_rex = re.compile(r'''(?x)\s*
1893 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
1894 (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1895 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
1896 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
1897 m = operator_rex.fullmatch(filter_spec)
1898 if m:
1899 try:
1900 comparison_value = int(m.group('value'))
1901 except ValueError:
1902 comparison_value = parse_filesize(m.group('value'))
1903 if comparison_value is None:
1904 comparison_value = parse_filesize(m.group('value') + 'B')
1905 if comparison_value is None:
1906 raise ValueError(
1907 'Invalid value %r in format specification %r' % (
1908 m.group('value'), filter_spec))
1909 op = OPERATORS[m.group('op')]
1910
1911 if not m:
1912 STR_OPERATORS = {
1913 '=': operator.eq,
1914 '^=': lambda attr, value: attr.startswith(value),
1915 '$=': lambda attr, value: attr.endswith(value),
1916 '*=': lambda attr, value: value in attr,
1917 '~=': lambda attr, value: value.search(attr) is not None
1918 }
1919 str_operator_rex = re.compile(r'''(?x)\s*
1920 (?P<key>[a-zA-Z0-9._-]+)\s*
1921 (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
1922 (?P<quote>["'])?
1923 (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
1924 (?(quote)(?P=quote))\s*
1925 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
1926 m = str_operator_rex.fullmatch(filter_spec)
1927 if m:
1928 if m.group('op') == '~=':
1929 comparison_value = re.compile(m.group('value'))
1930 else:
1931 comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
1932 str_op = STR_OPERATORS[m.group('op')]
1933 if m.group('negation'):
1934 op = lambda attr, value: not str_op(attr, value)
1935 else:
1936 op = str_op
1937
1938 if not m:
1939 raise SyntaxError('Invalid filter specification %r' % filter_spec)
1940
1941 def _filter(f):
1942 actual_value = f.get(m.group('key'))
1943 if actual_value is None:
1944 return m.group('none_inclusive')
1945 return op(actual_value, comparison_value)
1946 return _filter
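# Illustrative filter specs accepted here (cf. the format-selection docs):
#   height<=720          numeric comparison on a known numeric field
#   filesize>100M        value parsed with parse_filesize
#   ext=mp4              string equality
#   format_id!*=dash     negated 'contains' on a string field
# Appending '?' to the operator (e.g. height<=?720) also keeps formats where
# the field is missing (the 'none_inclusive' group above).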
1947
1948 def _check_formats(self, formats):
1949 for f in formats:
1950 self.to_screen('[info] Testing format %s' % f['format_id'])
1951 path = self.get_output_path('temp')
1952 if not self._ensure_dir_exists(f'{path}/'):
1953 continue
1954 temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
1955 temp_file.close()
1956 try:
1957 success, _ = self.dl(temp_file.name, f, test=True)
1958 except (DownloadError, OSError, ValueError) + network_exceptions:
1959 success = False
1960 finally:
1961 if os.path.exists(temp_file.name):
1962 try:
1963 os.remove(temp_file.name)
1964 except OSError:
1965 self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
1966 if success:
1967 yield f
1968 else:
1969 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
1970
1971 def _default_format_spec(self, info_dict, download=True):
1972
1973 def can_merge():
1974 merger = FFmpegMergerPP(self)
1975 return merger.available and merger.can_merge()
1976
1977 prefer_best = (
1978 not self.params.get('simulate')
1979 and download
1980 and (
1981 not can_merge()
1982 or info_dict.get('is_live') and not self.params.get('live_from_start')
1983 or self.params['outtmpl']['default'] == '-'))
1984 compat = (
1985 prefer_best
1986 or self.params.get('allow_multiple_audio_streams', False)
1987 or 'format-spec' in self.params['compat_opts'])
1988
1989 return (
1990 'best/bestvideo+bestaudio' if prefer_best
1991 else 'bestvideo*+bestaudio/best' if not compat
1992 else 'bestvideo+bestaudio/best')
1993
1994 def build_format_selector(self, format_spec):
1995 def syntax_error(note, start):
1996 message = (
1997 'Invalid format specification: '
1998 '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
1999 return SyntaxError(message)
2000
2001 PICKFIRST = 'PICKFIRST'
2002 MERGE = 'MERGE'
2003 SINGLE = 'SINGLE'
2004 GROUP = 'GROUP'
2005 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
2006
2007 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
2008 'video': self.params.get('allow_multiple_video_streams', False)}
2009
2010 check_formats = self.params.get('check_formats') == 'selected'
2011
2012 def _parse_filter(tokens):
2013 filter_parts = []
2014 for type, string, start, _, _ in tokens:
2015 if type == tokenize.OP and string == ']':
2016 return ''.join(filter_parts)
2017 else:
2018 filter_parts.append(string)
2019
2020 def _remove_unused_ops(tokens):
2021 # Remove operators that we don't use and join them with the surrounding strings.
2022 # E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
2023 ALLOWED_OPS = ('/', '+', ',', '(', ')')
2024 last_string, last_start, last_end, last_line = None, None, None, None
2025 for type, string, start, end, line in tokens:
2026 if type == tokenize.OP and string == '[':
2027 if last_string:
2028 yield tokenize.NAME, last_string, last_start, last_end, last_line
2029 last_string = None
2030 yield type, string, start, end, line
2031 # everything inside brackets will be handled by _parse_filter
2032 for type, string, start, end, line in tokens:
2033 yield type, string, start, end, line
2034 if type == tokenize.OP and string == ']':
2035 break
2036 elif type == tokenize.OP and string in ALLOWED_OPS:
2037 if last_string:
2038 yield tokenize.NAME, last_string, last_start, last_end, last_line
2039 last_string = None
2040 yield type, string, start, end, line
2041 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
2042 if not last_string:
2043 last_string = string
2044 last_start = start
2045 last_end = end
2046 else:
2047 last_string += string
2048 if last_string:
2049 yield tokenize.NAME, last_string, last_start, last_end, last_line
2050
2051 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
2052 selectors = []
2053 current_selector = None
2054 for type, string, start, _, _ in tokens:
2055 # ENCODING is only defined in python 3.x
2056 if type == getattr(tokenize, 'ENCODING', None):
2057 continue
2058 elif type in [tokenize.NAME, tokenize.NUMBER]:
2059 current_selector = FormatSelector(SINGLE, string, [])
2060 elif type == tokenize.OP:
2061 if string == ')':
2062 if not inside_group:
2063 # ')' will be handled by the parentheses group
2064 tokens.restore_last_token()
2065 break
2066 elif inside_merge and string in ['/', ',']:
2067 tokens.restore_last_token()
2068 break
2069 elif inside_choice and string == ',':
2070 tokens.restore_last_token()
2071 break
2072 elif string == ',':
2073 if not current_selector:
2074 raise syntax_error('"," must follow a format selector', start)
2075 selectors.append(current_selector)
2076 current_selector = None
2077 elif string == '/':
2078 if not current_selector:
2079 raise syntax_error('"/" must follow a format selector', start)
2080 first_choice = current_selector
2081 second_choice = _parse_format_selection(tokens, inside_choice=True)
2082 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
2083 elif string == '[':
2084 if not current_selector:
2085 current_selector = FormatSelector(SINGLE, 'best', [])
2086 format_filter = _parse_filter(tokens)
2087 current_selector.filters.append(format_filter)
2088 elif string == '(':
2089 if current_selector:
2090 raise syntax_error('Unexpected "("', start)
2091 group = _parse_format_selection(tokens, inside_group=True)
2092 current_selector = FormatSelector(GROUP, group, [])
2093 elif string == '+':
2094 if not current_selector:
2095 raise syntax_error('Unexpected "+"', start)
2096 selector_1 = current_selector
2097 selector_2 = _parse_format_selection(tokens, inside_merge=True)
2098 if not selector_2:
2099 raise syntax_error('Expected a selector', start)
2100 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
2101 else:
2102 raise syntax_error(f'Operator not recognized: "{string}"', start)
2103 elif type == tokenize.ENDMARKER:
2104 break
2105 if current_selector:
2106 selectors.append(current_selector)
2107 return selectors
2108
2109 def _merge(formats_pair):
2110 format_1, format_2 = formats_pair
2111
2112 formats_info = []
2113 formats_info.extend(format_1.get('requested_formats', (format_1,)))
2114 formats_info.extend(format_2.get('requested_formats', (format_2,)))
2115
2116 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
2117 get_no_more = {'video': False, 'audio': False}
2118 for (i, fmt_info) in enumerate(formats_info):
2119 if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
2120 formats_info.pop(i)
2121 continue
2122 for aud_vid in ['audio', 'video']:
2123 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
2124 if get_no_more[aud_vid]:
2125 formats_info.pop(i)
2126 break
2127 get_no_more[aud_vid] = True
2128
2129 if len(formats_info) == 1:
2130 return formats_info[0]
2131
2132 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
2133 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
2134
2135 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
2136 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
2137
2138 output_ext = get_compatible_ext(
2139 vcodecs=[f.get('vcodec') for f in video_fmts],
2140 acodecs=[f.get('acodec') for f in audio_fmts],
2141 vexts=[f['ext'] for f in video_fmts],
2142 aexts=[f['ext'] for f in audio_fmts],
2143 preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
2144 or self.params.get('prefer_free_formats') and ('webm', 'mkv')))
2145
2146 filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
2147
2148 new_dict = {
2149 'requested_formats': formats_info,
2150 'format': '+'.join(filtered('format')),
2151 'format_id': '+'.join(filtered('format_id')),
2152 'ext': output_ext,
2153 'protocol': '+'.join(map(determine_protocol, formats_info)),
2154 'language': '+'.join(orderedSet(filtered('language'))) or None,
2155 'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
2156 'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
2157 'tbr': sum(filtered('tbr', 'vbr', 'abr')),
2158 }
2159
2160 if the_only_video:
2161 new_dict.update({
2162 'width': the_only_video.get('width'),
2163 'height': the_only_video.get('height'),
2164 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
2165 'fps': the_only_video.get('fps'),
2166 'dynamic_range': the_only_video.get('dynamic_range'),
2167 'vcodec': the_only_video.get('vcodec'),
2168 'vbr': the_only_video.get('vbr'),
2169 'stretched_ratio': the_only_video.get('stretched_ratio'),
2170 })
2171
2172 if the_only_audio:
2173 new_dict.update({
2174 'acodec': the_only_audio.get('acodec'),
2175 'abr': the_only_audio.get('abr'),
2176 'asr': the_only_audio.get('asr'),
2177 'audio_channels': the_only_audio.get('audio_channels')
2178 })
2179
2180 return new_dict
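# The merged pseudo-format keeps its components in 'requested_formats' and
# joins their ids with '+': merging (hypothetical) formats '137' and '140'
# yields format_id '137+140', with tbr summed and codec/resolution fields
# taken from the single video/audio stream where unambiguous.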
2181
2182 def _check_formats(formats):
2183 if not check_formats:
2184 yield from formats
2185 return
2186 yield from self._check_formats(formats)
2187
2188 def _build_selector_function(selector):
2189 if isinstance(selector, list): # ,
2190 fs = [_build_selector_function(s) for s in selector]
2191
2192 def selector_function(ctx):
2193 for f in fs:
2194 yield from f(ctx)
2195 return selector_function
2196
2197 elif selector.type == GROUP: # ()
2198 selector_function = _build_selector_function(selector.selector)
2199
2200 elif selector.type == PICKFIRST: # /
2201 fs = [_build_selector_function(s) for s in selector.selector]
2202
2203 def selector_function(ctx):
2204 for f in fs:
2205 picked_formats = list(f(ctx))
2206 if picked_formats:
2207 return picked_formats
2208 return []
2209
2210 elif selector.type == MERGE: # +
2211 selector_1, selector_2 = map(_build_selector_function, selector.selector)
2212
2213 def selector_function(ctx):
2214 for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
2215 yield _merge(pair)
2216
2217 elif selector.type == SINGLE: # atom
2218 format_spec = selector.selector or 'best'
2219
2220 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
2221 if format_spec == 'all':
2222 def selector_function(ctx):
2223 yield from _check_formats(ctx['formats'][::-1])
2224 elif format_spec == 'mergeall':
2225 def selector_function(ctx):
2226 formats = list(_check_formats(
2227 f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
2228 if not formats:
2229 return
2230 merged_format = formats[-1]
2231 for f in formats[-2::-1]:
2232 merged_format = _merge((merged_format, f))
2233 yield merged_format
2234
2235 else:
2236 format_fallback, separate_fallback, format_reverse, format_idx = False, None, True, 1
2237 mobj = re.match(
2238 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
2239 format_spec)
2240 if mobj is not None:
2241 format_idx = int_or_none(mobj.group('n'), default=1)
2242 format_reverse = mobj.group('bw')[0] == 'b'
2243 format_type = (mobj.group('type') or [None])[0]
2244 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
2245 format_modified = mobj.group('mod') is not None
2246
2247 format_fallback = not format_type and not format_modified # for b, w
2248 _filter_f = (
2249 (lambda f: f.get('%scodec' % format_type) != 'none')
2250 if format_type and format_modified # bv*, ba*, wv*, wa*
2251 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
2252 if format_type # bv, ba, wv, wa
2253 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
2254 if not format_modified # b, w
2255 else lambda f: True) # b*, w*
2256 filter_f = lambda f: _filter_f(f) and (
2257 f.get('vcodec') != 'none' or f.get('acodec') != 'none')
2258 else:
2259 if format_spec in self._format_selection_exts['audio']:
2260 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
2261 elif format_spec in self._format_selection_exts['video']:
2262 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
2263 separate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
2264 elif format_spec in self._format_selection_exts['storyboards']:
2265 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
2266 else:
2267 filter_f = lambda f: f.get('format_id') == format_spec # id
2268
2269 def selector_function(ctx):
2270 formats = list(ctx['formats'])
2271 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
2272 if not matches:
2273 if format_fallback and ctx['incomplete_formats']:
2274 # for extractors with incomplete formats (audio-only (soundcloud)
2275 # or video-only (imgur)), best/worst will fall back to the
2276 # best/worst {video,audio}-only format
2277 matches = formats
2278 elif separate_fallback and not ctx['has_merged_format']:
2279 # for compatibility with youtube-dl when there is no pre-merged format
2280 matches = list(filter(separate_fallback, formats))
2281 matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
2282 try:
2283 yield matches[format_idx - 1]
2284 except LazyList.IndexError:
2285 return
2286
2287 filters = [self._build_format_filter(f) for f in selector.filters]
2288
2289 def final_selector(ctx):
2290 ctx_copy = dict(ctx)
2291 for _filter in filters:
2292 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
2293 return selector_function(ctx_copy)
2294 return final_selector
2295
2296 stream = io.BytesIO(format_spec.encode())
2297 try:
2298 tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
2299 except tokenize.TokenError:
2300 raise syntax_error('Missing closing/opening brackets or parentheses', (0, len(format_spec)))
2301
2302 class TokenIterator:
2303 def __init__(self, tokens):
2304 self.tokens = tokens
2305 self.counter = 0
2306
2307 def __iter__(self):
2308 return self
2309
2310 def __next__(self):
2311 if self.counter >= len(self.tokens):
2312 raise StopIteration()
2313 value = self.tokens[self.counter]
2314 self.counter += 1
2315 return value
2316
2317 next = __next__
2318
2319 def restore_last_token(self):
2320 self.counter -= 1
2321
2322 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
2323 return _build_selector_function(parsed_selector)
2324
2325 def _calc_headers(self, info_dict):
2326 res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})
2327
2328 cookies = self._calc_cookies(info_dict['url'])
2329 if cookies:
2330 res['Cookie'] = cookies
2331
2332 if 'X-Forwarded-For' not in res:
2333 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
2334 if x_forwarded_for_ip:
2335 res['X-Forwarded-For'] = x_forwarded_for_ip
2336
2337 return res
2338
2339 def _calc_cookies(self, url):
2340 pr = sanitized_Request(url)
2341 self.cookiejar.add_cookie_header(pr)
2342 return pr.get_header('Cookie')
2343
2344 def _sort_thumbnails(self, thumbnails):
2345 thumbnails.sort(key=lambda t: (
2346 t.get('preference') if t.get('preference') is not None else -1,
2347 t.get('width') if t.get('width') is not None else -1,
2348 t.get('height') if t.get('height') is not None else -1,
2349 t.get('id') if t.get('id') is not None else '',
2350 t.get('url')))
2351
2352 def _sanitize_thumbnails(self, info_dict):
2353 thumbnails = info_dict.get('thumbnails')
2354 if thumbnails is None:
2355 thumbnail = info_dict.get('thumbnail')
2356 if thumbnail:
2357 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
2358 if not thumbnails:
2359 return
2360
2361 def check_thumbnails(thumbnails):
2362 for t in thumbnails:
2363 self.to_screen(f'[info] Testing thumbnail {t["id"]}')
2364 try:
2365 self.urlopen(HEADRequest(t['url']))
2366 except network_exceptions as err:
2367 self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
2368 continue
2369 yield t
2370
2371 self._sort_thumbnails(thumbnails)
2372 for i, t in enumerate(thumbnails):
2373 if t.get('id') is None:
2374 t['id'] = '%d' % i
2375 if t.get('width') and t.get('height'):
2376 t['resolution'] = '%dx%d' % (t['width'], t['height'])
2377 t['url'] = sanitize_url(t['url'])
2378
2379 if self.params.get('check_formats') is True:
2380 info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
2381 else:
2382 info_dict['thumbnails'] = thumbnails
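# With --check-formats, thumbnails are probed lazily with HEAD requests in
# reverse preference order; LazyList(..., reverse=True) restores best-last
# ordering without testing URLs that are never accessed.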
2383
2384 def _fill_common_fields(self, info_dict, final=True):
2385 # TODO: move sanitization here
2386 if final:
2387 title = info_dict.get('title', NO_DEFAULT)
2388 if title is NO_DEFAULT:
2389 raise ExtractorError('Missing "title" field in extractor result',
2390 video_id=info_dict['id'], ie=info_dict['extractor'])
2391 info_dict['fulltitle'] = title
2392 if not title:
2393 if title == '':
2394 self.write_debug('Extractor gave empty title. Creating a generic title')
2395 else:
2396 self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
2397 info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'
2398
2399 if info_dict.get('duration') is not None:
2400 info_dict['duration_string'] = formatSeconds(info_dict['duration'])
2401
2402 for ts_key, date_key in (
2403 ('timestamp', 'upload_date'),
2404 ('release_timestamp', 'release_date'),
2405 ('modified_timestamp', 'modified_date'),
2406 ):
2407 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
2408 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2409 # see http://bugs.python.org/issue1646728)
2410 with contextlib.suppress(ValueError, OverflowError, OSError):
2411 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
2412 info_dict[date_key] = upload_date.strftime('%Y%m%d')
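# e.g. timestamp=1609459200 produces upload_date='20210101' (UTC); the
# suppress() above skips values that utcfromtimestamp/strftime cannot handle.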
2413
2414 live_keys = ('is_live', 'was_live')
2415 live_status = info_dict.get('live_status')
2416 if live_status is None:
2417 for key in live_keys:
2418 if info_dict.get(key) is False:
2419 continue
2420 if info_dict.get(key):
2421 live_status = key
2422 break
2423 if all(info_dict.get(key) is False for key in live_keys):
2424 live_status = 'not_live'
2425 if live_status:
2426 info_dict['live_status'] = live_status
2427 for key in live_keys:
2428 if info_dict.get(key) is None:
2429 info_dict[key] = (live_status == key)
2430 if live_status == 'post_live':
2431 info_dict['was_live'] = True
2432
2433 # Auto-generate title fields corresponding to the *_number fields when missing,
2434 # in order to always have clean titles. This is very common for TV series.
2435 for field in ('chapter', 'season', 'episode'):
2436 if final and info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
2437 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
2438
2439 def _raise_pending_errors(self, info):
2440 err = info.pop('__pending_error', None)
2441 if err:
2442 self.report_error(err, tb=False)
2443
2444 def process_video_result(self, info_dict, download=True):
2445 assert info_dict.get('_type', 'video') == 'video'
2446 self._num_videos += 1
2447
2448 if 'id' not in info_dict:
2449 raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
2450 elif not info_dict.get('id'):
2451 raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])
2452
2453 def report_force_conversion(field, field_not, conversion):
2454 self.report_warning(
2455 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2456 % (field, field_not, conversion))
2457
2458 def sanitize_string_field(info, string_field):
2459 field = info.get(string_field)
2460 if field is None or isinstance(field, str):
2461 return
2462 report_force_conversion(string_field, 'a string', 'string')
2463 info[string_field] = str(field)
2464
2465 def sanitize_numeric_fields(info):
2466 for numeric_field in self._NUMERIC_FIELDS:
2467 field = info.get(numeric_field)
2468 if field is None or isinstance(field, (int, float)):
2469 continue
2470 report_force_conversion(numeric_field, 'numeric', 'int')
2471 info[numeric_field] = int_or_none(field)
2472
2473 sanitize_string_field(info_dict, 'id')
2474 sanitize_numeric_fields(info_dict)
2475 if info_dict.get('section_end') and info_dict.get('section_start') is not None:
2476 info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
2477 if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
2478 self.report_warning('"duration" field is negative, there is an error in extractor')
2479
2480 chapters = info_dict.get('chapters') or []
2481 if chapters and chapters[0].get('start_time'):
2482 chapters.insert(0, {'start_time': 0})
2483
2484 dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
2485 for idx, (prev, current, next_) in enumerate(zip(
2486 (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
2487 if current.get('start_time') is None:
2488 current['start_time'] = prev.get('end_time')
2489 if not current.get('end_time'):
2490 current['end_time'] = next_.get('start_time')
2491 if not current.get('title'):
2492 current['title'] = f'<Untitled Chapter {idx}>'
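# Sketch (hypothetical chapters): given [{'title': 'Intro'}, {'start_time': 60}]
# on a 120s video, the pass above fills missing start/end times from the
# neighbouring chapters and the dummy boundaries, and names untitled ones
# '<Untitled Chapter n>'.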
2493
2494 if 'playlist' not in info_dict:
2495 # It isn't part of a playlist
2496 info_dict['playlist'] = None
2497 info_dict['playlist_index'] = None
2498
2499 self._sanitize_thumbnails(info_dict)
2500
2501 thumbnail = info_dict.get('thumbnail')
2502 thumbnails = info_dict.get('thumbnails')
2503 if thumbnail:
2504 info_dict['thumbnail'] = sanitize_url(thumbnail)
2505 elif thumbnails:
2506 info_dict['thumbnail'] = thumbnails[-1]['url']
2507
2508 if info_dict.get('display_id') is None and 'id' in info_dict:
2509 info_dict['display_id'] = info_dict['id']
2510
2511 self._fill_common_fields(info_dict)
2512
2513 for cc_kind in ('subtitles', 'automatic_captions'):
2514 cc = info_dict.get(cc_kind)
2515 if cc:
2516 for _, subtitle in cc.items():
2517 for subtitle_format in subtitle:
2518 if subtitle_format.get('url'):
2519 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
2520 if subtitle_format.get('ext') is None:
2521 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
2522
2523 automatic_captions = info_dict.get('automatic_captions')
2524 subtitles = info_dict.get('subtitles')
2525
2526 info_dict['requested_subtitles'] = self.process_subtitles(
2527 info_dict['id'], subtitles, automatic_captions)
2528
2529 formats = self._get_formats(info_dict)
2530
2531 # or None ensures --clean-infojson removes it
2532 info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
2533 if not self.params.get('allow_unplayable_formats'):
2534 formats = [f for f in formats if not f.get('has_drm')]
2535
2536 if formats and all(f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
2537 self.report_warning(
2538 f'{"This video is DRM protected and " if info_dict["_has_drm"] else ""}'
2539 'only images are available for download. Use --list-formats to see them'.capitalize())
2540
2541 get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
2542 if not get_from_start:
2543 info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
2544 if info_dict.get('is_live') and formats:
2545 formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
2546 if get_from_start and not formats:
2547 self.raise_no_formats(info_dict, msg=(
2548 '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
2549 'If you want to download from the current time, use --no-live-from-start'))
2550
2551 def is_wellformed(f):
2552 url = f.get('url')
2553 if not url:
2554 self.report_warning(
2555 '"url" field is missing or empty - skipping format, '
2556 'there is an error in the extractor')
2557 return False
2558 if isinstance(url, bytes):
2559 sanitize_string_field(f, 'url')
2560 return True
2561
2562 # Filter out malformed formats for better extraction robustness
2563 formats = list(filter(is_wellformed, formats or []))
2564
2565 if not formats:
2566 self.raise_no_formats(info_dict)
2567
2568 formats_dict = {}
2569
2570 # We check that all the formats have the format and format_id fields
2571 for i, format in enumerate(formats):
2572 sanitize_string_field(format, 'format_id')
2573 sanitize_numeric_fields(format)
2574 format['url'] = sanitize_url(format['url'])
2575 if not format.get('format_id'):
2576 format['format_id'] = str(i)
2577 else:
2578 # Sanitize format_id from characters used in format selector expression
2579 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
2580 format_id = format['format_id']
2581 if format_id not in formats_dict:
2582 formats_dict[format_id] = []
2583 formats_dict[format_id].append(format)
2584
2585 # Make sure all formats have unique format_id
2586 common_exts = set(itertools.chain(*self._format_selection_exts.values()))
2587 for format_id, ambiguous_formats in formats_dict.items():
2588 ambiguous_id = len(ambiguous_formats) > 1
2589 for i, format in enumerate(ambiguous_formats):
2590 if ambiguous_id:
2591 format['format_id'] = '%s-%d' % (format_id, i)
2592 if format.get('ext') is None:
2593 format['ext'] = determine_ext(format['url']).lower()
2594 # Ensure there is no conflict between id and ext in format selection
2595 # See https://github.com/yt-dlp/yt-dlp/issues/1282
2596 if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
2597 format['format_id'] = 'f%s' % format['format_id']
2598
2599 for i, format in enumerate(formats):
2600 if format.get('format') is None:
2601 format['format'] = '{id} - {res}{note}'.format(
2602 id=format['format_id'],
2603 res=self.format_resolution(format),
2604 note=format_field(format, 'format_note', ' (%s)'),
2605 )
2606 if format.get('protocol') is None:
2607 format['protocol'] = determine_protocol(format)
2608 if format.get('resolution') is None:
2609 format['resolution'] = self.format_resolution(format, default=None)
2610 if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
2611 format['dynamic_range'] = 'SDR'
2612 if (info_dict.get('duration') and format.get('tbr')
2613 and not format.get('filesize') and not format.get('filesize_approx')):
2614 format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
2615
2616 # Add HTTP headers, so that external programs can use them from the
2617 # json output
2618 full_format_info = info_dict.copy()
2619 full_format_info.update(format)
2620 format['http_headers'] = self._calc_headers(full_format_info)
2621 # Remove private housekeeping stuff
2622 if '__x_forwarded_for_ip' in info_dict:
2623 del info_dict['__x_forwarded_for_ip']
2624
2625 if self.params.get('check_formats') is True:
2626 formats = LazyList(self._check_formats(formats[::-1]), reverse=True)
2627
2628 if not formats or formats[0] is not info_dict:
2629 # only set the 'formats' field if the original info_dict lists separate formats;
2630 # otherwise we end up with a circular reference: the first (and only)
2631 # element of the 'formats' field in info_dict would be info_dict itself,
2632 # which can't be exported to json
2633 info_dict['formats'] = formats
2634
2635 info_dict, _ = self.pre_process(info_dict)
2636
2637 if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
2638 return info_dict
2639
2640 self.post_extract(info_dict)
2641 info_dict, _ = self.pre_process(info_dict, 'after_filter')
2642
2643 # The pre-processors may have modified the formats
2644 formats = self._get_formats(info_dict)
2645
2646 list_only = self.params.get('simulate') is None and (
2647 self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
2648 interactive_format_selection = not list_only and self.format_selector == '-'
2649 if self.params.get('list_thumbnails'):
2650 self.list_thumbnails(info_dict)
2651 if self.params.get('listsubtitles'):
2652 if 'automatic_captions' in info_dict:
2653 self.list_subtitles(
2654 info_dict['id'], automatic_captions, 'automatic captions')
2655 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
2656 if self.params.get('listformats') or interactive_format_selection:
2657 self.list_formats(info_dict)
2658 if list_only:
2659 # Without this printing, -F --print-json will not work
2660 self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
2661 return info_dict
2662
2663 format_selector = self.format_selector
2664 if format_selector is None:
2665 req_format = self._default_format_spec(info_dict, download=download)
2666 self.write_debug('Default format spec: %s' % req_format)
2667 format_selector = self.build_format_selector(req_format)
2668
2669 while True:
2670 if interactive_format_selection:
2671 req_format = input(
2672 self._format_screen('\nEnter format selector: ', self.Styles.EMPHASIS))
2673 try:
2674 format_selector = self.build_format_selector(req_format)
2675 except SyntaxError as err:
2676 self.report_error(err, tb=False, is_error=False)
2677 continue
2678
2679 formats_to_download = list(format_selector({
2680 'formats': formats,
2681 'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
2682 'incomplete_formats': (
2683 # All formats are video-only or
2684 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
2685 # all formats are audio-only
2686 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
2687 }))
2688 if interactive_format_selection and not formats_to_download:
2689 self.report_error('Requested format is not available', tb=False, is_error=False)
2690 continue
2691 break
2692
2693 if not formats_to_download:
2694 if not self.params.get('ignore_no_formats_error'):
2695 raise ExtractorError(
2696 'Requested format is not available. Use --list-formats for a list of available formats',
2697 expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
2698 self.report_warning('Requested format is not available')
2699 # Process what we can, even without any available formats.
2700 formats_to_download = [{}]
2701
2702 requested_ranges = tuple(self.params.get('download_ranges', lambda *_: [{}])(info_dict, self))
2703 best_format, downloaded_formats = formats_to_download[-1], []
2704 if download:
2705 if best_format and requested_ranges:
2706 def to_screen(*msg):
2707 self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')
2708
2709 to_screen(f'Downloading {len(formats_to_download)} format(s):',
2710 (f['format_id'] for f in formats_to_download))
2711 if requested_ranges != ({}, ):
2712 to_screen(f'Downloading {len(requested_ranges)} time ranges:',
2713 (f'{c["start_time"]:.1f}-{c["end_time"]:.1f}' for c in requested_ranges))
2714 max_downloads_reached = False
2715
2716 for fmt, chapter in itertools.product(formats_to_download, requested_ranges):
2717 new_info = self._copy_infodict(info_dict)
2718 new_info.update(fmt)
2719 offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
2720 end_time = offset + min(chapter.get('end_time', duration), duration)
2721 if chapter or offset:
2722 new_info.update({
2723 'section_start': offset + chapter.get('start_time', 0),
2724 # duration may not be accurate, so allow deviations of <1sec
2725 'section_end': end_time if end_time <= offset + duration + 1 else None,
2726 'section_title': chapter.get('title'),
2727 'section_number': chapter.get('index'),
2728 })
2729 downloaded_formats.append(new_info)
2730 try:
2731 self.process_info(new_info)
2732 except MaxDownloadsReached:
2733 max_downloads_reached = True
2734 self._raise_pending_errors(new_info)
2735 # Remove copied info
2736 for key, val in tuple(new_info.items()):
2737 if info_dict.get(key) == val:
2738 new_info.pop(key)
2739 if max_downloads_reached:
2740 break
2741
2742 write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
2743 assert write_archive.issubset({True, False, 'ignore'})
2744 if True in write_archive and False not in write_archive:
2745 self.record_download_archive(info_dict)
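# i.e. the archive entry is recorded only when at least one downloaded format
# requested it and none vetoed it; '__write_download_archive' values of
# 'ignore' neither trigger nor block the write.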
2746
2747 info_dict['requested_downloads'] = downloaded_formats
2748 info_dict = self.run_all_pps('after_video', info_dict)
2749 if max_downloads_reached:
2750 raise MaxDownloadsReached()
2751
2752 # We update the info dict with the selected best quality format (backwards compatibility)
2753 info_dict.update(best_format)
2754 return info_dict
2755
2756 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
2757 """Select the requested subtitles and their format"""
2758 available_subs, normal_sub_langs = {}, []
2759 if normal_subtitles and self.params.get('writesubtitles'):
2760 available_subs.update(normal_subtitles)
2761 normal_sub_langs = tuple(normal_subtitles.keys())
2762 if automatic_captions and self.params.get('writeautomaticsub'):
2763 for lang, cap_info in automatic_captions.items():
2764 if lang not in available_subs:
2765 available_subs[lang] = cap_info
2766
2767 if not available_subs or (
2768 not self.params.get('writesubtitles')
2769 and not self.params.get('writeautomaticsub')):
2770 return None
2771
2772 all_sub_langs = tuple(available_subs.keys())
2773 if self.params.get('allsubtitles', False):
2774 requested_langs = all_sub_langs
2775 elif self.params.get('subtitleslangs', False):
2776 try:
2777 requested_langs = orderedSet_from_options(
2778 self.params.get('subtitleslangs'), {'all': all_sub_langs}, use_regex=True)
2779 except re.error as e:
2780 raise ValueError(f'Invalid regex for subtitleslangs: {e.pattern}')
2781 elif normal_sub_langs:
2782 requested_langs = ['en'] if 'en' in normal_sub_langs else normal_sub_langs[:1]
2783 else:
2784 requested_langs = ['en'] if 'en' in all_sub_langs else all_sub_langs[:1]
2785 if requested_langs:
2786 self.to_screen(f'[info] {video_id}: Downloading subtitles: {", ".join(requested_langs)}')
2787
2788 formats_query = self.params.get('subtitlesformat', 'best')
2789 formats_preference = formats_query.split('/') if formats_query else []
2790 subs = {}
2791 for lang in requested_langs:
2792 formats = available_subs.get(lang)
2793 if formats is None:
2794 self.report_warning(f'{lang} subtitles not available for {video_id}')
2795 continue
2796 for ext in formats_preference:
2797 if ext == 'best':
2798 f = formats[-1]
2799 break
2800 matches = list(filter(lambda f: f['ext'] == ext, formats))
2801 if matches:
2802 f = matches[-1]
2803 break
2804 else:
2805 f = formats[-1]
2806 self.report_warning(
2807 'No subtitle format found matching "%s" for language %s, '
2808 'using %s' % (formats_query, lang, f['ext']))
2809 subs[lang] = f
2810 return subs
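
# Illustrative sketch (not part of the source; languages are hypothetical):
# with available subtitles in ('en-US', 'en-GB', 'ja', 'fr') and params
# {'writesubtitles': True, 'subtitleslangs': ['en.*', 'ja'],
#  'subtitlesformat': 'srt/best'}, the language entries are treated as
# regexes (use_regex=True above), so requested_langs becomes
# ('en-US', 'en-GB', 'ja'); for each language an 'srt' track is preferred,
# falling back to the last-listed ("best") format.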
2811
2812 def _forceprint(self, key, info_dict):
2813 if info_dict is None:
2814 return
2815 info_copy = info_dict.copy()
2816 info_copy['formats_table'] = self.render_formats_table(info_dict)
2817 info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
2818 info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
2819 info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))
2820
2821 def format_tmpl(tmpl):
2822 mobj = re.fullmatch(r'([\w.:,]|-\d|(?P<dict>{([\w.:,]|-\d)+}))+=?', tmpl)
2823 if not mobj:
2824 return tmpl
2825
2826 fmt = '%({})s'
2827 if tmpl.startswith('{'):
2828 tmpl = f'.{tmpl}'
2829 if tmpl.endswith('='):
2830 tmpl, fmt = tmpl[:-1], '{0} = %({0})#j'
2831 return '\n'.join(map(fmt.format, [tmpl] if mobj.group('dict') else tmpl.split(',')))
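# Illustrative sketch (not part of the source) of what format_tmpl yields:
#   'id,title'  -> '%(id)s\n%(title)s'     (comma-separated field list)
#   'title='    -> 'title = %(title)#j'    (trailing '=' prints "name = <json>")
#   '%(title)s' -> '%(title)s'             (full templates pass through as-is)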
2832
2833 for tmpl in self.params['forceprint'].get(key, []):
2834 self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))
2835
2836 for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
2837 filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
2838 tmpl = format_tmpl(tmpl)
2839 self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
2840 if self._ensure_dir_exists(filename):
2841 with open(filename, 'a', encoding='utf-8') as f:
2842 f.write(self.evaluate_outtmpl(tmpl, info_copy) + '\n')
2843
2844 def __forced_printings(self, info_dict, filename, incomplete):
2845 def print_mandatory(field, actual_field=None):
2846 if actual_field is None:
2847 actual_field = field
2848 if (self.params.get('force%s' % field, False)
2849 and (not incomplete or info_dict.get(actual_field) is not None)):
2850 self.to_stdout(info_dict[actual_field])
2851
2852 def print_optional(field):
2853 if (self.params.get('force%s' % field, False)
2854 and info_dict.get(field) is not None):
2855 self.to_stdout(info_dict[field])
2856
2857 info_dict = info_dict.copy()
2858 if filename is not None:
2859 info_dict['filename'] = filename
2860 if info_dict.get('requested_formats') is not None:
2861 # For RTMP URLs, also include the playpath
2862 info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
2863 elif info_dict.get('url'):
2864 info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')
2865
2866 if (self.params.get('forcejson')
2867 or self.params['forceprint'].get('video')
2868 or self.params['print_to_file'].get('video')):
2869 self.post_extract(info_dict)
2870 self._forceprint('video', info_dict)
2871
2872 print_mandatory('title')
2873 print_mandatory('id')
2874 print_mandatory('url', 'urls')
2875 print_optional('thumbnail')
2876 print_optional('description')
2877 print_optional('filename')
2878 if self.params.get('forceduration') and info_dict.get('duration') is not None:
2879 self.to_stdout(formatSeconds(info_dict['duration']))
2880 print_mandatory('format')
2881
2882 if self.params.get('forcejson'):
2883 self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
2884
2885 def dl(self, name, info, subtitle=False, test=False):
2886 if not info.get('url'):
2887 self.raise_no_formats(info, True)
2888
2889 if test:
2890 verbose = self.params.get('verbose')
2891 params = {
2892 'test': True,
2893 'quiet': self.params.get('quiet') or not verbose,
2894 'verbose': verbose,
2895 'noprogress': not verbose,
2896 'nopart': True,
2897 'skip_unavailable_fragments': False,
2898 'keep_fragments': False,
2899 'overwrites': True,
2900 '_no_ytdl_file': True,
2901 }
2902 else:
2903 params = self.params
2904 fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
2905 if not test:
2906 for ph in self._progress_hooks:
2907 fd.add_progress_hook(ph)
2908 urls = '", "'.join(
2909 (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
2910 for f in info.get('requested_formats', []) or [info])
2911 self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')
2912
2913 # Note: Ideally, info should be deep-copied so that hooks cannot modify it,
2914 # but it may contain objects that are not deep-copyable
2915 new_info = self._copy_infodict(info)
2916 if new_info.get('http_headers') is None:
2917 new_info['http_headers'] = self._calc_headers(new_info)
2918 return fd.download(name, new_info, subtitle)
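
# Illustrative sketch (not part of the source): a test download such as
# self.dl(filename, info, test=True) fetches only a small probe of the format
# (quiet, part-less, no .ytdl state, as the overridden params above show);
# this is how format availability checks avoid full downloads.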
2919
2920 def existing_file(self, filepaths, *, default_overwrite=True):
2921 existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
2922 if existing_files and not self.params.get('overwrites', default_overwrite):
2923 return existing_files[0]
2924
2925 for file in existing_files:
2926 self.report_file_delete(file)
2927 os.remove(file)
2928 return None
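
# Illustrative sketch (not part of the source; paths are hypothetical):
# with {'overwrites': False}, existing_file(('clip.mkv', 'clip.temp.mkv'))
# returns the first path that already exists, so the caller can skip the
# download; with overwrites enabled, every existing candidate is deleted
# and None is returned so the download proceeds afresh.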
2929
2930 def process_info(self, info_dict):
2931 """Process a single resolved IE result. (Modifies it in-place)"""
2932
2933 assert info_dict.get('_type', 'video') == 'video'
2934 original_infodict = info_dict
2935
2936 if 'format' not in info_dict and 'ext' in info_dict:
2937 info_dict['format'] = info_dict['ext']
2938
2939 # This is mostly just for backward compatibility of process_info
2940 # As a side-effect, this allows for format-specific filters
2941 if self._match_entry(info_dict) is not None:
2942 info_dict['__write_download_archive'] = 'ignore'
2943 return
2944
2945 # Does nothing under normal operation - for backward compatibility of process_info
2946 self.post_extract(info_dict)
2947 self._num_downloads += 1
2948
2949 # info_dict['_filename'] needs to be set for backward compatibility
2950 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2951 temp_filename = self.prepare_filename(info_dict, 'temp')
2952 files_to_move = {}
2953
2954 # Forced printings
2955 self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
2956
2957 def check_max_downloads():
2958 if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
2959 raise MaxDownloadsReached()
2960
2961 if self.params.get('simulate'):
2962 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
2963 check_max_downloads()
2964 return
2965
2966 if full_filename is None:
2967 return
2968 if not self._ensure_dir_exists(encodeFilename(full_filename)):
2969 return
2970 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
2971 return
2972
2973 if self._write_description('video', info_dict,
2974 self.prepare_filename(info_dict, 'description')) is None:
2975 return
2976
2977 sub_files = self._write_subtitles(info_dict, temp_filename)
2978 if sub_files is None:
2979 return
2980 files_to_move.update(dict(sub_files))
2981
2982 thumb_files = self._write_thumbnails(
2983 'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
2984 if thumb_files is None:
2985 return
2986 files_to_move.update(dict(thumb_files))
2987
2988 infofn = self.prepare_filename(info_dict, 'infojson')
2989 _infojson_written = self._write_info_json('video', info_dict, infofn)
2990 if _infojson_written:
2991 info_dict['infojson_filename'] = infofn
2992 # For backward compatibility, even though it was a private field
2993 info_dict['__infojson_filename'] = infofn
2994 elif _infojson_written is None:
2995 return
2996
2997 # Note: Annotations are deprecated
2998 annofn = None
2999 if self.params.get('writeannotations', False):
3000 annofn = self.prepare_filename(info_dict, 'annotation')
3001 if annofn:
3002 if not self._ensure_dir_exists(encodeFilename(annofn)):
3003 return
3004 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
3005 self.to_screen('[info] Video annotations are already present')
3006 elif not info_dict.get('annotations'):
3007 self.report_warning('There are no annotations to write.')
3008 else:
3009 try:
3010 self.to_screen('[info] Writing video annotations to: ' + annofn)
3011 with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
3012 annofile.write(info_dict['annotations'])
3013 except (KeyError, TypeError):
3014 self.report_warning('There are no annotations to write.')
3015 except OSError:
3016 self.report_error('Cannot write annotations file: ' + annofn)
3017 return
3018
3019 # Write internet shortcut files
3020 def _write_link_file(link_type):
3021 url = try_get(info_dict['webpage_url'], iri_to_uri)
3022 if not url:
3023 self.report_warning(
3024 f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
3025 return True
3026 linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
3027 if not self._ensure_dir_exists(encodeFilename(linkfn)):
3028 return False
3029 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
3030 self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
3031 return True
3032 try:
3033 self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
3034 with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
3035 newline='\r\n' if link_type == 'url' else '\n') as linkfile:
3036 template_vars = {'url': url}
3037 if link_type == 'desktop':
3038 template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
3039 linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
3040 except OSError:
3041 self.report_error(f'Cannot write internet shortcut {linkfn}')
3042 return False
3043 return True
3044
3045 write_links = {
3046 'url': self.params.get('writeurllink'),
3047 'webloc': self.params.get('writewebloclink'),
3048 'desktop': self.params.get('writedesktoplink'),
3049 }
3050 if self.params.get('writelink'):
3051 link_type = ('webloc' if sys.platform == 'darwin'
3052 else 'desktop' if sys.platform.startswith('linux')
3053 else 'url')
3054 write_links[link_type] = True
3055
3056 if any(should_write and not _write_link_file(link_type)
3057 for link_type, should_write in write_links.items()):
3058 return
3059
3060 def replace_info_dict(new_info):
3061 nonlocal info_dict
3062 if new_info == info_dict:
3063 return
3064 info_dict.clear()
3065 info_dict.update(new_info)
3066
3067 new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
3068 replace_info_dict(new_info)
3069
3070 if self.params.get('skip_download'):
3071 info_dict['filepath'] = temp_filename
3072 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3073 info_dict['__files_to_move'] = files_to_move
3074 replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
3075 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
3076 else:
3077 # Download
3078 info_dict.setdefault('__postprocessors', [])
3079 try:
3080
3081 def existing_video_file(*filepaths):
3082 ext = info_dict.get('ext')
3083 converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
3084 file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
3085 default_overwrite=False)
3086 if file:
3087 info_dict['ext'] = os.path.splitext(file)[1][1:]
3088 return file
3089
3090 fd, success = None, True
3091 if info_dict.get('protocol') or info_dict.get('url'):
3092 fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
3093 if fd is not FFmpegFD and (
3094 info_dict.get('section_start') or info_dict.get('section_end')):
3095 msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
3096 else 'You have requested downloading the video partially, but ffmpeg is not installed')
3097 self.report_error(f'{msg}. Aborting')
3098 return
3099
3100 if info_dict.get('requested_formats') is not None:
3101 requested_formats = info_dict['requested_formats']
3102 old_ext = info_dict['ext']
3103 if self.params.get('merge_output_format') is None:
3104 if (info_dict['ext'] == 'webm'
3105 and info_dict.get('thumbnails')
3106 # check with type instead of pp_key, __name__, or isinstance
3107 # since we don't want any custom PPs to trigger this
3108 and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])): # noqa: E721
3109 info_dict['ext'] = 'mkv'
3110 self.report_warning(
3111 'webm doesn\'t support embedding a thumbnail, mkv will be used')
3112 new_ext = info_dict['ext']
3113
3114 def correct_ext(filename, ext=new_ext):
3115 if filename == '-':
3116 return filename
3117 filename_real_ext = os.path.splitext(filename)[1][1:]
3118 filename_wo_ext = (
3119 os.path.splitext(filename)[0]
3120 if filename_real_ext in (old_ext, new_ext)
3121 else filename)
3122 return f'{filename_wo_ext}.{ext}'
3123
3124 # Ensure filename always has a correct extension for successful merge
3125 full_filename = correct_ext(full_filename)
3126 temp_filename = correct_ext(temp_filename)
3127 dl_filename = existing_video_file(full_filename, temp_filename)
3128 info_dict['__real_download'] = False
3129
3130 merger = FFmpegMergerPP(self)
3131 downloaded = []
3132 if dl_filename is not None:
3133 self.report_file_already_downloaded(dl_filename)
3134 elif fd:
3135 for f in requested_formats if fd != FFmpegFD else []:
3136 f['filepath'] = fname = prepend_extension(
3137 correct_ext(temp_filename, info_dict['ext']),
3138 'f%s' % f['format_id'], info_dict['ext'])
3139 downloaded.append(fname)
3140 info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
3141 success, real_download = self.dl(temp_filename, info_dict)
3142 info_dict['__real_download'] = real_download
3143 else:
3144 if self.params.get('allow_unplayable_formats'):
3145 self.report_warning(
3146 'You have requested merging of multiple formats '
3147 'while also allowing unplayable formats to be downloaded. '
3148 'The formats won\'t be merged to prevent data corruption.')
3149 elif not merger.available:
3150 msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
3151 if not self.params.get('ignoreerrors'):
3152 self.report_error(f'{msg}. Aborting due to --abort-on-error')
3153 return
3154 self.report_warning(f'{msg}. The formats won\'t be merged')
3155
3156 if temp_filename == '-':
3157 reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
3158 else 'but the formats are incompatible for simultaneous download' if merger.available
3159 else 'but ffmpeg is not installed')
3160 self.report_warning(
3161 f'You have requested downloading multiple formats to stdout {reason}. '
3162 'The formats will be streamed one after the other')
3163 fname = temp_filename
3164 for f in requested_formats:
3165 new_info = dict(info_dict)
3166 del new_info['requested_formats']
3167 new_info.update(f)
3168 if temp_filename != '-':
3169 fname = prepend_extension(
3170 correct_ext(temp_filename, new_info['ext']),
3171 'f%s' % f['format_id'], new_info['ext'])
3172 if not self._ensure_dir_exists(fname):
3173 return
3174 f['filepath'] = fname
3175 downloaded.append(fname)
3176 partial_success, real_download = self.dl(fname, new_info)
3177 info_dict['__real_download'] = info_dict['__real_download'] or real_download
3178 success = success and partial_success
3179
3180 if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
3181 info_dict['__postprocessors'].append(merger)
3182 info_dict['__files_to_merge'] = downloaded
3183 # Even if nothing was downloaded, the merge itself is only happening now
3184 info_dict['__real_download'] = True
3185 else:
3186 for file in downloaded:
3187 files_to_move[file] = None
3188 else:
3189 # Just a single file
3190 dl_filename = existing_video_file(full_filename, temp_filename)
3191 if dl_filename is None or dl_filename == temp_filename:
3192 # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
3193 # So we should try to resume the download
3194 success, real_download = self.dl(temp_filename, info_dict)
3195 info_dict['__real_download'] = real_download
3196 else:
3197 self.report_file_already_downloaded(dl_filename)
3198
3199 dl_filename = dl_filename or temp_filename
3200 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3201
3202 except network_exceptions as err:
3203 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
3204 return
3205 except OSError as err:
3206 raise UnavailableVideoError(err)
3207 except (ContentTooShortError, ) as err:
3208 self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
3209 return
3210
3211 self._raise_pending_errors(info_dict)
3212 if success and full_filename != '-':
3213
3214 def fixup():
3215 do_fixup = True
3216 fixup_policy = self.params.get('fixup')
3217 vid = info_dict['id']
3218
3219 if fixup_policy in ('ignore', 'never'):
3220 return
3221 elif fixup_policy == 'warn':
3222 do_fixup = 'warn'
3223 elif fixup_policy != 'force':
3224 assert fixup_policy in ('detect_or_warn', None)
3225 if not info_dict.get('__real_download'):
3226 do_fixup = False
3227
3228 def ffmpeg_fixup(cndn, msg, cls):
3229 if not (do_fixup and cndn):
3230 return
3231 elif do_fixup == 'warn':
3232 self.report_warning(f'{vid}: {msg}')
3233 return
3234 pp = cls(self)
3235 if pp.available:
3236 info_dict['__postprocessors'].append(pp)
3237 else:
3238 self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
3239
3240 stretched_ratio = info_dict.get('stretched_ratio')
3241 ffmpeg_fixup(stretched_ratio not in (1, None),
3242 f'Non-uniform pixel ratio {stretched_ratio}',
3243 FFmpegFixupStretchedPP)
3244
3245 downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
3246 downloader = downloader.FD_NAME if downloader else None
3247
3248 ext = info_dict.get('ext')
3249 postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
3250 isinstance(pp, FFmpegVideoConvertorPP)
3251 and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
3252 ) for pp in self._pps['post_process'])
3253
3254 if not postprocessed_by_ffmpeg:
3255 ffmpeg_fixup(ext == 'm4a' and info_dict.get('container') == 'm4a_dash',
3256 'writing DASH m4a. Only some players support this container',
3257 FFmpegFixupM4aPP)
3258 ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
3259 or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
3260 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
3261 FFmpegFixupM3u8PP)
3262 ffmpeg_fixup(info_dict.get('is_live') and downloader == 'DashSegmentsFD',
3263 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)
3264
3265 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
3266 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)
3267
3268 fixup()
3269 try:
3270 replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
3271 except PostProcessingError as err:
3272 self.report_error('Postprocessing: %s' % str(err))
3273 return
3274 try:
3275 for ph in self._post_hooks:
3276 ph(info_dict['filepath'])
3277 except Exception as err:
3278 self.report_error('post hooks: %s' % str(err))
3279 return
3280 info_dict['__write_download_archive'] = True
3281
3282 assert info_dict is original_infodict # Make sure the info_dict was modified in-place
3283 if self.params.get('force_write_download_archive'):
3284 info_dict['__write_download_archive'] = True
3285 check_max_downloads()
3286
3287 def __download_wrapper(self, func):
3288 @functools.wraps(func)
3289 def wrapper(*args, **kwargs):
3290 try:
3291 res = func(*args, **kwargs)
3292 except UnavailableVideoError as e:
3293 self.report_error(e)
3294 except DownloadCancelled as e:
3295 self.to_screen(f'[info] {e}')
3296 if not self.params.get('break_per_url'):
3297 raise
3298 self._num_downloads = 0
3299 else:
3300 if self.params.get('dump_single_json', False):
3301 self.post_extract(res)
3302 self.to_stdout(json.dumps(self.sanitize_info(res)))
3303 return wrapper
3304
3305 def download(self, url_list):
3306 """Download a given list of URLs."""
3307 url_list = variadic(url_list) # Passing a single URL is a common mistake
3308 outtmpl = self.params['outtmpl']['default']
3309 if (len(url_list) > 1
3310 and outtmpl != '-'
3311 and '%' not in outtmpl
3312 and self.params.get('max_downloads') != 1):
3313 raise SameFileError(outtmpl)
3314
3315 for url in url_list:
3316 self.__download_wrapper(self.extract_info)(
3317 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
3318
3319 return self._download_retcode
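
# Illustrative usage sketch (not part of the source; the URL is a placeholder):
#
#   from yt_dlp import YoutubeDL
#
#   with YoutubeDL({'outtmpl': '%(title)s.%(ext)s'}) as ydl:
#       ydl.download('https://example.com/watch?v=xxxx')  # a bare str is fine; variadic() wraps it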
3320
3321 def download_with_info_file(self, info_filename):
3322 with contextlib.closing(fileinput.FileInput(
3323 [info_filename], mode='r',
3324 openhook=fileinput.hook_encoded('utf-8'))) as f:
3325 # FileInput doesn't have a read method, so we can't call json.load
3326 info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
3327 try:
3328 self.__download_wrapper(self.process_ie_result)(info, download=True)
3329 except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
3330 if not isinstance(e, EntryNotInPlaylist):
3331 self.to_stderr('\r')
3332 webpage_url = info.get('webpage_url')
3333 if webpage_url is not None:
3334 self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
3335 return self.download([webpage_url])
3336 else:
3337 raise
3338 return self._download_retcode
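
# Illustrative sketch (not part of the source): this method is what backs
# `yt-dlp --load-info-json video.info.json`; as the except-clause above shows,
# a failed download from stored info is retried once via the recorded
# webpage_url.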
3339
3340 @staticmethod
3341 def sanitize_info(info_dict, remove_private_keys=False):
3342 ''' Sanitize the infodict for conversion to JSON '''
3343 if info_dict is None:
3344 return info_dict
3345 info_dict.setdefault('epoch', int(time.time()))
3346 info_dict.setdefault('_type', 'video')
3347 info_dict.setdefault('_version', {
3348 'version': __version__,
3349 'current_git_head': current_git_head(),
3350 'release_git_head': RELEASE_GIT_HEAD,
3351 'repository': REPOSITORY,
3352 })
3353
3354 if remove_private_keys:
3355 reject = lambda k, v: v is None or k.startswith('__') or k in {
3356 'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
3357 'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber',
3358 }
3359 else:
3360 reject = lambda k, v: False
3361
3362 def filter_fn(obj):
3363 if isinstance(obj, dict):
3364 return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
3365 elif isinstance(obj, (list, tuple, set, LazyList)):
3366 return list(map(filter_fn, obj))
3367 elif obj is None or isinstance(obj, (str, int, float, bool)):
3368 return obj
3369 else:
3370 return repr(obj)
3371
3372 return filter_fn(info_dict)
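
# Illustrative sketch (not part of the source; the dict is hypothetical):
#
#   info = {'id': 'xyz', '__private': 1, 'comment': None, 'when': datetime.date(2023, 1, 2)}
#   YoutubeDL.sanitize_info(info, remove_private_keys=True)
#   # keeps 'id'; drops '__private' and the None-valued key; turns the date
#   # into its repr(); and fills in 'epoch', '_type' and '_version'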
3373
3374 @staticmethod
3375 def filter_requested_info(info_dict, actually_filter=True):
3376 ''' Alias of sanitize_info for backward compatibility '''
3377 return YoutubeDL.sanitize_info(info_dict, actually_filter)
3378
3379 def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
3380 for filename in set(filter(None, files_to_delete)):
3381 if msg:
3382 self.to_screen(msg % filename)
3383 try:
3384 os.remove(filename)
3385 except OSError:
3386 self.report_warning(f'Unable to delete file {filename}')
3387 if filename in info.get('__files_to_move', []): # NB: Delete even if None
3388 del info['__files_to_move'][filename]
3389
3390 @staticmethod
3391 def post_extract(info_dict):
3392 def actual_post_extract(info_dict):
3393 if info_dict.get('_type') in ('playlist', 'multi_video'):
3394 for video_dict in info_dict.get('entries', {}):
3395 actual_post_extract(video_dict or {})
3396 return
3397
3398 post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
3399 info_dict.update(post_extractor())
3400
3401 actual_post_extract(info_dict or {})
3402
3403 def run_pp(self, pp, infodict):
3404 files_to_delete = []
3405 if '__files_to_move' not in infodict:
3406 infodict['__files_to_move'] = {}
3407 try:
3408 files_to_delete, infodict = pp.run(infodict)
3409 except PostProcessingError as e:
3410 # Must be True and not 'only_download'
3411 if self.params.get('ignoreerrors') is True:
3412 self.report_error(e)
3413 return infodict
3414 raise
3415
3416 if not files_to_delete:
3417 return infodict
3418 if self.params.get('keepvideo', False):
3419 for f in files_to_delete:
3420 infodict['__files_to_move'].setdefault(f, '')
3421 else:
3422 self._delete_downloaded_files(
3423 *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
3424 return infodict
3425
3426 def run_all_pps(self, key, info, *, additional_pps=None):
3427 self._forceprint(key, info)
3428 for pp in (additional_pps or []) + self._pps[key]:
3429 info = self.run_pp(pp, info)
3430 return info
3431
3432 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
3433 info = dict(ie_info)
3434 info['__files_to_move'] = files_to_move or {}
3435 try:
3436 info = self.run_all_pps(key, info)
3437 except PostProcessingError as err:
3438 msg = f'Preprocessing: {err}'
3439 info.setdefault('__pending_error', msg)
3440 self.report_error(msg, is_error=False)
3441 return info, info.pop('__files_to_move', None)
3442
3443 def post_process(self, filename, info, files_to_move=None):
3444 """Run all the postprocessors on the given file."""
3445 info['filepath'] = filename
3446 info['__files_to_move'] = files_to_move or {}
3447 info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
3448 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
3449 del info['__files_to_move']
3450 return self.run_all_pps('after_move', info)
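
# Illustrative sketch (not part of the source; MyCustomPP is hypothetical):
# embedders attach extra stages via add_post_processor, e.g.
#
#   ydl.add_post_processor(MyCustomPP(), when='post_process')
#
# and run_all_pps above then executes them, after the --print/--print-to-file
# handling, in registration order for that stage key.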
3451
3452 def _make_archive_id(self, info_dict):
3453 video_id = info_dict.get('id')
3454 if not video_id:
3455 return
3456 # Future-proof against any change in case
3457 # and keep backwards compatibility with prior versions
3458 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
3459 if extractor is None:
3460 url = str_or_none(info_dict.get('url'))
3461 if not url:
3462 return
3463 # Try to find matching extractor for the URL and take its ie_key
3464 for ie_key, ie in self._ies.items():
3465 if ie.suitable(url):
3466 extractor = ie_key
3467 break
3468 else:
3469 return
3470 return make_archive_id(extractor, video_id)
3471
3472 def in_download_archive(self, info_dict):
3473 if not self.archive:
3474 return False
3475
3476 vid_ids = [self._make_archive_id(info_dict)]
3477 vid_ids.extend(info_dict.get('_old_archive_ids') or [])
3478 return any(id_ in self.archive for id_ in vid_ids)
3479
3480 def record_download_archive(self, info_dict):
3481 fn = self.params.get('download_archive')
3482 if fn is None:
3483 return
3484 vid_id = self._make_archive_id(info_dict)
3485 assert vid_id
3486
3487 self.write_debug(f'Adding to archive: {vid_id}')
3488 if is_path_like(fn):
3489 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
3490 archive_file.write(vid_id + '\n')
3491 self.archive.add(vid_id)
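
# Illustrative sketch (not part of the source): archive entries are plain
# '<extractor key, lowercased> <video id>' lines, e.g. a YouTube video with
# id 'dQw4w9WgXcQ' is recorded as 'youtube dQw4w9WgXcQ', which is exactly
# what in_download_archive() looks up on later runs.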
3492
3493 @staticmethod
3494 def format_resolution(format, default='unknown'):
3495 if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
3496 return 'audio only'
3497 if format.get('resolution') is not None:
3498 return format['resolution']
3499 if format.get('width') and format.get('height'):
3500 return '%dx%d' % (format['width'], format['height'])
3501 elif format.get('height'):
3502 return '%sp' % format['height']
3503 elif format.get('width'):
3504 return '%dx?' % format['width']
3505 return default
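
# Illustrative sketch (not part of the source):
#   YoutubeDL.format_resolution({'width': 1920, 'height': 1080})      -> '1920x1080'
#   YoutubeDL.format_resolution({'height': 720})                      -> '720p'
#   YoutubeDL.format_resolution({'vcodec': 'none', 'acodec': 'opus'}) -> 'audio only'
#   YoutubeDL.format_resolution({})                                   -> 'unknown'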
3506
3507 def _list_format_headers(self, *headers):
3508 if self.params.get('listformats_table', True) is not False:
3509 return [self._format_out(header, self.Styles.HEADERS) for header in headers]
3510 return headers
3511
3512 def _format_note(self, fdict):
3513 res = ''
3514 if fdict.get('ext') in ['f4f', 'f4m']:
3515 res += '(unsupported)'
3516 if fdict.get('language'):
3517 if res:
3518 res += ' '
3519 res += '[%s]' % fdict['language']
3520 if fdict.get('format_note') is not None:
3521 if res:
3522 res += ' '
3523 res += fdict['format_note']
3524 if fdict.get('tbr') is not None:
3525 if res:
3526 res += ', '
3527 res += '%4dk' % fdict['tbr']
3528 if fdict.get('container') is not None:
3529 if res:
3530 res += ', '
3531 res += '%s container' % fdict['container']
3532 if (fdict.get('vcodec') is not None
3533 and fdict.get('vcodec') != 'none'):
3534 if res:
3535 res += ', '
3536 res += fdict['vcodec']
3537 if fdict.get('vbr') is not None:
3538 res += '@'
3539 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
3540 res += 'video@'
3541 if fdict.get('vbr') is not None:
3542 res += '%4dk' % fdict['vbr']
3543 if fdict.get('fps') is not None:
3544 if res:
3545 res += ', '
3546 res += '%sfps' % fdict['fps']
3547 if fdict.get('acodec') is not None:
3548 if res:
3549 res += ', '
3550 if fdict['acodec'] == 'none':
3551 res += 'video only'
3552 else:
3553 res += '%-5s' % fdict['acodec']
3554 elif fdict.get('abr') is not None:
3555 if res:
3556 res += ', '
3557 res += 'audio'
3558 if fdict.get('abr') is not None:
3559 res += '@%3dk' % fdict['abr']
3560 if fdict.get('asr') is not None:
3561 res += ' (%5dHz)' % fdict['asr']
3562 if fdict.get('filesize') is not None:
3563 if res:
3564 res += ', '
3565 res += format_bytes(fdict['filesize'])
3566 elif fdict.get('filesize_approx') is not None:
3567 if res:
3568 res += ', '
3569 res += '~' + format_bytes(fdict['filesize_approx'])
3570 return res
3571
3572 def _get_formats(self, info_dict):
3573 if info_dict.get('formats') is None:
3574 if info_dict.get('url') and info_dict.get('_type', 'video') == 'video':
3575 return [info_dict]
3576 return []
3577 return info_dict['formats']
3578
3579 def render_formats_table(self, info_dict):
3580 formats = self._get_formats(info_dict)
3581 if not formats:
3582 return
3583 if self.params.get('listformats_table', True) is False:
3584 table = [
3585 [
3586 format_field(f, 'format_id'),
3587 format_field(f, 'ext'),
3588 self.format_resolution(f),
3589 self._format_note(f)
3590 ] for f in formats if (f.get('preference') or 0) >= -1000]
3591 return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
3592
3593 def simplified_codec(f, field):
3594 assert field in ('acodec', 'vcodec')
3595 codec = f.get(field, 'unknown')
3596 if not codec:
3597 return 'unknown'
3598 elif codec != 'none':
3599 return '.'.join(codec.split('.')[:4])
3600
3601 if field == 'vcodec' and f.get('acodec') == 'none':
3602 return 'images'
3603 elif field == 'acodec' and f.get('vcodec') == 'none':
3604 return ''
3605 return self._format_out('audio only' if field == 'vcodec' else 'video only',
3606 self.Styles.SUPPRESS)
3607
3608 delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
3609 table = [
3610 [
3611 self._format_out(format_field(f, 'format_id'), self.Styles.ID),
3612 format_field(f, 'ext'),
3613 format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
3614 format_field(f, 'fps', '\t%d', func=round),
3615 format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
3616 format_field(f, 'audio_channels', '\t%s'),
3617 delim,
3618 format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
3619 format_field(f, 'tbr', '\t%dk', func=round),
3620 shorten_protocol_name(f.get('protocol', '')),
3621 delim,
3622 simplified_codec(f, 'vcodec'),
3623 format_field(f, 'vbr', '\t%dk', func=round),
3624 simplified_codec(f, 'acodec'),
3625 format_field(f, 'abr', '\t%dk', func=round),
3626 format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
3627 join_nonempty(
3628 self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
3629 format_field(f, 'language', '[%s]'),
3630 join_nonempty(format_field(f, 'format_note'),
3631 format_field(f, 'container', ignore=(None, f.get('ext'))),
3632 delim=', '),
3633 delim=' '),
3634 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3635 header_line = self._list_format_headers(
3636 'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', 'CH', delim, '\tFILESIZE', '\tTBR', 'PROTO',
3637 delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')
3638
3639 return render_table(
3640 header_line, table, hide_empty=True,
3641 delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
3642
3643 def render_thumbnails_table(self, info_dict):
3644 thumbnails = list(info_dict.get('thumbnails') or [])
3645 if not thumbnails:
3646 return None
3647 return render_table(
3648 self._list_format_headers('ID', 'Width', 'Height', 'URL'),
3649 [[t.get('id'), t.get('width') or 'unknown', t.get('height') or 'unknown', t['url']] for t in thumbnails])
3650
3651 def render_subtitles_table(self, video_id, subtitles):
3652 def _row(lang, formats):
3653 exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
3654 if len(set(names)) == 1:
3655 names = [] if names[0] == 'unknown' else names[:1]
3656 return [lang, ', '.join(names), ', '.join(exts)]
3657
3658 if not subtitles:
3659 return None
3660 return render_table(
3661 self._list_format_headers('Language', 'Name', 'Formats'),
3662 [_row(lang, formats) for lang, formats in subtitles.items()],
3663 hide_empty=True)
3664
3665 def __list_table(self, video_id, name, func, *args):
3666 table = func(*args)
3667 if not table:
3668 self.to_screen(f'{video_id} has no {name}')
3669 return
3670 self.to_screen(f'[info] Available {name} for {video_id}:')
3671 self.to_stdout(table)
3672
3673 def list_formats(self, info_dict):
3674 self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)
3675
3676 def list_thumbnails(self, info_dict):
3677 self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)
3678
3679 def list_subtitles(self, video_id, subtitles, name='subtitles'):
3680 self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
3681
3682 def urlopen(self, req):
3683 """ Start an HTTP download """
3684 if isinstance(req, str):
3685 req = sanitized_Request(req)
3686 return self._opener.open(req, timeout=self._socket_timeout)
3687
3688 def print_debug_header(self):
3689 if not self.params.get('verbose'):
3690 return
3691
3692 from . import _IN_CLI # Must be delayed import
3693
3694 # These imports can be slow. So import them only as needed
3695 from .extractor.extractors import _LAZY_LOADER
3696 from .extractor.extractors import _PLUGIN_CLASSES as plugin_extractors
3697
3698 def get_encoding(stream):
3699 ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
3700 if not supports_terminal_sequences(stream):
3701 from .utils import WINDOWS_VT_MODE # Must be imported locally
3702 ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
3703 return ret
3704
3705 encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
3706 locale.getpreferredencoding(),
3707 sys.getfilesystemencoding(),
3708 self.get_encoding(),
3709 ', '.join(
3710 f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
3711 if stream is not None and key != 'console')
3712 )
3713
3714 logger = self.params.get('logger')
3715 if logger:
3716 write_debug = lambda msg: logger.debug(f'[debug] {msg}')
3717 write_debug(encoding_str)
3718 else:
3719 write_string(f'[debug] {encoding_str}\n', encoding=None)
3720 write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')
3721
3722 source = detect_variant()
3723 if VARIANT not in (None, 'pip'):
3724 source += '*'
3725 write_debug(join_nonempty(
3726 f'{"yt-dlp" if REPOSITORY == "yt-dlp/yt-dlp" else REPOSITORY} version',
3727 __version__,
3728 f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
3729 '' if source == 'unknown' else f'({source})',
3730 '' if _IN_CLI else 'API',
3731 delim=' '))
3732
3733 if not _IN_CLI:
3734 write_debug(f'params: {self.params}')
3735
3736 if not _LAZY_LOADER:
3737 if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
3738 write_debug('Lazy loading extractors is forcibly disabled')
3739 else:
3740 write_debug('Lazy loading extractors is disabled')
3741 if plugin_extractors or plugin_postprocessors:
3742 write_debug('Plugins: %s' % [
3743 '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
3744 for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
3745 if self.params['compat_opts']:
3746 write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
3747
3748 if current_git_head():
3749 write_debug(f'Git HEAD: {current_git_head()}')
3750 write_debug(system_identifier())
3751
3752 exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
3753 ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
3754 if ffmpeg_features:
3755 exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))
3756
3757 exe_versions['rtmpdump'] = rtmpdump_version()
3758 exe_versions['phantomjs'] = PhantomJSwrapper._version()
3759 exe_str = ', '.join(
3760 f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
3761 ) or 'none'
3762 write_debug('exe versions: %s' % exe_str)
3763
3764 from .compat.compat_utils import get_package_info
3765 from .dependencies import available_dependencies
3766
3767 write_debug('Optional libraries: %s' % (', '.join(sorted({
3768 join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
3769 })) or 'none'))
3770
3771 self._setup_opener()
3772 proxy_map = {}
3773 for handler in self._opener.handlers:
3774 if hasattr(handler, 'proxies'):
3775 proxy_map.update(handler.proxies)
3776 write_debug(f'Proxy map: {proxy_map}')
3777
3778 # Not implemented
3779 if False and self.params.get('call_home'):
3780 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
3781 write_debug('Public IP address: %s' % ipaddr)
3782 latest_version = self.urlopen(
3783 'https://yt-dl.org/latest/version').read().decode()
3784 if version_tuple(latest_version) > version_tuple(__version__):
3785 self.report_warning(
3786 'You are using an outdated version (newest version: %s)! '
3787 'See https://yt-dl.org/update if you need help updating.' %
3788 latest_version)
3789
3790 def _setup_opener(self):
3791 if hasattr(self, '_opener'):
3792 return
3793 timeout_val = self.params.get('socket_timeout')
3794 self._socket_timeout = 20 if timeout_val is None else float(timeout_val)
3795
3796 opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
3797 opts_cookiefile = self.params.get('cookiefile')
3798 opts_proxy = self.params.get('proxy')
3799
3800 self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
3801
3802 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
3803 if opts_proxy is not None:
3804 if opts_proxy == '':
3805 proxies = {}
3806 else:
3807 proxies = {'http': opts_proxy, 'https': opts_proxy}
3808 else:
3809 proxies = urllib.request.getproxies()
3810 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
3811 if 'http' in proxies and 'https' not in proxies:
3812 proxies['https'] = proxies['http']
3813 proxy_handler = PerRequestProxyHandler(proxies)
3814
3815 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
3816 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3817 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
3818 redirect_handler = YoutubeDLRedirectHandler()
3819 data_handler = urllib.request.DataHandler()
3820
3821 # When passing our own FileHandler instance, build_opener won't add the
3822 # default FileHandler and allows us to disable the file protocol, which
3823 # can be used for malicious purposes (see
3824 # https://github.com/ytdl-org/youtube-dl/issues/8227)
3825 file_handler = urllib.request.FileHandler()
3826
3827 def file_open(*args, **kwargs):
3828 raise urllib.error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
3829 file_handler.file_open = file_open
3830
3831 opener = urllib.request.build_opener(
3832 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
3833
3834 # Delete the default user-agent header, which would otherwise apply in
3835 # cases where our custom HTTP handler doesn't come into play
3836 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
3837 opener.addheaders = []
3838 self._opener = opener
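
# Illustrative sketch (not part of the source; the address is hypothetical):
# {'proxy': 'socks5://127.0.0.1:9050'} routes both http and https through the
# given proxy; {'proxy': ''} disables proxying entirely; and with no 'proxy'
# param the environment (HTTP_PROXY/HTTPS_PROXY) is consulted via
# urllib.request.getproxies().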
3839
3840 def encode(self, s):
3841 if isinstance(s, bytes):
3842 return s # Already encoded
3843
3844 try:
3845 return s.encode(self.get_encoding())
3846 except UnicodeEncodeError as err:
3847 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3848 raise
3849
3850 def get_encoding(self):
3851 encoding = self.params.get('encoding')
3852 if encoding is None:
3853 encoding = preferredencoding()
3854 return encoding
3855
3856 def _write_info_json(self, label, ie_result, infofn, overwrite=None):
3857 ''' Write infojson and return True = written, 'exists' = already exists, False = skip, None = error '''
3858 if overwrite is None:
3859 overwrite = self.params.get('overwrites', True)
3860 if not self.params.get('writeinfojson'):
3861 return False
3862 elif not infofn:
3863 self.write_debug(f'Skipping writing {label} infojson')
3864 return False
3865 elif not self._ensure_dir_exists(infofn):
3866 return None
3867 elif not overwrite and os.path.exists(infofn):
3868 self.to_screen(f'[info] {label.title()} metadata is already present')
3869 return 'exists'
3870
3871 self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
3872 try:
3873 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
3874 return True
3875 except OSError:
3876 self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
3877 return None
3878
3879 def _write_description(self, label, ie_result, descfn):
3880 ''' Write description and return True = written, False = skip, None = error '''
3881 if not self.params.get('writedescription'):
3882 return False
3883 elif not descfn:
3884 self.write_debug(f'Skipping writing {label} description')
3885 return False
3886 elif not self._ensure_dir_exists(descfn):
3887 return None
3888 elif not self.params.get('overwrites', True) and os.path.exists(descfn):
3889 self.to_screen(f'[info] {label.title()} description is already present')
3890 elif ie_result.get('description') is None:
3891 self.report_warning(f'There\'s no {label} description to write')
3892 return False
3893 else:
3894 try:
3895 self.to_screen(f'[info] Writing {label} description to: {descfn}')
3896 with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
3897 descfile.write(ie_result['description'])
3898 except OSError:
3899 self.report_error(f'Cannot write {label} description file {descfn}')
3900 return None
3901 return True
3902
3903 def _write_subtitles(self, info_dict, filename):
3904 ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
3905 ret = []
3906 subtitles = info_dict.get('requested_subtitles')
3907 if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
3908 # Subtitle download errors are already handled as non-fatal by the relevant IE,
3909 # so this will silently continue when used with an IE that doesn't support subtitles
3910 return ret
3911
3912 sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
3913 if not sub_filename_base:
3914 self.to_screen('[info] Skipping writing video subtitles')
3915 return ret
3916 for sub_lang, sub_info in subtitles.items():
3917 sub_format = sub_info['ext']
3918 sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
3919 sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
3920 existing_sub = self.existing_file((sub_filename_final, sub_filename))
3921 if existing_sub:
3922 self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
3923 sub_info['filepath'] = existing_sub
3924 ret.append((existing_sub, sub_filename_final))
3925 continue
3926
3927 self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
3928 if sub_info.get('data') is not None:
3929 try:
3930 # Use newline='' to prevent conversion of newline characters
3931 # See https://github.com/ytdl-org/youtube-dl/issues/10268
3932 with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
3933 subfile.write(sub_info['data'])
3934 sub_info['filepath'] = sub_filename
3935 ret.append((sub_filename, sub_filename_final))
3936 continue
3937 except OSError:
3938 self.report_error(f'Cannot write video subtitles file {sub_filename}')
3939 return None
3940
3941 try:
3942 sub_copy = sub_info.copy()
3943 sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
3944 self.dl(sub_filename, sub_copy, subtitle=True)
3945 sub_info['filepath'] = sub_filename
3946 ret.append((sub_filename, sub_filename_final))
3947 except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
3948 msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
3949 if self.params.get('ignoreerrors') is not True: # False or 'only_download'
3950 if not self.params.get('ignoreerrors'):
3951 self.report_error(msg)
3952 raise DownloadError(msg)
3953 self.report_warning(msg)
3954 return ret
3955
3956 def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
3957 ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
3958 write_all = self.params.get('write_all_thumbnails', False)
3959 thumbnails, ret = [], []
3960 if write_all or self.params.get('writethumbnail', False):
3961 thumbnails = info_dict.get('thumbnails') or []
3962 multiple = write_all and len(thumbnails) > 1
3963
3964 if thumb_filename_base is None:
3965 thumb_filename_base = filename
3966 if thumbnails and not thumb_filename_base:
3967 self.write_debug(f'Skipping writing {label} thumbnail')
3968 return ret
3969
3970 for idx, t in list(enumerate(thumbnails))[::-1]:
3971 thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
3972 thumb_display_id = f'{label} thumbnail {t["id"]}'
3973 thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
3974 thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
3975
3976 existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
3977 if existing_thumb:
3978 self.to_screen('[info] %s is already present' % (
3979 thumb_display_id if multiple else f'{label} thumbnail').capitalize())
3980 t['filepath'] = existing_thumb
3981 ret.append((existing_thumb, thumb_filename_final))
3982 else:
3983 self.to_screen(f'[info] Downloading {thumb_display_id} ...')
3984 try:
3985 uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
3986 self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
3987 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
3988 shutil.copyfileobj(uf, thumbf)
3989 ret.append((thumb_filename, thumb_filename_final))
3990 t['filepath'] = thumb_filename
3991 except network_exceptions as err:
3992 thumbnails.pop(idx)
3993 self.report_warning(f'Unable to download {thumb_display_id}: {err}')
3994 if ret and not write_all:
3995 break
3996 return ret