import collections
import contextlib
import datetime
import errno
import fileinput
import functools
import io
import itertools
import json
import locale
import operator
import os
import random
import re
import shutil
import string
import subprocess
import sys
import tempfile
import time
import tokenize
import traceback
import unicodedata

from .cache import Cache
from .compat import urllib  # isort: split
from .compat import compat_os_name, compat_shlex_quote
from .cookies import load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.common import UnsupportedURLIE
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .plugins import directories as plugin_directories
from .postprocessor import _PLUGIN_CLASSES as plugin_pps
from .postprocessor import (
    EmbedThumbnailPP,
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    FFmpegVideoConvertorPP,
    MoveFilesAfterDownloadPP,
    get_postprocessor,
)
from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
from .update import REPOSITORY, current_git_head, detect_variant
from .utils import (
    DEFAULT_OUTTMPL,
    IDENTITY,
    LINK_TEMPLATES,
    MEDIA_EXTENSIONS,
    NO_DEFAULT,
    NUMBER_RE,
    OUTTMPL_TYPES,
    POSTPROCESS_WHEN,
    STR_FORMAT_RE_TMPL,
    STR_FORMAT_TYPES,
    ContentTooShortError,
    DateRange,
    DownloadCancelled,
    DownloadError,
    EntryNotInPlaylist,
    ExistingVideoReached,
    ExtractorError,
    FormatSorter,
    GeoRestrictedError,
    HEADRequest,
    ISO3166Utils,
    LazyList,
    MaxDownloadsReached,
    Namespace,
    PagedList,
    PerRequestProxyHandler,
    PlaylistEntries,
    Popen,
    PostProcessingError,
    ReExtractInfo,
    RejectedVideoReached,
    SameFileError,
    UnavailableVideoError,
    UserNotLive,
    YoutubeDLCookieProcessor,
    YoutubeDLHandler,
    YoutubeDLRedirectHandler,
    age_restricted,
    args_to_str,
    bug_reports_message,
    date_from_str,
    deprecation_warning,
    determine_ext,
    determine_protocol,
    encode_compat_str,
    encodeFilename,
    error_to_compat_str,
    escapeHTML,
    expand_path,
    filter_dict,
    float_or_none,
    format_bytes,
    format_decimal_suffix,
    format_field,
    formatSeconds,
    get_compatible_ext,
    get_domain,
    int_or_none,
    iri_to_uri,
    is_path_like,
    join_nonempty,
    locked_file,
    make_archive_id,
    make_dir,
    make_HTTPS_handler,
    merge_headers,
    network_exceptions,
    number_of_digits,
    orderedSet,
    orderedSet_from_options,
    parse_filesize,
    preferredencoding,
    prepend_extension,
    remove_terminal_sequences,
    render_table,
    replace_extension,
    sanitize_filename,
    sanitize_path,
    sanitize_url,
    sanitized_Request,
    std_headers,
    str_or_none,
    strftime_or_none,
    subtitles_filename,
    supports_terminal_sequences,
    system_identifier,
    timetuple_from_msec,
    to_high_limit_path,
    traverse_obj,
    try_call,
    try_get,
    url_basename,
    variadic,
    version_tuple,
    windows_enable_vt_mode,
    write_json_file,
    write_string,
)
from .version import CHANNEL, RELEASE_GIT_HEAD, VARIANT, __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL:
    """YoutubeDL class.

    YoutubeDL objects are responsible for downloading the actual video file
    and writing it to disk if the user has requested it, among some other
    tasks. In most cases there should be one per program. Given a video URL,
    the downloader doesn't know how to extract all the needed information;
    that is the task of the InfoExtractors, so it has to pass the URL to one
    of them.

    For this, YoutubeDL objects have a method that allows InfoExtractors to
    be registered in a given order. When it is passed a URL, the YoutubeDL
    object hands it to the first InfoExtractor it finds that reports being
    able to handle it. The InfoExtractor extracts all the information about
    the video or videos the URL refers to, and YoutubeDL processes the
    extracted information, possibly using a File Downloader to download the
    video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge of the InfoExtractors
    that are added to it, so this is a "mutual registration".

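    A minimal usage sketch (the URL is only a placeholder):

        from yt_dlp import YoutubeDL

        with YoutubeDL({'format': 'bestvideo+bestaudio/best'}) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
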
    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    netrc_location:    Location of the netrc file. Defaults to ~/.netrc.
    netrc_cmd:         Use a shell command to get credentials.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A dict with keys WHEN mapped to a list of templates to
                       print to stdout. The allowed keys are video or any of the
                       items in utils.POSTPROCESS_WHEN.
                       For compatibility, a single list is also accepted.
    print_to_file:     A dict with keys WHEN (same as forceprint) mapped to
                       a list of tuples with (template, filename).
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or
                       list_thumbnails is used.
    format:            Video format code. See "FORMAT SELECTION" for more details.
                       You can also pass a function. The function takes 'ctx' as
                       argument and returns the formats to download.
                       See "build_format_selector" for an implementation.
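
                       A sketch of such a callable (assuming the ctx dict
                       carries the candidate formats under 'formats'; the
                       selection logic is illustrative only):

                           def select_formats(ctx):
                               # pick the last (i.e. best-sorted) format that
                               # already carries both audio and video
                               formats = [f for f in ctx['formats']
                                          if f.get('vcodec') != 'none' and f.get('acodec') != 'none']
                               if formats:
                                   yield formats[-1]
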
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental).
    format_sort:       A list of fields by which to sort the video formats.
                       See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    prefer_free_formats: Whether to prefer video formats with free containers
                       over non-free ones of the same quality.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file.
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file.
    check_formats:     Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none),
                       'selected' (check selected formats),
                       or None (check only if requested by extractor).
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py).
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       For compatibility with youtube-dl, a single string can also be used.
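                       E.g. (a sketch of the dict form described above):
                           {'default': '%(title)s [%(id)s].%(ext)s',
                            'infojson': '%(title)s.%(ext)s'}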
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names.
    trim_file_name:    Limit length of filename (extension excluded).
    windowsfilenames:  Force the filenames to be Windows-compatible.
    ignoreerrors:      Do not stop on download/postprocessing errors.
                       Can be 'only_download' to ignore only download errors.
                       Default is 'only_download' for CLI, but False for API.
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped.
    allowed_extractors: List of regexes to match against extractor names that are allowed.
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False.
                       For compatibility with youtube-dl,
                       "nooverwrites" may also be used instead.
    playlist_items:    Specific indices of playlist to download.
    playlistrandom:    Download playlist items in random order.
    lazy_playlist:     Process playlist entries as they are received.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Print everything to stderr instead of stdout.
    consoletitle:      Display progress in the console window's titlebar.
    writedescription:  Write the video description to a .description file.
    writeinfojson:     Write the video metadata to a .info.json file.
    clean_infojson:    Remove internal metadata from the infojson.
    getcomments:       Extract video comments. These will not be written to disk
                       unless writeinfojson is also given.
    writeannotations:  Write the video annotations to a .annotations.xml file.
    writethumbnail:    Write the thumbnail image to a file.
    allow_playlist_files: Whether to also write playlists' description,
                       infojson etc. to disk when using the 'write*' options.
    write_all_thumbnails: Write all thumbnail formats to files.
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop).
    writeurllink:      Write a Windows internet shortcut file (.url).
    writewebloclink:   Write a macOS internet shortcut file (.webloc).
    writedesktoplink:  Write a Linux internet shortcut file (.desktop).
    writesubtitles:    Write the video subtitles to a file.
    writeautomaticsub: Write the automatically generated subtitles to a file.
    listsubtitles:     Lists all available subtitles for the video.
    subtitlesformat:   The format code for subtitles.
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. The language can be prefixed with a "-" to
                       exclude it from the requested languages, e.g. ['all', '-live_chat'].
    keepvideo:         Keep the video file after post-processing.
    daterange:         A utils.DateRange object; download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file.
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Videos unsuitable for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  A set, or the name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_per_url:     Whether break_on_reject and break_on_existing
                       should act on each input URL as opposed to for the entire queue.
    cookiefile:        File name or text stream from where cookies should be read and dumped to.
    cookiesfrombrowser: A tuple containing the name of the browser, the profile
                       name/path from where cookies are loaded, the name of the keyring,
                       and the container name, e.g. ('chrome', ) or
                       ('vivaldi', 'default', 'BASICTEXT') or ('firefox', 'default', None, 'Meta').
    legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
                       support RFC 5746 secure renegotiation.
    nocheckcertificate: Do not verify SSL certificates.
    client_certificate: Path to client certificate file in PEM format. May include the private key.
    client_certificate_key: Path to private key file for client certificate.
    client_certificate_password: Password for client certificate private key, if encrypted.
                       If not provided and the key is encrypted, yt-dlp will ask interactively.
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       (Only supported by some extractors)
    enable_file_urls:  Enable file:// URLs. This is disabled by default for security reasons.
    http_headers:      A dictionary of custom headers to be used for all requests.
    proxy:             URL of the proxy server to use.
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds.
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi.
    debug_printtraffic: Print out sent and received HTTP traffic.
    default_search:    Prepend this string if an input URL is not valid.
                       'auto' for elaborate guessing.
    encoding:          Use this encoding instead of the system-specified one.
    extract_flat:      Whether to resolve and process url_results further
                       * False: Always process. Default for API
                       * True: Never process
                       * 'in_playlist': Do not process inside playlist/multi_video
                       * 'discard': Always process, but don't return the result
                         from inside playlist/multi_video
                       * 'discard_in_playlist': Same as "discard", but only for
                         playlists (not multi_video). Default for CLI
    wait_for_video:    If given, wait for scheduled streams to become available.
                       The value should be a tuple containing the range
                       (min_secs, max_secs) to wait between retries.
    postprocessors:    A list of dictionaries, each with an entry
                       * key: The name of the postprocessor. See
                         yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Allowed values are
                         the entries of utils.POSTPROCESS_WHEN.
                         Assumed to be 'post_process' if not given.
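                       E.g. (a sketch; FFmpegExtractAudio is one of the
                       postprocessors listed there):
                           [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3'}]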
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                         Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading" or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                         None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                         unknown
                       * fragment_index: The counter of the currently
                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
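
                       A sketch of such a hook:

                           def progress_hook(d):
                               if d['status'] == 'finished':
                                   print('Done downloading', d['filename'])
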
    postprocessor_hooks: A list of functions that get called on postprocessing
                       progress, with a dictionary with the entries
                       * status: One of "started", "processing", or "finished".
                         Check this first and ignore unknown values.
                       * postprocessor: Name of the postprocessor
                       * info_dict: The extracted info_dict

                       Progress hooks are guaranteed to be called at least twice
                       (with status "started" and "finished") if the processing is successful.
    merge_output_format: "/" separated list of extensions to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted.
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                         about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction.
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone, or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download.
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called for every video with the signature
                       (info_dict, *, incomplete: bool) -> Optional[str]
                       For backward compatibility with youtube-dl, the signature
                       (info_dict) -> Optional[str] is also allowed.
                       - If it returns a message, the video is ignored.
                       - If it returns None, the video is downloaded.
                       - If it returns utils.NO_DEFAULT, the user is interactively
                         asked whether to download the video.
                       - Raise utils.DownloadCancelled(msg) to abort remaining
                         downloads when a video is rejected.
                       match_filter_func in utils.py is one example of this.
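
                       A sketch of such a function:

                           def match_filter(info_dict, *, incomplete=False):
                               duration = info_dict.get('duration')
                               if duration and duration < 60:
                                   return 'Rejected: shorter than 60 seconds'
                               return None  # let the download proceed
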
    color:             A dictionary with output stream names as keys
                       and their respective color policies as values.
                       Can also just be a single color policy,
                       in which case it applies to all outputs.
                       Valid stream names are 'stdout' and 'stderr'.
                       Valid color policies are one of 'always', 'auto', 'no_color' or 'never'.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header.
    geo_bypass_country:
                       Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header.
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country.
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader.
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat, format-sort,
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
                       Refer to __init__.py for their implementation.
    progress_template: Dictionary of templates for progress outputs.
                       Allowed keys are 'download', 'postprocess',
                       'download-title' (console title) and 'postprocess-title'.
                       The template is mapped on a dictionary with keys 'progress' and 'info'.
    retry_sleep_functions: Dictionary of functions that take the number of attempts
                       as argument and return the time to sleep in seconds.
                       Allowed keys are 'http', 'fragment', 'file_access'.
    download_ranges:   A callback function that gets called for every video with
                       the signature (info_dict, ydl) -> Iterable[Section].
                       Only the returned sections will be downloaded.
                       Each Section is a dict with the following keys:
                       * start_time: Start time of the section in seconds
                       * end_time: End time of the section in seconds
                       * title: Section title (Optional)
                       * index: Section number (Optional)
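
                       A sketch of such a callback (the times are illustrative):

                           def download_ranges(info_dict, ydl):
                               return [{'start_time': 0, 'end_time': 30,
                                        'title': 'intro', 'index': 1}]
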
    force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts.
    noprogress:        Do not print the progress bar.
    live_from_start:   Whether to download livestream videos from the start.

    The following parameters are not used by YoutubeDL itself; they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PPs.
                       For compatibility with youtube-dl, a single list of args
                       can also be used.

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors (default: 3)
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists into different formats at
                       discontinuities such as ad breaks (default: False)
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       E.g. {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched:      Mark videos watched (even with --simulate). Only for YouTube.

    The following options are deprecated and may be removed in the future:

    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
                       - `raise DownloadCancelled(msg)` in match_filter instead
    force_generic_extractor: Force downloader to use the generic extractor
                       - Use allowed_extractors = ['generic', 'default']
    playliststart:     - Use playlist_items
                       Playlist item to start at.
    playlistend:       - Use playlist_items
                       Playlist item to end at.
    playlistreverse:   - Use playlist_items
                       Download playlist items in reverse order.
    forceurl:          - Use forceprint
                       Force printing final URL.
    forcetitle:        - Use forceprint
                       Force printing title.
    forceid:           - Use forceprint
                       Force printing ID.
    forcethumbnail:    - Use forceprint
                       Force printing thumbnail URL.
    forcedescription:  - Use forceprint
                       Force printing description.
    forcefilename:     - Use forceprint
                       Force printing final filename.
    forceduration:     - Use forceprint
                       Force printing duration.
    allsubtitles:      - Use subtitleslangs = ['all']
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    include_ads:       - Doesn't work
                       Download ads as well
    call_home:         - Not implemented
                       Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging.
    post_hooks:        - Register a custom postprocessor
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.
    prefer_ffmpeg:     - avconv support is deprecated
                       If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: - Use extractor_args
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
    no_color:          Same as `color='no_color'`
    """

    _NUMERIC_FIELDS = {
        'width', 'height', 'asr', 'audio_channels', 'fps',
        'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
    }

    _format_fields = {
        # NB: Keep in sync with the docstring of extractor/common.py
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
        'preference', 'language', 'language_preference', 'quality', 'source_preference',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'hls_aes', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
    }
    _format_selection_exts = {
        'audio': set(MEDIA_EXTENSIONS.common_audio),
        'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
        'storyboards': set(MEDIA_EXTENSIONS.storyboards),
    }

    def __init__(self, params=None, auto_init=True):
        """Create a YoutubeDL object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        if params is None:
            params = {}
        self.params = params
        self._ies = {}
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._num_videos = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)

        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            out=stdout,
            error=sys.stderr,
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
        )

        try:
            windows_enable_vt_mode()
        except Exception as e:
            self.write_debug(f'Failed to enable VT mode: {e}')

        if self.params.get('no_color'):
            if self.params.get('color') is not None:
                self.report_warning('Overwriting params from "color" with "no_color"')
            self.params['color'] = 'no_color'

        term_allow_color = os.environ.get('TERM', '').lower() != 'dumb'

        def process_color_policy(stream):
            stream_name = {sys.stdout: 'stdout', sys.stderr: 'stderr'}[stream]
            policy = traverse_obj(self.params, ('color', (stream_name, None), {str}), get_all=False)
            if policy in ('auto', None):
                return term_allow_color and supports_terminal_sequences(stream)
            assert policy in ('always', 'never', 'no_color')
            return {'always': True, 'never': False}.get(policy, policy)

        self._allow_colors = Namespace(**{
            name: process_color_policy(stream)
            for name, stream in self._out_files.items_ if name != 'console'
        })

        # The code is left like this to be reused for future deprecations
        MIN_SUPPORTED, MIN_RECOMMENDED = (3, 7), (3, 7)
        current_version = sys.version_info[:2]
        if current_version < MIN_RECOMMENDED:
            msg = ('Support for Python version %d.%d has been deprecated. '
                   'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details.'
                   '\n You will no longer receive updates on this version')
            if current_version < MIN_SUPPORTED:
                msg = 'Python version %d.%d is no longer supported'
            self.deprecated_feature(
                f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                ' If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        if self.params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = shutil.get_terminal_size().columns
                width_args = [] if width is None else ['-w', str(width)]
                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        if auto_init and auto_init != 'no_verbose_header':
            self.print_debug_header()

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecated_feature(msg)

        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        if self.params.get('simulate') is None and any((
            self.params.get('list_thumbnails'),
            self.params.get('listformats'),
            self.params.get('listsubtitles'),
        )):
            self.params['simulate'] = 'list_only'

        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}

        if auto_init:
            self.add_default_info_extractors()

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))

        # Set http_headers defaults according to std_headers
        self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))

        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                when=when)

        self._setup_opener()

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            archive = set()
            if fn is None:
                return archive
            elif not is_path_like(fn):
                return fn

            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
            return archive

        self.archive = preload_download_archive(self.params.get('download_archive'))

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key. It will try to get one from
        the _ies list; if there is no instance, it will create a new one and
        add it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        all_ies = {ie.IE_NAME.lower(): ie for ie in gen_extractor_classes()}
        all_ies['end'] = UnsupportedURLIE()
        try:
            ie_names = orderedSet_from_options(
                self.params.get('allowed_extractors', ['default']), {
                    'all': list(all_ies),
                    'default': [name for name, ie in all_ies.items() if ie._ENABLED],
                }, use_regex=True)
        except re.error as e:
            raise ValueError(f'Wrong regex for allowed_extractors: {e.pattern}')
        for name in ie_names:
            self.add_info_extractor(all_ies[name])
        self.write_debug(f'Loaded {len(ie_names)} extractors')

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
            for pp in pps:
                pp.add_progress_hook(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. '
                                     'Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. '
                                     'Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)

    def to_screen(self, message, skip_eol=False, quiet=None, only_once=False):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            return
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            return
        self._write_string(
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen, only_once=only_once)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)

    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
            return
        self._write_string(code, self._out_files.console)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        else:
            self._send_console_code(f'\033]0;{message}\007')

    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[22;0t')  # Save the title on stack

    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[23;0t')  # Restore the title from stack

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None, is_error=True):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may throw an exception or not
        when errors are found, after printing the message.

        @param tb          If given, is additional traceback information
        @param is_error    Whether to raise error according to ignoreerrors
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not is_error:
            return
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    Styles = Namespace(
        HEADERS='yellow',
        EMPHASIS='light blue',
        FILENAME='green',
        ID='green',
        DELIM='blue',
        ERROR='red',
        WARNING='yellow',
        SUPPRESS='light black',
    )

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        text = str(text)
        if test_encoding:
            original_text = text
            # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
                text = fallback
        return format_text(text, f) if allow_colors is True else text if fallback is None else fallback

    def _format_out(self, *args, **kwargs):
        return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)

    def _format_screen(self, *args, **kwargs):
        return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)

    def _format_err(self, *args, **kwargs):
        return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message, *, stacklevel=0):
        deprecation_warning(
            message, stacklevel=stacklevel + 1, printer=self.report_error, is_error=False)

    def deprecated_feature(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'Deprecated Feature: {message}')
        self.to_stderr(f'{self._format_err("Deprecated Feature:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, *args, **kwargs):
        '''
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

    def write_debug(self, message, only_once=False):
        '''Log the debug message to the logger, or print it to stderr'''
        if not self.params.get('verbose', False):
            return
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']

    def _parse_outtmpl(self):
        sanitize = IDENTITY
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict), '"paths" parameter must be a dictionary'
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$',
        # which is not what we want, since we need to keep '%%' intact for
        # the template dict substitution step. Work around this with a
        # boundary-like separator hack.
        sep = ''.join(random.choices(string.ascii_letters, k=32))
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. E.g. for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)
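
    # Illustrative behaviour (a sketch, not part of the API): a stray '%' that
    # does not start a template key is doubled so that '%'-substitution is safe:
    #   escape_outtmpl('100% %(title)s') == '100%% %(title)s'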

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err
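
    # Illustrative behaviour (a sketch): a well-formed template validates to
    # None, while a malformed one surfaces the ValueError:
    #   validate_outtmpl('%(title)s.%(ext)s')  # -> None
    #   validate_outtmpl('%(title')            # -> ValueError(...)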

    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)
        return info_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed
        """

        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int, slice or "{field, ...}"
        FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}
        FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {
            'inner': FIELD_INNER_RE,
            'field': rf'\w*(?:\.{FIELD_INNER_RE})*'
        }
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?xs)
            (?P<negate>-)?
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<remaining>
                (?P<alternate>(?<!\\),[^|&)]+)?
                (?:&(?P<replacement>.*?))?
                (?:\|(?P<default>.*?))?
            )$''')
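        # Some template keys this grammar accepts (illustrative examples of the
        # output-template mini-language, not an exhaustive list):
        #   '%(playlist_index-1)d'        -- arithmetic on a field
        #   '%(duration>%H-%M-%S)s'       -- strftime-style formatting
        #   '%(id&{} downloaded|no id)s'  -- replacement text and default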

        def _traverse_infodict(fields):
            fields = [f for x in re.split(r'\.({.+?})\.?', fields)
                      for f in ([x] if x.startswith('{') else x.split('.'))]
            for i in (0, -1):
                if fields and not fields[i]:
                    fields.pop(i)

            for i, f in enumerate(fields):
                if not f.startswith('{'):
                    continue
                assert f.endswith('}'), f'No closing brace for {f} in {fields}'
                fields[i] = {k: k.split('.') for k in f[1:-1].split(',')}

            return traverse_obj(info_dict, fields, is_user_input=True, traverse_string=True)

        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
            if sanitize and value == '':
                value = None
            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']
                else NO_DEFAULT))

        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            return repr(obj)

        class _ReplacementFormatter(string.Formatter):
            def get_field(self, field_name, args, kwargs):
                if field_name.isdigit():
                    return args[0], -1
                raise ValueError('Unsupported field')

        replacement_formatter = _ReplacementFormatter()

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            initial_field = mobj.group('fields') if mobj else ''
            value, replacement, default = None, None, na
            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                replacement = mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
                else:
                    break

            fmt = outer_mobj.group('format')
            if fmt == 's' and value is not None and key in field_size_compat_map.keys():
                fmt = f'0{field_size_compat_map[key]:d}d'

            if None not in (value, replacement):
                try:
                    value = replacement_formatter.format(replacement, value)
                except ValueError:
                    value, default = None, na

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
            if value is None:
                value, fmt = default, 's'
            elif fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(
                    value, default=_dumpjson_default,
                    indent=4 if '#' in flags else None, ensure_ascii='+' not in flags), str_fmt
            elif fmt[-1] == 'h':  # html
                value, fmt = escapeHTML(str(value)), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode() % str(value).encode()
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                    value), str_fmt
            elif fmt[-1] == 'D':  # decimal suffix
                num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
                value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
                                              factor=1024 if '#' in flags else 1000)
            elif fmt[-1] == 'S':  # filename sanitization
                value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
            elif fmt[-1] == 'c':
                if value:
                    value = str(value)[0]
                else:
                    fmt = str_fmt
            elif fmt[-1] not in 'rsa':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'

            if sanitize:
                # If value is an object, sanitize might convert it to a string
                # So we convert it to repr first
                if fmt[-1] == 'r':
                    value, fmt = repr(value), str_fmt
                elif fmt[-1] == 'a':
                    value, fmt = ascii(value), str_fmt
                if fmt[-1] in 'csra':
                    value = sanitizer(initial_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT

    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict

    def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
        assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
        if outtmpl is None:
            outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
        try:
            outtmpl = self._outtmpl_expandpath(outtmpl)
            filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
            if not filename:
                return None

            if tmpl_type in ('', 'temp'):
                final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
                if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
                    filename = replace_extension(filename, ext, final_ext)
            elif tmpl_type:
                force_ext = OUTTMPL_TYPES[tmpl_type]
                if force_ext:
                    filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                no_ext, *ext = filename.rsplit('.', 2)
                filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

1386 def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
1387 """Generate the output filename"""
1388 if outtmpl:
1389 assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
1390 dir_type = None
1391 filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
1392 if not filename and dir_type not in ('', 'temp'):
1393 return ''
1394
1395 if warn:
1396 if not self.params.get('paths'):
1397 pass
1398 elif filename == '-':
1399 self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
1400 elif os.path.isabs(filename):
1401 self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
1402 if filename == '-' or not filename:
1403 return filename
1404
1405 return self.get_output_path(dir_type, filename)
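# Sketch of typical calls (illustrative; 'info' is a hypothetical info dict):
#   ydl.prepare_filename(info)               # path for the media file itself
#   ydl.prepare_filename(info, 'thumbnail')  # path for a given OUTTMPL_TYPES key
# Returns '-' when outputting to stdout, and may return '' when the template
# evaluates to nothing for a non-default dir_type.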
1406
1407 def _match_entry(self, info_dict, incomplete=False, silent=False):
1408 """Returns None if the file should be downloaded"""
1409 _type = 'video' if 'playlist-match-filter' in self.params['compat_opts'] else info_dict.get('_type', 'video')
1410 assert incomplete or _type == 'video', 'Only video result can be considered complete'
1411
1412 video_title = info_dict.get('title', info_dict.get('id', 'entry'))
1413
1414 def check_filter():
1415 if _type in ('playlist', 'multi_video'):
1416 return
1417 elif _type in ('url', 'url_transparent') and not try_call(
1418 lambda: self.get_info_extractor(info_dict['ie_key']).is_single_video(info_dict['url'])):
1419 return
1420
1421 if 'title' in info_dict:
1422 # This can happen when we're just evaluating the playlist
1423 title = info_dict['title']
1424 matchtitle = self.params.get('matchtitle', False)
1425 if matchtitle:
1426 if not re.search(matchtitle, title, re.IGNORECASE):
1427 return '"' + title + '" title did not match pattern "' + matchtitle + '"'
1428 rejecttitle = self.params.get('rejecttitle', False)
1429 if rejecttitle:
1430 if re.search(rejecttitle, title, re.IGNORECASE):
1431 return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
1432
1433 date = info_dict.get('upload_date')
1434 if date is not None:
1435 dateRange = self.params.get('daterange', DateRange())
1436 if date not in dateRange:
1437 return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
1438 view_count = info_dict.get('view_count')
1439 if view_count is not None:
1440 min_views = self.params.get('min_views')
1441 if min_views is not None and view_count < min_views:
1442 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
1443 max_views = self.params.get('max_views')
1444 if max_views is not None and view_count > max_views:
1445 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
1446 if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
1447 return 'Skipping "%s" because it is age restricted' % video_title
1448
1449 match_filter = self.params.get('match_filter')
1450 if match_filter is None:
1451 return None
1452
1453 cancelled = None
1454 try:
1455 try:
1456 ret = match_filter(info_dict, incomplete=incomplete)
1457 except TypeError:
1458 # For backward compatibility
1459 ret = None if incomplete else match_filter(info_dict)
1460 except DownloadCancelled as err:
1461 if err.msg is not NO_DEFAULT:
1462 raise
1463 ret, cancelled = err.msg, err
1464
1465 if ret is NO_DEFAULT:
1466 while True:
1467 filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
1468 reply = input(self._format_screen(
1469 f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
1470 if reply in {'y', ''}:
1471 return None
1472 elif reply == 'n':
1473 if cancelled:
1474 raise type(cancelled)(f'Skipping {video_title}')
1475 return f'Skipping {video_title}'
1476 return ret
1477
1478 if self.in_download_archive(info_dict):
1479 reason = '%s has already been recorded in the archive' % video_title
1480 break_opt, break_err = 'break_on_existing', ExistingVideoReached
1481 else:
1482 try:
1483 reason = check_filter()
1484 except DownloadCancelled as e:
1485 reason, break_opt, break_err = e.msg, 'match_filter', type(e)
1486 else:
1487 break_opt, break_err = 'break_on_reject', RejectedVideoReached
1488 if reason is not None:
1489 if not silent:
1490 self.to_screen('[download] ' + reason)
1491 if self.params.get(break_opt, False):
1492 raise break_err()
1493 return reason
1494
1495 @staticmethod
1496 def add_extra_info(info_dict, extra_info):
1497 """Set the keys from extra_info in info_dict if they are missing"""
1498 for key, value in extra_info.items():
1499 info_dict.setdefault(key, value)
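# e.g. add_extra_info(info_dict, {'playlist': 'My List'}) only fills in
# 'playlist' if the extractor did not already set it (setdefault semantics)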
1500
1501 def extract_info(self, url, download=True, ie_key=None, extra_info=None,
1502 process=True, force_generic_extractor=False):
1503 """
1504 Extract and return the information dictionary of the URL
1505
1506 Arguments:
1507 @param url URL to extract
1508
1509 Keyword arguments:
1510 @param download Whether to download videos
1511 @param process Whether to resolve all unresolved references (URLs, playlist items).
1512 Must be True for download to work
1513 @param ie_key Use only the extractor with this key
1514
1515 @param extra_info Dictionary containing the extra values to add to the info (For internal use only)
1516 @param force_generic_extractor Force using the generic extractor (Deprecated; use ie_key='Generic')
1517 """
1518
1519 if extra_info is None:
1520 extra_info = {}
1521
1522 if not ie_key and force_generic_extractor:
1523 ie_key = 'Generic'
1524
1525 if ie_key:
1526 ies = {ie_key: self._ies[ie_key]} if ie_key in self._ies else {}
1527 else:
1528 ies = self._ies
1529
1530 for key, ie in ies.items():
1531 if not ie.suitable(url):
1532 continue
1533
1534 if not ie.working():
1535 self.report_warning('The program functionality for this site has been marked as broken, '
1536 'and will probably not work.')
1537
1538 temp_id = ie.get_temp_id(url)
1539 if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': key}):
1540 self.to_screen(f'[{key}] {temp_id}: has already been recorded in the archive')
1541 if self.params.get('break_on_existing', False):
1542 raise ExistingVideoReached()
1543 break
1544 return self.__extract_info(url, self.get_info_extractor(key), download, extra_info, process)
1545 else:
1546 extractors_restricted = self.params.get('allowed_extractors') not in (None, ['default'])
1547 self.report_error(f'No suitable extractor{format_field(ie_key, None, " (%s)")} found for URL {url}',
1548 tb=False if extractors_restricted else None)
1549
1550 def _handle_extraction_exceptions(func):
1551 @functools.wraps(func)
1552 def wrapper(self, *args, **kwargs):
1553 while True:
1554 try:
1555 return func(self, *args, **kwargs)
1556 except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
1557 raise
1558 except ReExtractInfo as e:
1559 if e.expected:
1560 self.to_screen(f'{e}; Re-extracting data')
1561 else:
1562 self.to_stderr('\r')
1563 self.report_warning(f'{e}; Re-extracting data')
1564 continue
1565 except GeoRestrictedError as e:
1566 msg = e.msg
1567 if e.countries:
1568 msg += '\nThis video is available in %s.' % ', '.join(
1569 map(ISO3166Utils.short2full, e.countries))
1570 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around this.'
1571 self.report_error(msg)
1572 except ExtractorError as e: # An error we somewhat expected
1573 self.report_error(str(e), e.format_traceback())
1574 except Exception as e:
1575 if self.params.get('ignoreerrors'):
1576 self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
1577 else:
1578 raise
1579 break
1580 return wrapper
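# _handle_extraction_exceptions is applied as a decorator (see __extract_info
# and __process_iterable_entry below):
#
#   @_handle_extraction_exceptions
#   def __extract_info(self, url, ie, download, extra_info, process): ...
#
# DownloadCancelled and the IndexErrors propagate; ReExtractInfo restarts the
# wrapped call; GeoRestrictedError/ExtractorError are reported and swallowed;
# unexpected exceptions re-raise unless the 'ignoreerrors' param is set.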
1581
1582 def _wait_for_video(self, ie_result={}):
1583 if (not self.params.get('wait_for_video')
1584 or ie_result.get('_type', 'video') != 'video'
1585 or ie_result.get('formats') or ie_result.get('url')):
1586 return
1587
1588 format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
1589 last_msg = ''
1590
1591 def progress(msg):
1592 nonlocal last_msg
1593 full_msg = f'{msg}\n'
1594 if not self.params.get('noprogress'):
1595 full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
1596 elif last_msg:
1597 return
1598 self.to_screen(full_msg, skip_eol=True)
1599 last_msg = msg
1600
1601 min_wait, max_wait = self.params.get('wait_for_video')
1602 diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
1603 if diff is None and ie_result.get('live_status') == 'is_upcoming':
1604 diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
1605 self.report_warning('Release time of video is not known')
1606 elif ie_result and (diff or 0) <= 0:
1607 self.report_warning('Video should already be available according to extracted info')
1608 diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
1609 self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')
1610
1611 wait_till = time.time() + diff
1612 try:
1613 while True:
1614 diff = wait_till - time.time()
1615 if diff <= 0:
1616 progress('')
1617 raise ReExtractInfo('[wait] Wait period ended', expected=True)
1618 progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
1619 time.sleep(1)
1620 except KeyboardInterrupt:
1621 progress('')
1622 raise ReExtractInfo('[wait] Interrupted by user', expected=True)
1623 except BaseException as e:
1624 if not isinstance(e, ReExtractInfo):
1625 self.to_screen('')
1626 raise
1627
1628 @_handle_extraction_exceptions
1629 def __extract_info(self, url, ie, download, extra_info, process):
1630 try:
1631 ie_result = ie.extract(url)
1632 except UserNotLive as e:
1633 if process:
1634 if self.params.get('wait_for_video'):
1635 self.report_warning(e)
1636 self._wait_for_video()
1637 raise
1638 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
1639 self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
1640 return
1641 if isinstance(ie_result, list):
1642 # Backwards compatibility: old IE result format
1643 ie_result = {
1644 '_type': 'compat_list',
1645 'entries': ie_result,
1646 }
1647 if extra_info.get('original_url'):
1648 ie_result.setdefault('original_url', extra_info['original_url'])
1649 self.add_default_extra_info(ie_result, ie, url)
1650 if process:
1651 self._wait_for_video(ie_result)
1652 return self.process_ie_result(ie_result, download, extra_info)
1653 else:
1654 return ie_result
1655
1656 def add_default_extra_info(self, ie_result, ie, url):
1657 if url is not None:
1658 self.add_extra_info(ie_result, {
1659 'webpage_url': url,
1660 'original_url': url,
1661 })
1662 webpage_url = ie_result.get('webpage_url')
1663 if webpage_url:
1664 self.add_extra_info(ie_result, {
1665 'webpage_url_basename': url_basename(webpage_url),
1666 'webpage_url_domain': get_domain(webpage_url),
1667 })
1668 if ie is not None:
1669 self.add_extra_info(ie_result, {
1670 'extractor': ie.IE_NAME,
1671 'extractor_key': ie.ie_key(),
1672 })
1673
1674 def process_ie_result(self, ie_result, download=True, extra_info=None):
1675 """
1676 Take the result of the ie (may be modified) and resolve all unresolved
1677 references (URLs, playlist items).
1678
1679 It will also download the videos if 'download' is True.
1680 Returns the resolved ie_result.
1681 """
1682 if extra_info is None:
1683 extra_info = {}
1684 result_type = ie_result.get('_type', 'video')
1685
1686 if result_type in ('url', 'url_transparent'):
1687 ie_result['url'] = sanitize_url(
1688 ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
1689 if ie_result.get('original_url') and not extra_info.get('original_url'):
1690 extra_info = {'original_url': ie_result['original_url'], **extra_info}
1691
1692 extract_flat = self.params.get('extract_flat', False)
1693 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1694 or extract_flat is True):
1695 info_copy = ie_result.copy()
1696 ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
1697 if ie and not ie_result.get('id'):
1698 info_copy['id'] = ie.get_temp_id(ie_result['url'])
1699 self.add_default_extra_info(info_copy, ie, ie_result['url'])
1700 self.add_extra_info(info_copy, extra_info)
1701 info_copy, _ = self.pre_process(info_copy)
1702 self._fill_common_fields(info_copy, False)
1703 self.__forced_printings(info_copy)
1704 self._raise_pending_errors(info_copy)
1705 if self.params.get('force_write_download_archive', False):
1706 self.record_download_archive(info_copy)
1707 return ie_result
1708
1709 if result_type == 'video':
1710 self.add_extra_info(ie_result, extra_info)
1711 ie_result = self.process_video_result(ie_result, download=download)
1712 self._raise_pending_errors(ie_result)
1713 additional_urls = (ie_result or {}).get('additional_urls')
1714 if additional_urls:
1715 # TODO: Improve MetadataParserPP to allow setting a list
1716 if isinstance(additional_urls, str):
1717 additional_urls = [additional_urls]
1718 self.to_screen(
1719 '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
1720 self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
1721 ie_result['additional_entries'] = [
1722 self.extract_info(
1723 url, download, extra_info=extra_info,
1724 force_generic_extractor=self.params.get('force_generic_extractor'))
1725 for url in additional_urls
1726 ]
1727 return ie_result
1728 elif result_type == 'url':
1729 # We have to add extra_info to the results because it may be
1730 # contained in a playlist
1731 return self.extract_info(
1732 ie_result['url'], download,
1733 ie_key=ie_result.get('ie_key'),
1734 extra_info=extra_info)
1735 elif result_type == 'url_transparent':
1736 # Use the information from the embedding page
1737 info = self.extract_info(
1738 ie_result['url'], ie_key=ie_result.get('ie_key'),
1739 extra_info=extra_info, download=False, process=False)
1740
1741 # extract_info may return None when ignoreerrors is enabled and
1742 # extraction failed with an error, don't crash and return early
1743 # in this case
1744 if not info:
1745 return info
1746
1747 exempted_fields = {'_type', 'url', 'ie_key'}
1748 if not ie_result.get('section_end') and ie_result.get('section_start') is None:
1749 # For video clips, the id etc of the clip extractor should be used
1750 exempted_fields |= {'id', 'extractor', 'extractor_key'}
1751
1752 new_result = info.copy()
1753 new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))
1754
1755 # Extracted info may not be a video result (i.e.
1756 # info.get('_type', 'video') != 'video') but rather a url or
1757 # url_transparent. In such cases, outer metadata (from ie_result)
1758 # should be propagated to the inner one (info). For this to happen,
1759 # _type of info should be overridden with url_transparent. This
1760 # fixes the issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
1761 if new_result.get('_type') == 'url':
1762 new_result['_type'] = 'url_transparent'
1763
1764 return self.process_ie_result(
1765 new_result, download=download, extra_info=extra_info)
1766 elif result_type in ('playlist', 'multi_video'):
1767 # Protect from infinite recursion due to recursively nested playlists
1768 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
1769 webpage_url = ie_result.get('webpage_url') # Playlists may not have webpage_url
1770 if webpage_url and webpage_url in self._playlist_urls:
1771 self.to_screen(
1772 '[download] Skipping already downloaded playlist: %s'
1773 % (ie_result.get('title') or ie_result.get('id')))
1774 return
1775
1776 self._playlist_level += 1
1777 self._playlist_urls.add(webpage_url)
1778 self._fill_common_fields(ie_result, False)
1779 self._sanitize_thumbnails(ie_result)
1780 try:
1781 return self.__process_playlist(ie_result, download)
1782 finally:
1783 self._playlist_level -= 1
1784 if not self._playlist_level:
1785 self._playlist_urls.clear()
1786 elif result_type == 'compat_list':
1787 self.report_warning(
1788 'Extractor %s returned a compat_list result. '
1789 'It needs to be updated.' % ie_result.get('extractor'))
1790
1791 def _fixup(r):
1792 self.add_extra_info(r, {
1793 'extractor': ie_result['extractor'],
1794 'webpage_url': ie_result['webpage_url'],
1795 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1796 'webpage_url_domain': get_domain(ie_result['webpage_url']),
1797 'extractor_key': ie_result['extractor_key'],
1798 })
1799 return r
1800 ie_result['entries'] = [
1801 self.process_ie_result(_fixup(r), download, extra_info)
1802 for r in ie_result['entries']
1803 ]
1804 return ie_result
1805 else:
1806 raise Exception('Invalid result type: %s' % result_type)
1807
1808 def _ensure_dir_exists(self, path):
1809 return make_dir(path, self.report_error)
1810
1811 @staticmethod
1812 def _playlist_infodict(ie_result, strict=False, **kwargs):
1813 info = {
1814 'playlist_count': ie_result.get('playlist_count'),
1815 'playlist': ie_result.get('title') or ie_result.get('id'),
1816 'playlist_id': ie_result.get('id'),
1817 'playlist_title': ie_result.get('title'),
1818 'playlist_uploader': ie_result.get('uploader'),
1819 'playlist_uploader_id': ie_result.get('uploader_id'),
1820 **kwargs,
1821 }
1822 if strict:
1823 return info
1824 if ie_result.get('webpage_url'):
1825 info.update({
1826 'webpage_url': ie_result['webpage_url'],
1827 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1828 'webpage_url_domain': get_domain(ie_result['webpage_url']),
1829 })
1830 return {
1831 **info,
1832 'playlist_index': 0,
1833 '__last_playlist_index': max(ie_result.get('requested_entries') or (0, 0)),
1834 'extractor': ie_result['extractor'],
1835 'extractor_key': ie_result['extractor_key'],
1836 }
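# Illustrative result (hypothetical values): for a playlist with id 'PL123'
# and title 'My List', strict=True yields fields such as
#   {'playlist': 'My List', 'playlist_id': 'PL123', 'playlist_title': 'My List',
#    'playlist_count': None, 'playlist_uploader': None, ...}
# while strict=False additionally fills the webpage_url*, extractor* and
# playlist index fields.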
1837
1838 def __process_playlist(self, ie_result, download):
1839 """Process each entry in the playlist"""
1840 assert ie_result['_type'] in ('playlist', 'multi_video')
1841
1842 common_info = self._playlist_infodict(ie_result, strict=True)
1843 title = common_info.get('playlist') or '<Untitled>'
1844 if self._match_entry(common_info, incomplete=True) is not None:
1845 return
1846 self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')
1847
1848 all_entries = PlaylistEntries(self, ie_result)
1849 entries = orderedSet(all_entries.get_requested_items(), lazy=True)
1850
1851 lazy = self.params.get('lazy_playlist')
1852 if lazy:
1853 resolved_entries, n_entries = [], 'N/A'
1854 ie_result['requested_entries'], ie_result['entries'] = None, None
1855 else:
1856 entries = resolved_entries = list(entries)
1857 n_entries = len(resolved_entries)
1858 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
1859 if not ie_result.get('playlist_count'):
1860 # Better to do this after potentially exhausting entries
1861 ie_result['playlist_count'] = all_entries.get_full_count()
1862
1863 extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
1864 ie_copy = collections.ChainMap(ie_result, extra)
1865
1866 _infojson_written = False
1867 write_playlist_files = self.params.get('allow_playlist_files', True)
1868 if write_playlist_files and self.params.get('list_thumbnails'):
1869 self.list_thumbnails(ie_result)
1870 if write_playlist_files and not self.params.get('simulate'):
1871 _infojson_written = self._write_info_json(
1872 'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
1873 if _infojson_written is None:
1874 return
1875 if self._write_description('playlist', ie_result,
1876 self.prepare_filename(ie_copy, 'pl_description')) is None:
1877 return
1878 # TODO: This should be passed to ThumbnailsConvertor if necessary
1879 self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))
1880
1881 if lazy:
1882 if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
1883 self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
1884 elif self.params.get('playlistreverse'):
1885 entries.reverse()
1886 elif self.params.get('playlistrandom'):
1887 random.shuffle(entries)
1888
1889 self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} items'
1890 f'{format_field(ie_result, "playlist_count", " of %s")}')
1891
1892 keep_resolved_entries = self.params.get('extract_flat') != 'discard'
1893 if self.params.get('extract_flat') == 'discard_in_playlist':
1894 keep_resolved_entries = ie_result['_type'] != 'playlist'
1895 if keep_resolved_entries:
1896 self.write_debug('The information of all playlist entries will be held in memory')
1897
1898 failures = 0
1899 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
1900 for i, (playlist_index, entry) in enumerate(entries):
1901 if lazy:
1902 resolved_entries.append((playlist_index, entry))
1903 if not entry:
1904 continue
1905
1906 entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
1907 if not lazy and 'playlist-index' in self.params['compat_opts']:
1908 playlist_index = ie_result['requested_entries'][i]
1909
1910 entry_copy = collections.ChainMap(entry, {
1911 **common_info,
1912 'n_entries': int_or_none(n_entries),
1913 'playlist_index': playlist_index,
1914 'playlist_autonumber': i + 1,
1915 })
1916
1917 if self._match_entry(entry_copy, incomplete=True) is not None:
1918 # For compatibility with youtube-dl. See https://github.com/yt-dlp/yt-dlp/issues/4369
1919 resolved_entries[i] = (playlist_index, NO_DEFAULT)
1920 continue
1921
1922 self.to_screen('[download] Downloading item %s of %s' % (
1923 self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
1924
1925 entry_result = self.__process_iterable_entry(entry, download, collections.ChainMap({
1926 'playlist_index': playlist_index,
1927 'playlist_autonumber': i + 1,
1928 }, extra))
1929 if not entry_result:
1930 failures += 1
1931 if failures >= max_failures:
1932 self.report_error(
1933 f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
1934 break
1935 if keep_resolved_entries:
1936 resolved_entries[i] = (playlist_index, entry_result)
1937
1938 # Update with processed data
1939 ie_result['entries'] = [e for _, e in resolved_entries if e is not NO_DEFAULT]
1940 ie_result['requested_entries'] = [i for i, e in resolved_entries if e is not NO_DEFAULT]
1941 if ie_result['requested_entries'] == try_call(lambda: list(range(1, ie_result['playlist_count'] + 1))):
1942 # Do not set for full playlist
1943 ie_result.pop('requested_entries')
1944
1945 # Write the updated info to json
1946 if _infojson_written is True and self._write_info_json(
1947 'updated playlist', ie_result,
1948 self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
1949 return
1950
1951 ie_result = self.run_all_pps('playlist', ie_result)
1952 self.to_screen(f'[download] Finished downloading playlist: {title}')
1953 return ie_result
1954
1955 @_handle_extraction_exceptions
1956 def __process_iterable_entry(self, entry, download, extra_info):
1957 return self.process_ie_result(
1958 entry, download=download, extra_info=extra_info)
1959
1960 def _build_format_filter(self, filter_spec):
1961 " Returns a function to filter the formats according to the filter_spec "
1962
1963 OPERATORS = {
1964 '<': operator.lt,
1965 '<=': operator.le,
1966 '>': operator.gt,
1967 '>=': operator.ge,
1968 '=': operator.eq,
1969 '!=': operator.ne,
1970 }
1971 operator_rex = re.compile(r'''(?x)\s*
1972 (?P<key>[\w.-]+)\s*
1973 (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1974 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
1975 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
1976 m = operator_rex.fullmatch(filter_spec)
1977 if m:
1978 try:
1979 comparison_value = int(m.group('value'))
1980 except ValueError:
1981 comparison_value = parse_filesize(m.group('value'))
1982 if comparison_value is None:
1983 comparison_value = parse_filesize(m.group('value') + 'B')
1984 if comparison_value is None:
1985 raise ValueError(
1986 'Invalid value %r in format specification %r' % (
1987 m.group('value'), filter_spec))
1988 op = OPERATORS[m.group('op')]
1989
1990 if not m:
1991 STR_OPERATORS = {
1992 '=': operator.eq,
1993 '^=': lambda attr, value: attr.startswith(value),
1994 '$=': lambda attr, value: attr.endswith(value),
1995 '*=': lambda attr, value: value in attr,
1996 '~=': lambda attr, value: value.search(attr) is not None
1997 }
1998 str_operator_rex = re.compile(r'''(?x)\s*
1999 (?P<key>[a-zA-Z0-9._-]+)\s*
2000 (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
2001 (?P<quote>["'])?
2002 (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
2003 (?(quote)(?P=quote))\s*
2004 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
2005 m = str_operator_rex.fullmatch(filter_spec)
2006 if m:
2007 if m.group('op') == '~=':
2008 comparison_value = re.compile(m.group('value'))
2009 else:
2010 comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
2011 str_op = STR_OPERATORS[m.group('op')]
2012 if m.group('negation'):
2013 op = lambda attr, value: not str_op(attr, value)
2014 else:
2015 op = str_op
2016
2017 if not m:
2018 raise SyntaxError('Invalid filter specification %r' % filter_spec)
2019
2020 def _filter(f):
2021 actual_value = f.get(m.group('key'))
2022 if actual_value is None:
2023 return m.group('none_inclusive')
2024 return op(actual_value, comparison_value)
2025 return _filter
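# Example filter specs this accepts (cf. "Filtering Formats" in the README;
# the calls below are illustrative):
#   ydl._build_format_filter('height<=720')    # numeric comparison
#   ydl._build_format_filter('filesize>100M')  # parsed via parse_filesize
#   ydl._build_format_filter('ext=mp4')        # string equality
#   ydl._build_format_filter('format_id~=248') # regex search
# Each returns a predicate usable as filter(predicate, formats).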
2026
2027 def _check_formats(self, formats):
2028 for f in formats:
2029 self.to_screen('[info] Testing format %s' % f['format_id'])
2030 path = self.get_output_path('temp')
2031 if not self._ensure_dir_exists(f'{path}/'):
2032 continue
2033 temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
2034 temp_file.close()
2035 try:
2036 success, _ = self.dl(temp_file.name, f, test=True)
2037 except (DownloadError, OSError, ValueError) + network_exceptions:
2038 success = False
2039 finally:
2040 if os.path.exists(temp_file.name):
2041 try:
2042 os.remove(temp_file.name)
2043 except OSError:
2044 self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
2045 if success:
2046 yield f
2047 else:
2048 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
2049
2050 def _default_format_spec(self, info_dict, download=True):
2051
2052 def can_merge():
2053 merger = FFmpegMergerPP(self)
2054 return merger.available and merger.can_merge()
2055
2056 prefer_best = (
2057 not self.params.get('simulate')
2058 and download
2059 and (
2060 not can_merge()
2061 or info_dict.get('is_live') and not self.params.get('live_from_start')
2062 or self.params['outtmpl']['default'] == '-'))
2063 compat = (
2064 prefer_best
2065 or self.params.get('allow_multiple_audio_streams', False)
2066 or 'format-spec' in self.params['compat_opts'])
2067
2068 return (
2069 'best/bestvideo+bestaudio' if prefer_best
2070 else 'bestvideo*+bestaudio/best' if not compat
2071 else 'bestvideo+bestaudio/best')
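# For reference, the three possible defaults returned above:
#   'best/bestvideo+bestaudio'  - merging unavailable or undesirable
#   'bestvideo*+bestaudio/best' - the normal case
#   'bestvideo+bestaudio/best'  - compat mode (e.g. 'format-spec' compat_opt,
#                                 or multiple audio streams allowed)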
2072
2073 def build_format_selector(self, format_spec):
2074 def syntax_error(note, start):
2075 message = (
2076 'Invalid format specification: '
2077 '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
2078 return SyntaxError(message)
2079
2080 PICKFIRST = 'PICKFIRST'
2081 MERGE = 'MERGE'
2082 SINGLE = 'SINGLE'
2083 GROUP = 'GROUP'
2084 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
2085
2086 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
2087 'video': self.params.get('allow_multiple_video_streams', False)}
2088
2089 check_formats = self.params.get('check_formats') == 'selected'
2090
2091 def _parse_filter(tokens):
2092 filter_parts = []
2093 for type, string_, start, _, _ in tokens:
2094 if type == tokenize.OP and string_ == ']':
2095 return ''.join(filter_parts)
2096 else:
2097 filter_parts.append(string_)
2098
2099 def _remove_unused_ops(tokens):
2100 # Remove operators that we don't use and join them with the surrounding strings.
2101 # E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
2102 ALLOWED_OPS = ('/', '+', ',', '(', ')')
2103 last_string, last_start, last_end, last_line = None, None, None, None
2104 for type, string_, start, end, line in tokens:
2105 if type == tokenize.OP and string_ == '[':
2106 if last_string:
2107 yield tokenize.NAME, last_string, last_start, last_end, last_line
2108 last_string = None
2109 yield type, string_, start, end, line
2110 # everything inside brackets will be handled by _parse_filter
2111 for type, string_, start, end, line in tokens:
2112 yield type, string_, start, end, line
2113 if type == tokenize.OP and string_ == ']':
2114 break
2115 elif type == tokenize.OP and string_ in ALLOWED_OPS:
2116 if last_string:
2117 yield tokenize.NAME, last_string, last_start, last_end, last_line
2118 last_string = None
2119 yield type, string_, start, end, line
2120 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
2121 if not last_string:
2122 last_string = string_
2123 last_start = start
2124 last_end = end
2125 else:
2126 last_string += string_
2127 if last_string:
2128 yield tokenize.NAME, last_string, last_start, last_end, last_line
2129
2130 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
2131 selectors = []
2132 current_selector = None
2133 for type, string_, start, _, _ in tokens:
2134 # ENCODING is only defined in python 3.x
2135 if type == getattr(tokenize, 'ENCODING', None):
2136 continue
2137 elif type in [tokenize.NAME, tokenize.NUMBER]:
2138 current_selector = FormatSelector(SINGLE, string_, [])
2139 elif type == tokenize.OP:
2140 if string_ == ')':
2141 if not inside_group:
2142 # ')' will be handled by the parentheses group
2143 tokens.restore_last_token()
2144 break
2145 elif inside_merge and string_ in ['/', ',']:
2146 tokens.restore_last_token()
2147 break
2148 elif inside_choice and string_ == ',':
2149 tokens.restore_last_token()
2150 break
2151 elif string_ == ',':
2152 if not current_selector:
2153 raise syntax_error('"," must follow a format selector', start)
2154 selectors.append(current_selector)
2155 current_selector = None
2156 elif string_ == '/':
2157 if not current_selector:
2158 raise syntax_error('"/" must follow a format selector', start)
2159 first_choice = current_selector
2160 second_choice = _parse_format_selection(tokens, inside_choice=True)
2161 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
2162 elif string_ == '[':
2163 if not current_selector:
2164 current_selector = FormatSelector(SINGLE, 'best', [])
2165 format_filter = _parse_filter(tokens)
2166 current_selector.filters.append(format_filter)
2167 elif string_ == '(':
2168 if current_selector:
2169 raise syntax_error('Unexpected "("', start)
2170 group = _parse_format_selection(tokens, inside_group=True)
2171 current_selector = FormatSelector(GROUP, group, [])
2172 elif string_ == '+':
2173 if not current_selector:
2174 raise syntax_error('Unexpected "+"', start)
2175 selector_1 = current_selector
2176 selector_2 = _parse_format_selection(tokens, inside_merge=True)
2177 if not selector_2:
2178 raise syntax_error('Expected a selector', start)
2179 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
2180 else:
2181 raise syntax_error(f'Operator not recognized: "{string_}"', start)
2182 elif type == tokenize.ENDMARKER:
2183 break
2184 if current_selector:
2185 selectors.append(current_selector)
2186 return selectors
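# Rough shape of a parse (illustrative): 'bv*+ba/b' yields a PICKFIRST selector
# whose first choice is MERGE('bv*', 'ba') and whose fallback is the SINGLE
# selector 'b'; a '[height<=720]' suffix would land in FormatSelector.filters.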
2187
2188 def _merge(formats_pair):
2189 format_1, format_2 = formats_pair
2190
2191 formats_info = []
2192 formats_info.extend(format_1.get('requested_formats', (format_1,)))
2193 formats_info.extend(format_2.get('requested_formats', (format_2,)))
2194
2195 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
2196 get_no_more = {'video': False, 'audio': False}
2197 for (i, fmt_info) in enumerate(formats_info):
2198 if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
2199 formats_info.pop(i)
2200 continue
2201 for aud_vid in ['audio', 'video']:
2202 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
2203 if get_no_more[aud_vid]:
2204 formats_info.pop(i)
2205 break
2206 get_no_more[aud_vid] = True
2207
2208 if len(formats_info) == 1:
2209 return formats_info[0]
2210
2211 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
2212 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
2213
2214 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
2215 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
2216
2217 output_ext = get_compatible_ext(
2218 vcodecs=[f.get('vcodec') for f in video_fmts],
2219 acodecs=[f.get('acodec') for f in audio_fmts],
2220 vexts=[f['ext'] for f in video_fmts],
2221 aexts=[f['ext'] for f in audio_fmts],
2222 preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
2223 or self.params.get('prefer_free_formats') and ('webm', 'mkv')))
2224
2225 filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
2226
2227 new_dict = {
2228 'requested_formats': formats_info,
2229 'format': '+'.join(filtered('format')),
2230 'format_id': '+'.join(filtered('format_id')),
2231 'ext': output_ext,
2232 'protocol': '+'.join(map(determine_protocol, formats_info)),
2233 'language': '+'.join(orderedSet(filtered('language'))) or None,
2234 'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
2235 'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
2236 'tbr': sum(filtered('tbr', 'vbr', 'abr')),
2237 }
2238
2239 if the_only_video:
2240 new_dict.update({
2241 'width': the_only_video.get('width'),
2242 'height': the_only_video.get('height'),
2243 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
2244 'fps': the_only_video.get('fps'),
2245 'dynamic_range': the_only_video.get('dynamic_range'),
2246 'vcodec': the_only_video.get('vcodec'),
2247 'vbr': the_only_video.get('vbr'),
2248 'stretched_ratio': the_only_video.get('stretched_ratio'),
2249 'aspect_ratio': the_only_video.get('aspect_ratio'),
2250 })
2251
2252 if the_only_audio:
2253 new_dict.update({
2254 'acodec': the_only_audio.get('acodec'),
2255 'abr': the_only_audio.get('abr'),
2256 'asr': the_only_audio.get('asr'),
2257 'audio_channels': the_only_audio.get('audio_channels')
2258 })
2259
2260 return new_dict
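# e.g. merging a video-only with an audio-only format (hypothetical dicts,
# abbreviated; real format dicts also carry 'url', 'protocol' etc.):
#   _merge(({'format_id': '137', 'vcodec': 'avc1', 'acodec': 'none', 'ext': 'mp4', ...},
#           {'format_id': '140', 'vcodec': 'none', 'acodec': 'mp4a.40.2', 'ext': 'm4a', ...}))
# yields a single dict with format_id '137+140' and both inputs under
# 'requested_formats'.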
2261
2262 def _check_formats(formats):
2263 if not check_formats:
2264 yield from formats
2265 return
2266 yield from self._check_formats(formats)
2267
2268 def _build_selector_function(selector):
2269 if isinstance(selector, list): # ,
2270 fs = [_build_selector_function(s) for s in selector]
2271
2272 def selector_function(ctx):
2273 for f in fs:
2274 yield from f(ctx)
2275 return selector_function
2276
2277 elif selector.type == GROUP: # ()
2278 selector_function = _build_selector_function(selector.selector)
2279
2280 elif selector.type == PICKFIRST: # /
2281 fs = [_build_selector_function(s) for s in selector.selector]
2282
2283 def selector_function(ctx):
2284 for f in fs:
2285 picked_formats = list(f(ctx))
2286 if picked_formats:
2287 return picked_formats
2288 return []
2289
2290 elif selector.type == MERGE: # +
2291 selector_1, selector_2 = map(_build_selector_function, selector.selector)
2292
2293 def selector_function(ctx):
2294 for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
2295 yield _merge(pair)
2296
2297 elif selector.type == SINGLE: # atom
2298 format_spec = selector.selector or 'best'
2299
2300 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
2301 if format_spec == 'all':
2302 def selector_function(ctx):
2303 yield from _check_formats(ctx['formats'][::-1])
2304 elif format_spec == 'mergeall':
2305 def selector_function(ctx):
2306 formats = list(_check_formats(
2307 f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
2308 if not formats:
2309 return
2310 merged_format = formats[-1]
2311 for f in formats[-2::-1]:
2312 merged_format = _merge((merged_format, f))
2313 yield merged_format
2314
2315 else:
2316 format_fallback, separate_fallback, format_reverse, format_idx = False, None, True, 1
2317 mobj = re.match(
2318 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
2319 format_spec)
2320 if mobj is not None:
2321 format_idx = int_or_none(mobj.group('n'), default=1)
2322 format_reverse = mobj.group('bw')[0] == 'b'
2323 format_type = (mobj.group('type') or [None])[0]
2324 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
2325 format_modified = mobj.group('mod') is not None
2326
2327 format_fallback = not format_type and not format_modified # for b, w
2328 _filter_f = (
2329 (lambda f: f.get('%scodec' % format_type) != 'none')
2330 if format_type and format_modified # bv*, ba*, wv*, wa*
2331 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
2332 if format_type # bv, ba, wv, wa
2333 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
2334 if not format_modified # b, w
2335 else lambda f: True) # b*, w*
2336 filter_f = lambda f: _filter_f(f) and (
2337 f.get('vcodec') != 'none' or f.get('acodec') != 'none')
2338 else:
2339 if format_spec in self._format_selection_exts['audio']:
2340 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
2341 elif format_spec in self._format_selection_exts['video']:
2342 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
2343 separate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
2344 elif format_spec in self._format_selection_exts['storyboards']:
2345 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
2346 else:
2347 filter_f = lambda f: f.get('format_id') == format_spec # id
2348
2349 def selector_function(ctx):
2350 formats = list(ctx['formats'])
2351 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
2352 if not matches:
2353 if format_fallback and ctx['incomplete_formats']:
2354 # for extractors with incomplete formats (audio only (soundcloud)
2355 # or video only (imgur)), best/worst will fall back to the
2356 # best/worst {video,audio}-only format
2357 matches = formats
2358 elif separate_fallback and not ctx['has_merged_format']:
2359 # for compatibility with youtube-dl when there is no pre-merged format
2360 matches = list(filter(separate_fallback, formats))
2361 matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
2362 try:
2363 yield matches[format_idx - 1]
2364 except LazyList.IndexError:
2365 return
2366
2367 filters = [self._build_format_filter(f) for f in selector.filters]
2368
2369 def final_selector(ctx):
2370 ctx_copy = dict(ctx)
2371 for _filter in filters:
2372 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
2373 return selector_function(ctx_copy)
2374 return final_selector
2375
2376 stream = io.BytesIO(format_spec.encode())
2377 try:
2378 tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
2379 except tokenize.TokenError:
2380 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
2381
2382 class TokenIterator:
2383 def __init__(self, tokens):
2384 self.tokens = tokens
2385 self.counter = 0
2386
2387 def __iter__(self):
2388 return self
2389
2390 def __next__(self):
2391 if self.counter >= len(self.tokens):
2392 raise StopIteration()
2393 value = self.tokens[self.counter]
2394 self.counter += 1
2395 return value
2396
2397 next = __next__
2398
2399 def restore_last_token(self):
2400 self.counter -= 1
2401
2402 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
2403 return _build_selector_function(parsed_selector)
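# Illustrative use of the returned callable (context keys as used in
# process_video_result below):
#   selector = ydl.build_format_selector('bestvideo+bestaudio/best')
#   chosen = list(selector({'formats': formats, 'has_merged_format': False,
#                           'incomplete_formats': False}))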
2404
2405 def _calc_headers(self, info_dict):
2406 res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})
2407 if 'Youtubedl-No-Compression' in res: # deprecated
2408 res.pop('Youtubedl-No-Compression', None)
2409 res['Accept-Encoding'] = 'identity'
2410 cookies = self.cookiejar.get_cookie_header(info_dict['url'])
2411 if cookies:
2412 res['Cookie'] = cookies
2413
2414 if 'X-Forwarded-For' not in res:
2415 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
2416 if x_forwarded_for_ip:
2417 res['X-Forwarded-For'] = x_forwarded_for_ip
2418
2419 return res
2420
2421 def _calc_cookies(self, url):
2422 self.deprecation_warning('"YoutubeDL._calc_cookies" is deprecated and may be removed in a future version')
2423 return self.cookiejar.get_cookie_header(url)
2424
2425 def _sort_thumbnails(self, thumbnails):
2426 thumbnails.sort(key=lambda t: (
2427 t.get('preference') if t.get('preference') is not None else -1,
2428 t.get('width') if t.get('width') is not None else -1,
2429 t.get('height') if t.get('height') is not None else -1,
2430 t.get('id') if t.get('id') is not None else '',
2431 t.get('url')))
2432
2433 def _sanitize_thumbnails(self, info_dict):
2434 thumbnails = info_dict.get('thumbnails')
2435 if thumbnails is None:
2436 thumbnail = info_dict.get('thumbnail')
2437 if thumbnail:
2438 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
2439 if not thumbnails:
2440 return
2441
2442 def check_thumbnails(thumbnails):
2443 for t in thumbnails:
2444 self.to_screen(f'[info] Testing thumbnail {t["id"]}')
2445 try:
2446 self.urlopen(HEADRequest(t['url']))
2447 except network_exceptions as err:
2448 self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
2449 continue
2450 yield t
2451
2452 self._sort_thumbnails(thumbnails)
2453 for i, t in enumerate(thumbnails):
2454 if t.get('id') is None:
2455 t['id'] = '%d' % i
2456 if t.get('width') and t.get('height'):
2457 t['resolution'] = '%dx%d' % (t['width'], t['height'])
2458 t['url'] = sanitize_url(t['url'])
2459
2460 if self.params.get('check_formats') is True:
2461 info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
2462 else:
2463 info_dict['thumbnails'] = thumbnails
2464
2465 def _fill_common_fields(self, info_dict, final=True):
2466 # TODO: move sanitization here
2467 if final:
2468 title = info_dict['fulltitle'] = info_dict.get('title')
2469 if not title:
2470 if title == '':
2471 self.write_debug('Extractor gave empty title. Creating a generic title')
2472 else:
2473 self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
2474 info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'
2475
2476 if info_dict.get('duration') is not None:
2477 info_dict['duration_string'] = formatSeconds(info_dict['duration'])
2478
2479 for ts_key, date_key in (
2480 ('timestamp', 'upload_date'),
2481 ('release_timestamp', 'release_date'),
2482 ('modified_timestamp', 'modified_date'),
2483 ):
2484 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
2485 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2486 # see http://bugs.python.org/issue1646728)
2487 with contextlib.suppress(ValueError, OverflowError, OSError):
2488 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
2489 info_dict[date_key] = upload_date.strftime('%Y%m%d')
2490
2491 live_keys = ('is_live', 'was_live')
2492 live_status = info_dict.get('live_status')
2493 if live_status is None:
2494 for key in live_keys:
2495 if info_dict.get(key) is False:
2496 continue
2497 if info_dict.get(key):
2498 live_status = key
2499 break
2500 if all(info_dict.get(key) is False for key in live_keys):
2501 live_status = 'not_live'
2502 if live_status:
2503 info_dict['live_status'] = live_status
2504 for key in live_keys:
2505 if info_dict.get(key) is None:
2506 info_dict[key] = (live_status == key)
2507 if live_status == 'post_live':
2508 info_dict['was_live'] = True
2509
2510 # Auto generate title fields corresponding to the *_number fields when missing
2511 # in order to always have clean titles. This is very common for TV series.
2512 for field in ('chapter', 'season', 'episode'):
2513 if final and info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
2514 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
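# Example of the derived fields (illustrative): given
#   {'timestamp': 1577836800, 'season_number': 2}
# this sets upload_date='20200101' and, when final, season='Season 2'.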
2515
2516 def _raise_pending_errors(self, info):
2517 err = info.pop('__pending_error', None)
2518 if err:
2519 self.report_error(err, tb=False)
2520
2521 def sort_formats(self, info_dict):
2522 formats = self._get_formats(info_dict)
2523 formats.sort(key=FormatSorter(
2524 self, info_dict.get('_format_sort_fields') or []).calculate_preference)
2525
2526 def process_video_result(self, info_dict, download=True):
2527 assert info_dict.get('_type', 'video') == 'video'
2528 self._num_videos += 1
2529
2530 if 'id' not in info_dict:
2531 raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
2532 elif not info_dict.get('id'):
2533 raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])
2534
2535 def report_force_conversion(field, field_not, conversion):
2536 self.report_warning(
2537 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2538 % (field, field_not, conversion))
2539
2540 def sanitize_string_field(info, string_field):
2541 field = info.get(string_field)
2542 if field is None or isinstance(field, str):
2543 return
2544 report_force_conversion(string_field, 'a string', 'string')
2545 info[string_field] = str(field)
2546
2547 def sanitize_numeric_fields(info):
2548 for numeric_field in self._NUMERIC_FIELDS:
2549 field = info.get(numeric_field)
2550 if field is None or isinstance(field, (int, float)):
2551 continue
2552 report_force_conversion(numeric_field, 'numeric', 'int')
2553 info[numeric_field] = int_or_none(field)
2554
2555 sanitize_string_field(info_dict, 'id')
2556 sanitize_numeric_fields(info_dict)
2557 if info_dict.get('section_end') and info_dict.get('section_start') is not None:
2558 info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
2559 if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
2560 self.report_warning('"duration" field is negative, there is an error in extractor')
2561
2562 chapters = info_dict.get('chapters') or []
2563 if chapters and chapters[0].get('start_time'):
2564 chapters.insert(0, {'start_time': 0})
2565
2566 dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
2567 for idx, (prev, current, next_) in enumerate(zip(
2568 (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
2569 if current.get('start_time') is None:
2570 current['start_time'] = prev.get('end_time')
2571 if not current.get('end_time'):
2572 current['end_time'] = next_.get('start_time')
2573 if not current.get('title'):
2574 current['title'] = f'<Untitled Chapter {idx}>'
2575
2576 if 'playlist' not in info_dict:
2577 # It isn't part of a playlist
2578 info_dict['playlist'] = None
2579 info_dict['playlist_index'] = None
2580
2581 self._sanitize_thumbnails(info_dict)
2582
2583 thumbnail = info_dict.get('thumbnail')
2584 thumbnails = info_dict.get('thumbnails')
2585 if thumbnail:
2586 info_dict['thumbnail'] = sanitize_url(thumbnail)
2587 elif thumbnails:
2588 info_dict['thumbnail'] = thumbnails[-1]['url']
2589
2590 if info_dict.get('display_id') is None and 'id' in info_dict:
2591 info_dict['display_id'] = info_dict['id']
2592
2593 self._fill_common_fields(info_dict)
2594
2595 for cc_kind in ('subtitles', 'automatic_captions'):
2596 cc = info_dict.get(cc_kind)
2597 if cc:
2598 for _, subtitle in cc.items():
2599 for subtitle_format in subtitle:
2600 if subtitle_format.get('url'):
2601 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
2602 if subtitle_format.get('ext') is None:
2603 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
2604
2605 automatic_captions = info_dict.get('automatic_captions')
2606 subtitles = info_dict.get('subtitles')
2607
2608 info_dict['requested_subtitles'] = self.process_subtitles(
2609 info_dict['id'], subtitles, automatic_captions)
2610
2611 formats = self._get_formats(info_dict)
2612
2613 # Backward compatibility with InfoExtractor._sort_formats
2614 field_preference = (formats or [{}])[0].pop('__sort_fields', None)
2615 if field_preference:
2616 info_dict['_format_sort_fields'] = field_preference
2617
2618 # or None ensures --clean-infojson removes it
2619 info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
2620 if not self.params.get('allow_unplayable_formats'):
2621 formats = [f for f in formats if not f.get('has_drm')]
2622
2623 if formats and all(f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
2624 self.report_warning(
2625 'This video is DRM protected and only images are available for download. Use --list-formats to see them'
2626 if info_dict['_has_drm'] else 'Only images are available for download. Use --list-formats to see them')
2627
2628 get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
2629 if not get_from_start:
2630 info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
2631 if info_dict.get('is_live') and formats:
2632 formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
2633 if get_from_start and not formats:
2634 self.raise_no_formats(info_dict, msg=(
2635 '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
2636 'If you want to download from the current time, use --no-live-from-start'))
2637
2638 def is_wellformed(f):
2639 url = f.get('url')
2640 if not url:
2641 self.report_warning(
2642 '"url" field is missing or empty - skipping format, '
2643 'there is an error in extractor')
2644 return False
2645 if isinstance(url, bytes):
2646 sanitize_string_field(f, 'url')
2647 return True
2648
2649 # Filter out malformed formats for better extraction robustness
2650 formats = list(filter(is_wellformed, formats or []))
2651
2652 if not formats:
2653 self.raise_no_formats(info_dict)
2654
2655 for format in formats:
2656 sanitize_string_field(format, 'format_id')
2657 sanitize_numeric_fields(format)
2658 format['url'] = sanitize_url(format['url'])
2659 if format.get('ext') is None:
2660 format['ext'] = determine_ext(format['url']).lower()
2661 if format.get('protocol') is None:
2662 format['protocol'] = determine_protocol(format)
2663 if format.get('resolution') is None:
2664 format['resolution'] = self.format_resolution(format, default=None)
2665 if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
2666 format['dynamic_range'] = 'SDR'
2667 if format.get('aspect_ratio') is None:
2668 format['aspect_ratio'] = try_call(lambda: round(format['width'] / format['height'], 2))
2669 if (info_dict.get('duration') and format.get('tbr')
2670 and not format.get('filesize') and not format.get('filesize_approx')):
2671 format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
2672 format['http_headers'] = self._calc_headers(collections.ChainMap(format, info_dict))
2673
2674 # This is copied to http_headers by the above _calc_headers and can now be removed
2675 if '__x_forwarded_for_ip' in info_dict:
2676 del info_dict['__x_forwarded_for_ip']
2677
2678 self.sort_formats({
2679 'formats': formats,
2680 '_format_sort_fields': info_dict.get('_format_sort_fields')
2681 })
2682
2683 # Sanitize and group by format_id
2684 formats_dict = {}
2685 for i, format in enumerate(formats):
2686 if not format.get('format_id'):
2687 format['format_id'] = str(i)
2688 else:
2689 # Sanitize format_id from characters used in format selector expression
2690 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
2691 formats_dict.setdefault(format['format_id'], []).append(format)
2692
2693 # Make sure all formats have unique format_id
2694 common_exts = set(itertools.chain(*self._format_selection_exts.values()))
2695 for format_id, ambiguous_formats in formats_dict.items():
2696 ambiguous_id = len(ambiguous_formats) > 1
2697 for i, format in enumerate(ambiguous_formats):
2698 if ambiguous_id:
2699 format['format_id'] = '%s-%d' % (format_id, i)
2700 # Ensure there is no conflict between id and ext in format selection
2701 # See https://github.com/yt-dlp/yt-dlp/issues/1282
2702 if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
2703 format['format_id'] = 'f%s' % format['format_id']
2704
2705 if format.get('format') is None:
2706 format['format'] = '{id} - {res}{note}'.format(
2707 id=format['format_id'],
2708 res=self.format_resolution(format),
2709 note=format_field(format, 'format_note', ' (%s)'),
2710 )
2711
2712 if self.params.get('check_formats') is True:
2713 formats = LazyList(self._check_formats(formats[::-1]), reverse=True)
2714
2715 if not formats or formats[0] is not info_dict:
2716 # Only set the 'formats' field if the original info_dict lists formats;
2717 # otherwise we end up with a circular reference: the first (and only)
2718 # element of the 'formats' field in info_dict would be info_dict itself,
2719 # which can't be exported to json
2720 info_dict['formats'] = formats
2721
2722 info_dict, _ = self.pre_process(info_dict)
2723
2724 if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
2725 return info_dict
2726
2727 self.post_extract(info_dict)
2728 info_dict, _ = self.pre_process(info_dict, 'after_filter')
2729
2730 # The pre-processors may have modified the formats
2731 formats = self._get_formats(info_dict)
2732
2733 list_only = self.params.get('simulate') == 'list_only'
2734 interactive_format_selection = not list_only and self.format_selector == '-'
2735 if self.params.get('list_thumbnails'):
2736 self.list_thumbnails(info_dict)
2737 if self.params.get('listsubtitles'):
2738 if 'automatic_captions' in info_dict:
2739 self.list_subtitles(
2740 info_dict['id'], automatic_captions, 'automatic captions')
2741 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
2742 if self.params.get('listformats') or interactive_format_selection:
2743 self.list_formats(info_dict)
2744 if list_only:
2745 # Without this printing, -F --print-json will not work
2746 self.__forced_printings(info_dict)
2747 return info_dict
2748
2749 format_selector = self.format_selector
2750 while True:
2751 if interactive_format_selection:
2752 req_format = input(self._format_screen('\nEnter format selector ', self.Styles.EMPHASIS)
2753 + '(Press ENTER for default, or Ctrl+C to quit)'
2754 + self._format_screen(': ', self.Styles.EMPHASIS))
2755 try:
2756 format_selector = self.build_format_selector(req_format) if req_format else None
2757 except SyntaxError as err:
2758 self.report_error(err, tb=False, is_error=False)
2759 continue
2760
2761 if format_selector is None:
2762 req_format = self._default_format_spec(info_dict, download=download)
2763 self.write_debug(f'Default format spec: {req_format}')
2764 format_selector = self.build_format_selector(req_format)
2765
2766 formats_to_download = list(format_selector({
2767 'formats': formats,
2768 'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
2769 'incomplete_formats': (
2770 # All formats are video-only or
2771 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
2772 # all formats are audio-only
2773 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
2774 }))
2775 if interactive_format_selection and not formats_to_download:
2776 self.report_error('Requested format is not available', tb=False, is_error=False)
2777 continue
2778 break
2779
2780 if not formats_to_download:
2781 if not self.params.get('ignore_no_formats_error'):
2782 raise ExtractorError(
2783 'Requested format is not available. Use --list-formats for a list of available formats',
2784 expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
2785 self.report_warning('Requested format is not available')
2786 # Process what we can, even without any available formats.
2787 formats_to_download = [{}]
2788
2789 requested_ranges = tuple(self.params.get('download_ranges', lambda *_: [{}])(info_dict, self))
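# Illustrative sketch (hypothetical values, not from this file): 'download_ranges'
# is a callable (info_dict, ydl) -> iterable of chapter-like dicts. A callable
# built with utils.download_range_func([], [(10, 20)]) would yield
# [{'start_time': 10, 'end_time': 20}], so the product loop below runs once per
# (format, range) pair and fills in section_start/section_end accordingly.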
2790 best_format, downloaded_formats = formats_to_download[-1], []
2791 if download:
2792 if best_format and requested_ranges:
2793 def to_screen(*msg):
2794 self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')
2795
2796 to_screen(f'Downloading {len(formats_to_download)} format(s):',
2797 (f['format_id'] for f in formats_to_download))
2798 if requested_ranges != ({}, ):
2799 to_screen(f'Downloading {len(requested_ranges)} time ranges:',
2800 (f'{c["start_time"]:.1f}-{c["end_time"]:.1f}' for c in requested_ranges))
2801 max_downloads_reached = False
2802
2803 for fmt, chapter in itertools.product(formats_to_download, requested_ranges):
2804 new_info = self._copy_infodict(info_dict)
2805 new_info.update(fmt)
2806 offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
2807 end_time = offset + min(chapter.get('end_time', duration), duration)
2808 if chapter or offset:
2809 new_info.update({
2810 'section_start': offset + chapter.get('start_time', 0),
2811 # duration may not be accurate, so allow deviations of <1sec
2812 'section_end': end_time if end_time <= offset + duration + 1 else None,
2813 'section_title': chapter.get('title'),
2814 'section_number': chapter.get('index'),
2815 })
2816 downloaded_formats.append(new_info)
2817 try:
2818 self.process_info(new_info)
2819 except MaxDownloadsReached:
2820 max_downloads_reached = True
2821 self._raise_pending_errors(new_info)
2822 # Remove copied info
2823 for key, val in tuple(new_info.items()):
2824 if info_dict.get(key) == val:
2825 new_info.pop(key)
2826 if max_downloads_reached:
2827 break
2828
2829 write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
2830 assert write_archive.issubset({True, False, 'ignore'})
2831 if True in write_archive and False not in write_archive:
2832 self.record_download_archive(info_dict)
2833
2834 info_dict['requested_downloads'] = downloaded_formats
2835 info_dict = self.run_all_pps('after_video', info_dict)
2836 if max_downloads_reached:
2837 raise MaxDownloadsReached()
2838
2839 # We update the info dict with the selected best quality format (backwards compatibility)
2840 info_dict.update(best_format)
2841 return info_dict
2842
2843 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
2844 """Select the requested subtitles and their format"""
2845 available_subs, normal_sub_langs = {}, []
2846 if normal_subtitles and self.params.get('writesubtitles'):
2847 available_subs.update(normal_subtitles)
2848 normal_sub_langs = tuple(normal_subtitles.keys())
2849 if automatic_captions and self.params.get('writeautomaticsub'):
2850 for lang, cap_info in automatic_captions.items():
2851 if lang not in available_subs:
2852 available_subs[lang] = cap_info
2853
2854 if not available_subs or (
2855 not self.params.get('writesubtitles')
2856 and not self.params.get('writeautomaticsub')):
2857 return None
2858
2859 all_sub_langs = tuple(available_subs.keys())
2860 if self.params.get('allsubtitles', False):
2861 requested_langs = all_sub_langs
2862 elif self.params.get('subtitleslangs', False):
2863 try:
2864 requested_langs = orderedSet_from_options(
2865 self.params.get('subtitleslangs'), {'all': all_sub_langs}, use_regex=True)
2866 except re.error as e:
2867 raise ValueError(f'Wrong regex for subtitleslangs: {e.pattern}')
2868 else:
2869 requested_langs = LazyList(itertools.chain(
2870 ['en'] if 'en' in normal_sub_langs else [],
2871 filter(lambda f: f.startswith('en'), normal_sub_langs),
2872 ['en'] if 'en' in all_sub_langs else [],
2873 filter(lambda f: f.startswith('en'), all_sub_langs),
2874 normal_sub_langs, all_sub_langs,
2875 ))[:1]
2876 if requested_langs:
2877 self.to_screen(f'[info] {video_id}: Downloading subtitles: {", ".join(requested_langs)}')
2878
2879 formats_query = self.params.get('subtitlesformat', 'best')
2880 formats_preference = formats_query.split('/') if formats_query else []
2881 subs = {}
2882 for lang in requested_langs:
2883 formats = available_subs.get(lang)
2884 if formats is None:
2885 self.report_warning(f'{lang} subtitles not available for {video_id}')
2886 continue
2887 for ext in formats_preference:
2888 if ext == 'best':
2889 f = formats[-1]
2890 break
2891 matches = list(filter(lambda f: f['ext'] == ext, formats))
2892 if matches:
2893 f = matches[-1]
2894 break
2895 else:
2896 f = formats[-1]
2897 self.report_warning(
2898 'No subtitle format found matching "%s" for language %s, '
2899 'using %s' % (formats_query, lang, f['ext']))
2900 subs[lang] = f
2901 return subs
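# Illustrative example (assumed params and inputs, not from this file): with
# {'writesubtitles': True, 'subtitleslangs': ['en.*', 'ja']} and available
# languages ('en-US', 'ja', 'fr'), requested_langs would resolve to en-US and ja,
# and the returned dict maps each language to its single chosen format,
# e.g. {'en-US': {'ext': 'vtt', ...}, 'ja': {'ext': 'vtt', ...}}.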
2902
2903 def _forceprint(self, key, info_dict):
2904 if info_dict is None:
2905 return
2906 info_copy = info_dict.copy()
2907 info_copy.setdefault('filename', self.prepare_filename(info_dict))
2908 if info_dict.get('requested_formats') is not None:
2909 # For RTMP URLs, also include the playpath
2910 info_copy['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
2911 elif info_dict.get('url'):
2912 info_copy['urls'] = info_dict['url'] + info_dict.get('play_path', '')
2913 info_copy['formats_table'] = self.render_formats_table(info_dict)
2914 info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
2915 info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
2916 info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))
2917
2918 def format_tmpl(tmpl):
2919 mobj = re.fullmatch(r'([\w.:,]|-\d|(?P<dict>{([\w.:,]|-\d)+}))+=?', tmpl)
2920 if not mobj:
2921 return tmpl
2922
2923 fmt = '%({})s'
2924 if tmpl.startswith('{'):
2925 tmpl, fmt = f'.{tmpl}', '%({})j'
2926 if tmpl.endswith('='):
2927 tmpl, fmt = tmpl[:-1], '{0} = %({0})#j'
2928 return '\n'.join(map(fmt.format, [tmpl] if mobj.group('dict') else tmpl.split(',')))
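# A few concrete expansions of format_tmpl (derived from the regex above):
#   'title'      -> '%(title)s'
#   'title,id'   -> '%(title)s\n%(id)s'
#   'title='     -> 'title = %(title)#j'
#   '{title,id}' -> '%(.{title,id})j'
# Anything not matching the field-name pattern is passed through as a template.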
2929
2930 for tmpl in self.params['forceprint'].get(key, []):
2931 self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))
2932
2933 for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
2934 filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
2935 tmpl = format_tmpl(tmpl)
2936 self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
2937 if self._ensure_dir_exists(filename):
2938 with open(filename, 'a', encoding='utf-8', newline='') as f:
2939 f.write(self.evaluate_outtmpl(tmpl, info_copy) + os.linesep)
2940
2941 return info_copy
2942
2943 def __forced_printings(self, info_dict, filename=None, incomplete=True):
2944 if (self.params.get('forcejson')
2945 or self.params['forceprint'].get('video')
2946 or self.params['print_to_file'].get('video')):
2947 self.post_extract(info_dict)
2948 if filename:
2949 info_dict['filename'] = filename
2950 info_copy = self._forceprint('video', info_dict)
2951
2952 def print_field(field, actual_field=None, optional=False):
2953 if actual_field is None:
2954 actual_field = field
2955 if self.params.get(f'force{field}') and (
2956 info_copy.get(field) is not None or (not optional and not incomplete)):
2957 self.to_stdout(info_copy[actual_field])
2958
2959 print_field('title')
2960 print_field('id')
2961 print_field('url', 'urls')
2962 print_field('thumbnail', optional=True)
2963 print_field('description', optional=True)
2964 print_field('filename')
2965 if self.params.get('forceduration') and info_copy.get('duration') is not None:
2966 self.to_stdout(formatSeconds(info_copy['duration']))
2967 print_field('format')
2968
2969 if self.params.get('forcejson'):
2970 self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
2971
2972 def dl(self, name, info, subtitle=False, test=False):
2973 if not info.get('url'):
2974 self.raise_no_formats(info, True)
2975
2976 if test:
2977 verbose = self.params.get('verbose')
2978 params = {
2979 'test': True,
2980 'quiet': self.params.get('quiet') or not verbose,
2981 'verbose': verbose,
2982 'noprogress': not verbose,
2983 'nopart': True,
2984 'skip_unavailable_fragments': False,
2985 'keep_fragments': False,
2986 'overwrites': True,
2987 '_no_ytdl_file': True,
2988 }
2989 else:
2990 params = self.params
2991 fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
2992 if not test:
2993 for ph in self._progress_hooks:
2994 fd.add_progress_hook(ph)
2995 urls = '", "'.join(
2996 (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
2997 for f in info.get('requested_formats', []) or [info])
2998 self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')
2999
3000 # Note: Ideally, info should be deep-copied so that hooks cannot modify it,
3001 # but it may contain objects that are not deep-copyable
3002 new_info = self._copy_infodict(info)
3003 if new_info.get('http_headers') is None:
3004 new_info['http_headers'] = self._calc_headers(new_info)
3005 return fd.download(name, new_info, subtitle)
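# Illustrative use (hypothetical info dict): self.dl(filename, info) returns the
# downloader's (success, real_download) result. With test=True, throwaway params
# are used to fetch only a small probe (nopart, no .ytdl file), which is how
# format checking (--check-formats) exercises a format before committing to it.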
3006
3007 def existing_file(self, filepaths, *, default_overwrite=True):
3008 existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
3009 if existing_files and not self.params.get('overwrites', default_overwrite):
3010 return existing_files[0]
3011
3012 for file in existing_files:
3013 self.report_file_delete(file)
3014 os.remove(file)
3015 return None
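# e.g. existing_file(['video.f137.mp4', 'video.mp4']) returns the first path that
# exists when overwrites are disabled; otherwise every existing candidate is
# deleted and None is returned so the caller re-downloads. (Paths here are
# illustrative only.)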
3016
3017 def process_info(self, info_dict):
3018 """Process a single resolved IE result. (Modifies it in-place)"""
3019
3020 assert info_dict.get('_type', 'video') == 'video'
3021 original_infodict = info_dict
3022
3023 if 'format' not in info_dict and 'ext' in info_dict:
3024 info_dict['format'] = info_dict['ext']
3025
3026 if self._match_entry(info_dict) is not None:
3027 info_dict['__write_download_archive'] = 'ignore'
3028 return
3029
3030 # Does nothing under normal operation - for backward compatibility of process_info
3031 self.post_extract(info_dict)
3032
3033 def replace_info_dict(new_info):
3034 nonlocal info_dict
3035 if new_info == info_dict:
3036 return
3037 info_dict.clear()
3038 info_dict.update(new_info)
3039
3040 new_info, _ = self.pre_process(info_dict, 'video')
3041 replace_info_dict(new_info)
3042 self._num_downloads += 1
3043
3044 # info_dict['_filename'] needs to be set for backward compatibility
3045 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
3046 temp_filename = self.prepare_filename(info_dict, 'temp')
3047 files_to_move = {}
3048
3049 # Forced printings
3050 self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
3051
3052 def check_max_downloads():
3053 if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
3054 raise MaxDownloadsReached()
3055
3056 if self.params.get('simulate'):
3057 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
3058 check_max_downloads()
3059 return
3060
3061 if full_filename is None:
3062 return
3063 if not self._ensure_dir_exists(encodeFilename(full_filename)):
3064 return
3065 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
3066 return
3067
3068 if self._write_description('video', info_dict,
3069 self.prepare_filename(info_dict, 'description')) is None:
3070 return
3071
3072 sub_files = self._write_subtitles(info_dict, temp_filename)
3073 if sub_files is None:
3074 return
3075 files_to_move.update(dict(sub_files))
3076
3077 thumb_files = self._write_thumbnails(
3078 'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
3079 if thumb_files is None:
3080 return
3081 files_to_move.update(dict(thumb_files))
3082
3083 infofn = self.prepare_filename(info_dict, 'infojson')
3084 _infojson_written = self._write_info_json('video', info_dict, infofn)
3085 if _infojson_written:
3086 info_dict['infojson_filename'] = infofn
3087 # For backward compatibility, even though it was a private field
3088 info_dict['__infojson_filename'] = infofn
3089 elif _infojson_written is None:
3090 return
3091
3092 # Note: Annotations are deprecated
3093 annofn = None
3094 if self.params.get('writeannotations', False):
3095 annofn = self.prepare_filename(info_dict, 'annotation')
3096 if annofn:
3097 if not self._ensure_dir_exists(encodeFilename(annofn)):
3098 return
3099 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
3100 self.to_screen('[info] Video annotations are already present')
3101 elif not info_dict.get('annotations'):
3102 self.report_warning('There are no annotations to write.')
3103 else:
3104 try:
3105 self.to_screen('[info] Writing video annotations to: ' + annofn)
3106 with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
3107 annofile.write(info_dict['annotations'])
3108 except (KeyError, TypeError):
3109 self.report_warning('There are no annotations to write.')
3110 except OSError:
3111 self.report_error('Cannot write annotations file: ' + annofn)
3112 return
3113
3114 # Write internet shortcut files
3115 def _write_link_file(link_type):
3116 url = try_get(info_dict['webpage_url'], iri_to_uri)
3117 if not url:
3118 self.report_warning(
3119 f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
3120 return True
3121 linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
3122 if not self._ensure_dir_exists(encodeFilename(linkfn)):
3123 return False
3124 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
3125 self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
3126 return True
3127 try:
3128 self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
3129 with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
3130 newline='\r\n' if link_type == 'url' else '\n') as linkfile:
3131 template_vars = {'url': url}
3132 if link_type == 'desktop':
3133 template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
3134 linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
3135 except OSError:
3136 self.report_error(f'Cannot write internet shortcut {linkfn}')
3137 return False
3138 return True
3139
3140 write_links = {
3141 'url': self.params.get('writeurllink'),
3142 'webloc': self.params.get('writewebloclink'),
3143 'desktop': self.params.get('writedesktoplink'),
3144 }
3145 if self.params.get('writelink'):
3146 link_type = ('webloc' if sys.platform == 'darwin'
3147 else 'desktop' if sys.platform.startswith('linux')
3148 else 'url')
3149 write_links[link_type] = True
3150
3151 if any(should_write and not _write_link_file(link_type)
3152 for link_type, should_write in write_links.items()):
3153 return
3154
3155 new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
3156 replace_info_dict(new_info)
3157
3158 if self.params.get('skip_download'):
3159 info_dict['filepath'] = temp_filename
3160 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3161 info_dict['__files_to_move'] = files_to_move
3162 replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
3163 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
3164 else:
3165 # Download
3166 info_dict.setdefault('__postprocessors', [])
3167 try:
3168
3169 def existing_video_file(*filepaths):
3170 ext = info_dict.get('ext')
3171 converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
3172 file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
3173 default_overwrite=False)
3174 if file:
3175 info_dict['ext'] = os.path.splitext(file)[1][1:]
3176 return file
3177
3178 fd, success = None, True
3179 if info_dict.get('protocol') or info_dict.get('url'):
3180 fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
3181 if fd is not FFmpegFD and 'no-direct-merge' not in self.params['compat_opts'] and (
3182 info_dict.get('section_start') or info_dict.get('section_end')):
3183 msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
3184 else 'You have requested downloading the video partially, but ffmpeg is not installed')
3185 self.report_error(f'{msg}. Aborting')
3186 return
3187
3188 if info_dict.get('requested_formats') is not None:
3189 old_ext = info_dict['ext']
3190 if self.params.get('merge_output_format') is None:
3191 if (info_dict['ext'] == 'webm'
3192 and info_dict.get('thumbnails')
3193 # check with type instead of pp_key, __name__, or isinstance,
3194 # since we don't want any custom PPs to trigger this
3195 and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])): # noqa: E721
3196 info_dict['ext'] = 'mkv'
3197 self.report_warning(
3198 'webm doesn\'t support embedding a thumbnail, mkv will be used')
3199 new_ext = info_dict['ext']
3200
3201 def correct_ext(filename, ext=new_ext):
3202 if filename == '-':
3203 return filename
3204 filename_real_ext = os.path.splitext(filename)[1][1:]
3205 filename_wo_ext = (
3206 os.path.splitext(filename)[0]
3207 if filename_real_ext in (old_ext, new_ext)
3208 else filename)
3209 return f'{filename_wo_ext}.{ext}'
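# e.g. with old_ext='webm' and new_ext='mkv' (hypothetical values):
#   correct_ext('video.webm') -> 'video.mkv'
#   correct_ext('video.part') -> 'video.part.mkv'  (unknown ext kept, new one appended)
#   correct_ext('-')          -> '-'                (stdout is left untouched)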
3210
3211 # Ensure filename always has a correct extension for successful merge
3212 full_filename = correct_ext(full_filename)
3213 temp_filename = correct_ext(temp_filename)
3214 dl_filename = existing_video_file(full_filename, temp_filename)
3215
3216 info_dict['__real_download'] = False
3217 # NOTE: Copy so that original format dicts are not modified
3218 info_dict['requested_formats'] = list(map(dict, info_dict['requested_formats']))
3219
3220 merger = FFmpegMergerPP(self)
3221 downloaded = []
3222 if dl_filename is not None:
3223 self.report_file_already_downloaded(dl_filename)
3224 elif fd:
3225 for f in info_dict['requested_formats'] if fd != FFmpegFD else []:
3226 f['filepath'] = fname = prepend_extension(
3227 correct_ext(temp_filename, info_dict['ext']),
3228 'f%s' % f['format_id'], info_dict['ext'])
3229 downloaded.append(fname)
3230 info_dict['url'] = '\n'.join(f['url'] for f in info_dict['requested_formats'])
3231 success, real_download = self.dl(temp_filename, info_dict)
3232 info_dict['__real_download'] = real_download
3233 else:
3234 if self.params.get('allow_unplayable_formats'):
3235 self.report_warning(
3236 'You have requested merging of multiple formats '
3237 'while also allowing unplayable formats to be downloaded. '
3238 'The formats won\'t be merged to prevent data corruption.')
3239 elif not merger.available:
3240 msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
3241 if not self.params.get('ignoreerrors'):
3242 self.report_error(f'{msg}. Aborting due to --abort-on-error')
3243 return
3244 self.report_warning(f'{msg}. The formats won\'t be merged')
3245
3246 if temp_filename == '-':
3247 reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
3248 else 'but the formats are incompatible for simultaneous download' if merger.available
3249 else 'but ffmpeg is not installed')
3250 self.report_warning(
3251 f'You have requested downloading multiple formats to stdout {reason}. '
3252 'The formats will be streamed one after the other')
3253 fname = temp_filename
3254 for f in info_dict['requested_formats']:
3255 new_info = dict(info_dict)
3256 del new_info['requested_formats']
3257 new_info.update(f)
3258 if temp_filename != '-':
3259 fname = prepend_extension(
3260 correct_ext(temp_filename, new_info['ext']),
3261 'f%s' % f['format_id'], new_info['ext'])
3262 if not self._ensure_dir_exists(fname):
3263 return
3264 f['filepath'] = fname
3265 downloaded.append(fname)
3266 partial_success, real_download = self.dl(fname, new_info)
3267 info_dict['__real_download'] = info_dict['__real_download'] or real_download
3268 success = success and partial_success
3269
3270 if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
3271 info_dict['__postprocessors'].append(merger)
3272 info_dict['__files_to_merge'] = downloaded
3273 # Even if nothing new was downloaded, the merged file is only created now
3274 info_dict['__real_download'] = True
3275 else:
3276 for file in downloaded:
3277 files_to_move[file] = None
3278 else:
3279 # Just a single file
3280 dl_filename = existing_video_file(full_filename, temp_filename)
3281 if dl_filename is None or dl_filename == temp_filename:
3282 # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
3283 # So we should try to resume the download
3284 success, real_download = self.dl(temp_filename, info_dict)
3285 info_dict['__real_download'] = real_download
3286 else:
3287 self.report_file_already_downloaded(dl_filename)
3288
3289 dl_filename = dl_filename or temp_filename
3290 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3291
3292 except network_exceptions as err:
3293 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
3294 return
3295 except OSError as err:
3296 raise UnavailableVideoError(err)
3297 except (ContentTooShortError, ) as err:
3298 self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
3299 return
3300
3301 self._raise_pending_errors(info_dict)
3302 if success and full_filename != '-':
3303
3304 def fixup():
3305 do_fixup = True
3306 fixup_policy = self.params.get('fixup')
3307 vid = info_dict['id']
3308
3309 if fixup_policy in ('ignore', 'never'):
3310 return
3311 elif fixup_policy == 'warn':
3312 do_fixup = 'warn'
3313 elif fixup_policy != 'force':
3314 assert fixup_policy in ('detect_or_warn', None)
3315 if not info_dict.get('__real_download'):
3316 do_fixup = False
3317
3318 def ffmpeg_fixup(cndn, msg, cls):
3319 if not (do_fixup and cndn):
3320 return
3321 elif do_fixup == 'warn':
3322 self.report_warning(f'{vid}: {msg}')
3323 return
3324 pp = cls(self)
3325 if pp.available:
3326 info_dict['__postprocessors'].append(pp)
3327 else:
3328 self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
3329
3330 stretched_ratio = info_dict.get('stretched_ratio')
3331 ffmpeg_fixup(stretched_ratio not in (1, None),
3332 f'Non-uniform pixel ratio {stretched_ratio}',
3333 FFmpegFixupStretchedPP)
3334
3335 downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
3336 downloader = downloader.FD_NAME if downloader else None
3337
3338 ext = info_dict.get('ext')
3339 postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
3340 isinstance(pp, FFmpegVideoConvertorPP)
3341 and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
3342 ) for pp in self._pps['post_process'])
3343
3344 if not postprocessed_by_ffmpeg:
3345 ffmpeg_fixup(ext == 'm4a' and info_dict.get('container') == 'm4a_dash',
3346 'writing DASH m4a. Only some players support this container',
3347 FFmpegFixupM4aPP)
3348 ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
3349 or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
3350 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
3351 FFmpegFixupM3u8PP)
3352 ffmpeg_fixup(info_dict.get('is_live') and downloader == 'dashsegments',
3353 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)
3354
3355 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
3356 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)
3357
3358 fixup()
3359 try:
3360 replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
3361 except PostProcessingError as err:
3362 self.report_error('Postprocessing: %s' % str(err))
3363 return
3364 try:
3365 for ph in self._post_hooks:
3366 ph(info_dict['filepath'])
3367 except Exception as err:
3368 self.report_error('post hooks: %s' % str(err))
3369 return
3370 info_dict['__write_download_archive'] = True
3371
3372 assert info_dict is original_infodict # Make sure the info_dict was modified in-place
3373 if self.params.get('force_write_download_archive'):
3374 info_dict['__write_download_archive'] = True
3375 check_max_downloads()
3376
3377 def __download_wrapper(self, func):
3378 @functools.wraps(func)
3379 def wrapper(*args, **kwargs):
3380 try:
3381 res = func(*args, **kwargs)
3382 except UnavailableVideoError as e:
3383 self.report_error(e)
3384 except DownloadCancelled as e:
3385 self.to_screen(f'[info] {e}')
3386 if not self.params.get('break_per_url'):
3387 raise
3388 self._num_downloads = 0
3389 else:
3390 if self.params.get('dump_single_json', False):
3391 self.post_extract(res)
3392 self.to_stdout(json.dumps(self.sanitize_info(res)))
3393 return wrapper
3394
3395 def download(self, url_list):
3396 """Download a given list of URLs."""
3397 url_list = variadic(url_list) # Passing a single URL is a common mistake
3398 outtmpl = self.params['outtmpl']['default']
3399 if (len(url_list) > 1
3400 and outtmpl != '-'
3401 and '%' not in outtmpl
3402 and self.params.get('max_downloads') != 1):
3403 raise SameFileError(outtmpl)
3404
3405 for url in url_list:
3406 self.__download_wrapper(self.extract_info)(
3407 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
3408
3409 return self._download_retcode
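# Typical embedding sketch (parameters assumed, see the project README):
#   with YoutubeDL({'outtmpl': '%(title)s.%(ext)s'}) as ydl:
#       retcode = ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])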
3410
3411 def download_with_info_file(self, info_filename):
3412 with contextlib.closing(fileinput.FileInput(
3413 [info_filename], mode='r',
3414 openhook=fileinput.hook_encoded('utf-8'))) as f:
3415 # FileInput doesn't have a read method, so we can't call json.load
3416 infos = [self.sanitize_info(info, self.params.get('clean_infojson', True))
3417 for info in variadic(json.loads('\n'.join(f)))]
3418 for info in infos:
3419 try:
3420 self.__download_wrapper(self.process_ie_result)(info, download=True)
3421 except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
3422 if not isinstance(e, EntryNotInPlaylist):
3423 self.to_stderr('\r')
3424 webpage_url = info.get('webpage_url')
3425 if webpage_url is None:
3426 raise
3427 self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
3428 self.download([webpage_url])
3429 return self._download_retcode
3430
3431 @staticmethod
3432 def sanitize_info(info_dict, remove_private_keys=False):
3433 ''' Sanitize the infodict for converting to json '''
3434 if info_dict is None:
3435 return info_dict
3436 info_dict.setdefault('epoch', int(time.time()))
3437 info_dict.setdefault('_type', 'video')
3438 info_dict.setdefault('_version', {
3439 'version': __version__,
3440 'current_git_head': current_git_head(),
3441 'release_git_head': RELEASE_GIT_HEAD,
3442 'repository': REPOSITORY,
3443 })
3444
3445 if remove_private_keys:
3446 reject = lambda k, v: v is None or k.startswith('__') or k in {
3447 'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
3448 'entries', 'filepath', '_filename', 'filename', 'infojson_filename', 'original_url',
3449 'playlist_autonumber', '_format_sort_fields',
3450 }
3451 else:
3452 reject = lambda k, v: False
3453
3454 def filter_fn(obj):
3455 if isinstance(obj, dict):
3456 return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
3457 elif isinstance(obj, (list, tuple, set, LazyList)):
3458 return list(map(filter_fn, obj))
3459 elif obj is None or isinstance(obj, (str, int, float, bool)):
3460 return obj
3461 else:
3462 return repr(obj)
3463
3464 return filter_fn(info_dict)
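# Illustrative behaviour (hypothetical input): with remove_private_keys=True,
#   sanitize_info({'id': 'x', '__postprocessors': [...], 'requested_formats': [...],
#                  'formats': LazyList(...)})
# drops the dunder and 'requested_*' keys, converts the LazyList to a plain list,
# and repr()s anything else non-JSON-serializable, so json.dumps cannot fail on it.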
3465
3466 @staticmethod
3467 def filter_requested_info(info_dict, actually_filter=True):
3468 ''' Alias of sanitize_info for backward compatibility '''
3469 return YoutubeDL.sanitize_info(info_dict, actually_filter)
3470
3471 def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
3472 for filename in set(filter(None, files_to_delete)):
3473 if msg:
3474 self.to_screen(msg % filename)
3475 try:
3476 os.remove(filename)
3477 except OSError:
3478 self.report_warning(f'Unable to delete file {filename}')
3479 if filename in info.get('__files_to_move', []): # NB: Delete even if None
3480 del info['__files_to_move'][filename]
3481
3482 @staticmethod
3483 def post_extract(info_dict):
3484 def actual_post_extract(info_dict):
3485 if info_dict.get('_type') in ('playlist', 'multi_video'):
3486 for video_dict in info_dict.get('entries', {}):
3487 actual_post_extract(video_dict or {})
3488 return
3489
3490 post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
3491 info_dict.update(post_extractor())
3492
3493 actual_post_extract(info_dict or {})
3494
3495 def run_pp(self, pp, infodict):
3496 files_to_delete = []
3497 if '__files_to_move' not in infodict:
3498 infodict['__files_to_move'] = {}
3499 try:
3500 files_to_delete, infodict = pp.run(infodict)
3501 except PostProcessingError as e:
3502 # Must be True and not 'only_download'
3503 if self.params.get('ignoreerrors') is True:
3504 self.report_error(e)
3505 return infodict
3506 raise
3507
3508 if not files_to_delete:
3509 return infodict
3510 if self.params.get('keepvideo', False):
3511 for f in files_to_delete:
3512 infodict['__files_to_move'].setdefault(f, '')
3513 else:
3514 self._delete_downloaded_files(
3515 *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
3516 return infodict
3517
3518 def run_all_pps(self, key, info, *, additional_pps=None):
3519 if key != 'video':
3520 self._forceprint(key, info)
3521 for pp in (additional_pps or []) + self._pps[key]:
3522 info = self.run_pp(pp, info)
3523 return info
3524
3525 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
3526 info = dict(ie_info)
3527 info['__files_to_move'] = files_to_move or {}
3528 try:
3529 info = self.run_all_pps(key, info)
3530 except PostProcessingError as err:
3531 msg = f'Preprocessing: {err}'
3532 info.setdefault('__pending_error', msg)
3533 self.report_error(msg, is_error=False)
3534 return info, info.pop('__files_to_move', None)
3535
3536 def post_process(self, filename, info, files_to_move=None):
3537 """Run all the postprocessors on the given file."""
3538 info['filepath'] = filename
3539 info['__files_to_move'] = files_to_move or {}
3540 info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
3541 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
3542 del info['__files_to_move']
3543 return self.run_all_pps('after_move', info)
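# Overall flow sketch: the 'post_process' PPs (plus per-download __postprocessors
# such as FFmpegMergerPP and the ffmpeg fixups queued in process_info) run against
# the downloaded file, then MoveFilesAfterDownloadPP moves it from the temp dir to
# its final location, and finally the 'after_move' PPs run on the moved file.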
3544
3545 def _make_archive_id(self, info_dict):
3546 video_id = info_dict.get('id')
3547 if not video_id:
3548 return
3549 # Future-proof against any change in case,
3550 # and for backwards compatibility with prior versions
3551 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
3552 if extractor is None:
3553 url = str_or_none(info_dict.get('url'))
3554 if not url:
3555 return
3556 # Try to find matching extractor for the URL and take its ie_key
3557 for ie_key, ie in self._ies.items():
3558 if ie.suitable(url):
3559 extractor = ie_key
3560 break
3561 else:
3562 return
3563 return make_archive_id(extractor, video_id)
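# e.g. for a YouTube video with id 'dQw4w9WgXcQ' (illustrative), make_archive_id
# produces 'youtube dQw4w9WgXcQ': the lower-cased extractor key plus the video id,
# matching the line format of the --download-archive file.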
3564
3565 def in_download_archive(self, info_dict):
3566 if not self.archive:
3567 return False
3568
3569 vid_ids = [self._make_archive_id(info_dict)]
3570 vid_ids.extend(info_dict.get('_old_archive_ids') or [])
3571 return any(id_ in self.archive for id_ in vid_ids)
3572
3573 def record_download_archive(self, info_dict):
3574 fn = self.params.get('download_archive')
3575 if fn is None:
3576 return
3577 vid_id = self._make_archive_id(info_dict)
3578 assert vid_id
3579
3580 self.write_debug(f'Adding to archive: {vid_id}')
3581 if is_path_like(fn):
3582 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
3583 archive_file.write(vid_id + '\n')
3584 self.archive.add(vid_id)
3585
3586 @staticmethod
3587 def format_resolution(format, default='unknown'):
3588 if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
3589 return 'audio only'
3590 if format.get('resolution') is not None:
3591 return format['resolution']
3592 if format.get('width') and format.get('height'):
3593 return '%dx%d' % (format['width'], format['height'])
3594 elif format.get('height'):
3595 return '%sp' % format['height']
3596 elif format.get('width'):
3597 return '%dx?' % format['width']
3598 return default
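# Examples (illustrative inputs):
#   {'width': 1920, 'height': 1080}            -> '1920x1080'
#   {'height': 720}                            -> '720p'
#   {'vcodec': 'none', 'acodec': 'mp4a.40.2'}  -> 'audio only'
#   {}                                         -> 'unknown' (the default)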
3599
3600 def _list_format_headers(self, *headers):
3601 if self.params.get('listformats_table', True) is not False:
3602 return [self._format_out(header, self.Styles.HEADERS) for header in headers]
3603 return headers
3604
3605 def _format_note(self, fdict):
3606 res = ''
3607 if fdict.get('ext') in ['f4f', 'f4m']:
3608 res += '(unsupported)'
3609 if fdict.get('language'):
3610 if res:
3611 res += ' '
3612 res += '[%s]' % fdict['language']
3613 if fdict.get('format_note') is not None:
3614 if res:
3615 res += ' '
3616 res += fdict['format_note']
3617 if fdict.get('tbr') is not None:
3618 if res:
3619 res += ', '
3620 res += '%4dk' % fdict['tbr']
3621 if fdict.get('container') is not None:
3622 if res:
3623 res += ', '
3624 res += '%s container' % fdict['container']
3625 if (fdict.get('vcodec') is not None
3626 and fdict.get('vcodec') != 'none'):
3627 if res:
3628 res += ', '
3629 res += fdict['vcodec']
3630 if fdict.get('vbr') is not None:
3631 res += '@'
3632 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
3633 res += 'video@'
3634 if fdict.get('vbr') is not None:
3635 res += '%4dk' % fdict['vbr']
3636 if fdict.get('fps') is not None:
3637 if res:
3638 res += ', '
3639 res += '%sfps' % fdict['fps']
3640 if fdict.get('acodec') is not None:
3641 if res:
3642 res += ', '
3643 if fdict['acodec'] == 'none':
3644 res += 'video only'
3645 else:
3646 res += '%-5s' % fdict['acodec']
3647 elif fdict.get('abr') is not None:
3648 if res:
3649 res += ', '
3650 res += 'audio'
3651 if fdict.get('abr') is not None:
3652 res += '@%3dk' % fdict['abr']
3653 if fdict.get('asr') is not None:
3654 res += ' (%5dHz)' % fdict['asr']
3655 if fdict.get('filesize') is not None:
3656 if res:
3657 res += ', '
3658 res += format_bytes(fdict['filesize'])
3659 elif fdict.get('filesize_approx') is not None:
3660 if res:
3661 res += ', '
3662 res += '~' + format_bytes(fdict['filesize_approx'])
3663 return res
3664
3665 def _get_formats(self, info_dict):
3666 if info_dict.get('formats') is None:
3667 if info_dict.get('url') and info_dict.get('_type', 'video') == 'video':
3668 return [info_dict]
3669 return []
3670 return info_dict['formats']
3671
3672 def render_formats_table(self, info_dict):
3673 formats = self._get_formats(info_dict)
3674 if not formats:
3675 return
3676 if self.params.get('listformats_table', True) is False:
3677 table = [
3678 [
3679 format_field(f, 'format_id'),
3680 format_field(f, 'ext'),
3681 self.format_resolution(f),
3682 self._format_note(f)
3683 ] for f in formats if (f.get('preference') or 0) >= -1000]
3684 return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
3685
3686 def simplified_codec(f, field):
3687 assert field in ('acodec', 'vcodec')
3688 codec = f.get(field, 'unknown')
3689 if not codec:
3690 return 'unknown'
3691 elif codec != 'none':
3692 return '.'.join(codec.split('.')[:4])
3693
3694 if field == 'vcodec' and f.get('acodec') == 'none':
3695 return 'images'
3696 elif field == 'acodec' and f.get('vcodec') == 'none':
3697 return ''
3698 return self._format_out('audio only' if field == 'vcodec' else 'video only',
3699 self.Styles.SUPPRESS)
3700
3701 delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
3702 table = [
3703 [
3704 self._format_out(format_field(f, 'format_id'), self.Styles.ID),
3705 format_field(f, 'ext'),
3706 format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
3707 format_field(f, 'fps', '\t%d', func=round),
3708 format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
3709 format_field(f, 'audio_channels', '\t%s'),
3710 delim,
3711 format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
3712 format_field(f, 'tbr', '\t%dk', func=round),
3713 shorten_protocol_name(f.get('protocol', '')),
3714 delim,
3715 simplified_codec(f, 'vcodec'),
3716 format_field(f, 'vbr', '\t%dk', func=round),
3717 simplified_codec(f, 'acodec'),
3718 format_field(f, 'abr', '\t%dk', func=round),
3719 format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
3720 join_nonempty(
3721 self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
3722 self._format_out('DRM', 'light red') if f.get('has_drm') else None,
3723 format_field(f, 'language', '[%s]'),
3724 join_nonempty(format_field(f, 'format_note'),
3725 format_field(f, 'container', ignore=(None, f.get('ext'))),
3726 delim=', '),
3727 delim=' '),
3728 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3729 header_line = self._list_format_headers(
3730 'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', 'CH', delim, '\tFILESIZE', '\tTBR', 'PROTO',
3731 delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')
3732
3733 return render_table(
3734 header_line, table, hide_empty=True,
3735 delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
3736
3737 def render_thumbnails_table(self, info_dict):
3738 thumbnails = list(info_dict.get('thumbnails') or [])
3739 if not thumbnails:
3740 return None
3741 return render_table(
3742 self._list_format_headers('ID', 'Width', 'Height', 'URL'),
3743 [[t.get('id'), t.get('width') or 'unknown', t.get('height') or 'unknown', t['url']] for t in thumbnails])
3744
3745 def render_subtitles_table(self, video_id, subtitles):
3746 def _row(lang, formats):
3747 exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
3748 if len(set(names)) == 1:
3749 names = [] if names[0] == 'unknown' else names[:1]
3750 return [lang, ', '.join(names), ', '.join(exts)]
3751
3752 if not subtitles:
3753 return None
3754 return render_table(
3755 self._list_format_headers('Language', 'Name', 'Formats'),
3756 [_row(lang, formats) for lang, formats in subtitles.items()],
3757 hide_empty=True)
3758
3759 def __list_table(self, video_id, name, func, *args):
3760 table = func(*args)
3761 if not table:
3762 self.to_screen(f'{video_id} has no {name}')
3763 return
3764 self.to_screen(f'[info] Available {name} for {video_id}:')
3765 self.to_stdout(table)
3766
3767 def list_formats(self, info_dict):
3768 self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)
3769
3770 def list_thumbnails(self, info_dict):
3771 self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)
3772
3773 def list_subtitles(self, video_id, subtitles, name='subtitles'):
3774 self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
3775
3776 def urlopen(self, req):
3777 """ Start an HTTP download """
3778 if isinstance(req, str):
3779 req = sanitized_Request(req)
3780 return self._opener.open(req, timeout=self._socket_timeout)
3781
3782 def print_debug_header(self):
3783 if not self.params.get('verbose'):
3784 return
3785
3786 from . import _IN_CLI # Must be delayed import
3787
3788 # These imports can be slow. So import them only as needed
3789 from .extractor.extractors import _LAZY_LOADER
3790 from .extractor.extractors import (
3791 _PLUGIN_CLASSES as plugin_ies,
3792 _PLUGIN_OVERRIDES as plugin_ie_overrides
3793 )
3794
3795 def get_encoding(stream):
3796 ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
3797 additional_info = []
3798 if os.environ.get('TERM', '').lower() == 'dumb':
3799 additional_info.append('dumb')
3800 if not supports_terminal_sequences(stream):
3801 from .utils import WINDOWS_VT_MODE # Must be imported locally
3802 additional_info.append('No VT' if WINDOWS_VT_MODE is False else 'No ANSI')
3803 if additional_info:
3804 ret = f'{ret} ({",".join(additional_info)})'
3805 return ret
3806
3807 encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
3808 locale.getpreferredencoding(),
3809 sys.getfilesystemencoding(),
3810 self.get_encoding(),
3811 ', '.join(
3812 f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
3813 if stream is not None and key != 'console')
3814 )
3815
3816 logger = self.params.get('logger')
3817 if logger:
3818 write_debug = lambda msg: logger.debug(f'[debug] {msg}')
3819 write_debug(encoding_str)
3820 else:
3821 write_string(f'[debug] {encoding_str}\n', encoding=None)
3822 write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')
3823
3824 source = detect_variant()
3825 if VARIANT not in (None, 'pip'):
3826 source += '*'
3827 klass = type(self)
3828 write_debug(join_nonempty(
3829 f'{"yt-dlp" if REPOSITORY == "yt-dlp/yt-dlp" else REPOSITORY} version',
3830 f'{CHANNEL}@{__version__}',
3831 f'[{RELEASE_GIT_HEAD[:9]}]' if RELEASE_GIT_HEAD else '',
3832 '' if source == 'unknown' else f'({source})',
3833 '' if _IN_CLI else 'API' if klass == YoutubeDL else f'API:{self.__module__}.{klass.__qualname__}',
3834 delim=' '))
3835
3836 if not _IN_CLI:
3837 write_debug(f'params: {self.params}')
3838
3839 if not _LAZY_LOADER:
3840 if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
3841 write_debug('Lazy loading extractors is forcibly disabled')
3842 else:
3843 write_debug('Lazy loading extractors is disabled')
3844 if self.params['compat_opts']:
3845 write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
3846
3847 if current_git_head():
3848 write_debug(f'Git HEAD: {current_git_head()}')
3849 write_debug(system_identifier())
3850
3851 exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
3852 ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
3853 if ffmpeg_features:
3854 exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))
3855
3856 exe_versions['rtmpdump'] = rtmpdump_version()
3857 exe_versions['phantomjs'] = PhantomJSwrapper._version()
3858 exe_str = ', '.join(
3859 f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
3860 ) or 'none'
3861 write_debug('exe versions: %s' % exe_str)
3862
3863 from .compat.compat_utils import get_package_info
3864 from .dependencies import available_dependencies
3865
3866 write_debug('Optional libraries: %s' % (', '.join(sorted({
3867 join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
3868 })) or 'none'))
3869
3870 self._setup_opener()
3871 proxy_map = {}
3872 for handler in self._opener.handlers:
3873 if hasattr(handler, 'proxies'):
3874 proxy_map.update(handler.proxies)
3875 write_debug(f'Proxy map: {proxy_map}')
3876
3877 for plugin_type, plugins in {'Extractor': plugin_ies, 'Post-Processor': plugin_pps}.items():
3878 display_list = ['%s%s' % (
3879 klass.__name__, '' if klass.__name__ == name else f' as {name}')
3880 for name, klass in plugins.items()]
3881 if plugin_type == 'Extractor':
3882 display_list.extend(f'{plugins[-1].IE_NAME.partition("+")[2]} ({parent.__name__})'
3883 for parent, plugins in plugin_ie_overrides.items())
3884 if not display_list:
3885 continue
3886 write_debug(f'{plugin_type} Plugins: {", ".join(sorted(display_list))}')
3887
3888 plugin_dirs = plugin_directories()
3889 if plugin_dirs:
3890 write_debug(f'Plugin directories: {plugin_dirs}')
3891
3892 # Not implemented
3893 if False and self.params.get('call_home'):
3894 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
3895 write_debug('Public IP address: %s' % ipaddr)
3896 latest_version = self.urlopen(
3897 'https://yt-dl.org/latest/version').read().decode()
3898 if version_tuple(latest_version) > version_tuple(__version__):
3899 self.report_warning(
3900 'You are using an outdated version (newest version: %s)! '
3901 'See https://yt-dl.org/update if you need help updating.' %
3902 latest_version)
3903
3904 def _setup_opener(self):
3905 if hasattr(self, '_opener'):
3906 return
3907 timeout_val = self.params.get('socket_timeout')
3908 self._socket_timeout = 20 if timeout_val is None else float(timeout_val)
3909
3910 opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
3911 opts_cookiefile = self.params.get('cookiefile')
3912 opts_proxy = self.params.get('proxy')
3913
3914 self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
3915
3916 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
3917 if opts_proxy is not None:
3918 if opts_proxy == '':
3919 proxies = {}
3920 else:
3921 proxies = {'http': opts_proxy, 'https': opts_proxy}
3922 else:
3923 proxies = urllib.request.getproxies()
3924 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
3925 if 'http' in proxies and 'https' not in proxies:
3926 proxies['https'] = proxies['http']
3927 proxy_handler = PerRequestProxyHandler(proxies)
3928
3929 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
3930 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3931 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
3932 redirect_handler = YoutubeDLRedirectHandler()
3933 data_handler = urllib.request.DataHandler()
3934
3935 # When passing our own FileHandler instance, build_opener won't add the
3936 # default FileHandler, which allows us to disable the file protocol, since
3937 # it can be used for malicious purposes (see
3938 # https://github.com/ytdl-org/youtube-dl/issues/8227)
3939 file_handler = urllib.request.FileHandler()
3940
3941 if not self.params.get('enable_file_urls'):
3942 def file_open(*args, **kwargs):
3943 raise urllib.error.URLError(
3944 'file:// URLs are explicitly disabled in yt-dlp for security reasons. '
3945 'Use --enable-file-urls to enable at your own risk.')
3946 file_handler.file_open = file_open
3947
3948 opener = urllib.request.build_opener(
3949 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
3950
3951 # Delete the default user-agent header, which would otherwise apply in
3952 # cases where our custom HTTP handler doesn't come into play
3953 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
3954 opener.addheaders = []
3955 self._opener = opener
3956
3957 def encode(self, s):
3958 if isinstance(s, bytes):
3959 return s # Already encoded
3960
3961 try:
3962 return s.encode(self.get_encoding())
3963 except UnicodeEncodeError as err:
3964 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3965 raise
3966
3967 def get_encoding(self):
3968 encoding = self.params.get('encoding')
3969 if encoding is None:
3970 encoding = preferredencoding()
3971 return encoding
3972
3973 def _write_info_json(self, label, ie_result, infofn, overwrite=None):
3974 ''' Write infojson and return True = written, 'exists' = already exists, False = skip, None = error '''
3975 if overwrite is None:
3976 overwrite = self.params.get('overwrites', True)
3977 if not self.params.get('writeinfojson'):
3978 return False
3979 elif not infofn:
3980 self.write_debug(f'Skipping writing {label} infojson')
3981 return False
3982 elif not self._ensure_dir_exists(infofn):
3983 return None
3984 elif not overwrite and os.path.exists(infofn):
3985 self.to_screen(f'[info] {label.title()} metadata is already present')
3986 return 'exists'
3987
3988 self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
3989 try:
3990 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
3991 return True
3992 except OSError:
3993 self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
3994 return None
3995
3996 def _write_description(self, label, ie_result, descfn):
3997 ''' Write description and return True = written, False = skip, None = error '''
3998 if not self.params.get('writedescription'):
3999 return False
4000 elif not descfn:
4001 self.write_debug(f'Skipping writing {label} description')
4002 return False
4003 elif not self._ensure_dir_exists(descfn):
4004 return None
4005 elif not self.params.get('overwrites', True) and os.path.exists(descfn):
4006 self.to_screen(f'[info] {label.title()} description is already present')
4007 elif ie_result.get('description') is None:
4008 self.to_screen(f'[info] There\'s no {label} description to write')
4009 return False
4010 else:
4011 try:
4012 self.to_screen(f'[info] Writing {label} description to: {descfn}')
4013 with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
4014 descfile.write(ie_result['description'])
4015 except OSError:
4016 self.report_error(f'Cannot write {label} description file {descfn}')
4017 return None
4018 return True
4019
4020 def _write_subtitles(self, info_dict, filename):
4021 ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
4022 ret = []
4023 subtitles = info_dict.get('requested_subtitles')
4024 if not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
4025 # subtitle download errors are already handled as non-fatal in the relevant IE,
4026 # so this silently continues when used with an IE that doesn't support subtitles
4027 return ret
4028 elif not subtitles:
4029 self.to_screen('[info] There are no subtitles for the requested languages')
4030 return ret
4031 sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
4032 if not sub_filename_base:
4033 self.to_screen('[info] Skipping writing video subtitles')
4034 return ret
4035
4036 for sub_lang, sub_info in subtitles.items():
4037 sub_format = sub_info['ext']
4038 sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
4039 sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
4040 existing_sub = self.existing_file((sub_filename_final, sub_filename))
4041 if existing_sub:
4042 self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
4043 sub_info['filepath'] = existing_sub
4044 ret.append((existing_sub, sub_filename_final))
4045 continue
4046
4047 self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
4048 if sub_info.get('data') is not None:
4049 try:
4050 # Use newline='' to prevent conversion of newline characters
4051 # See https://github.com/ytdl-org/youtube-dl/issues/10268
4052 with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
4053 subfile.write(sub_info['data'])
4054 sub_info['filepath'] = sub_filename
4055 ret.append((sub_filename, sub_filename_final))
4056 continue
4057 except OSError:
4058 self.report_error(f'Cannot write video subtitles file {sub_filename}')
4059 return None
4060
4061 try:
4062 sub_copy = sub_info.copy()
4063 sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
4064 self.dl(sub_filename, sub_copy, subtitle=True)
4065 sub_info['filepath'] = sub_filename
4066 ret.append((sub_filename, sub_filename_final))
4067 except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
4068 msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
4069 if self.params.get('ignoreerrors') is not True: # False or 'only_download'
4070 if not self.params.get('ignoreerrors'):
4071 self.report_error(msg)
4072 raise DownloadError(msg)
4073 self.report_warning(msg)
4074 return ret
4075
4076 def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
4077 ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
4078 write_all = self.params.get('write_all_thumbnails', False)
4079 thumbnails, ret = [], []
4080 if write_all or self.params.get('writethumbnail', False):
4081 thumbnails = info_dict.get('thumbnails') or []
4082 if not thumbnails:
4083 self.to_screen(f'[info] There are no {label} thumbnails to download')
4084 return ret
4085 multiple = write_all and len(thumbnails) > 1
4086
4087 if thumb_filename_base is None:
4088 thumb_filename_base = filename
4089 if thumbnails and not thumb_filename_base:
4090 self.write_debug(f'Skipping writing {label} thumbnail')
4091 return ret
4092
4093 for idx, t in list(enumerate(thumbnails))[::-1]:
4094 thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
4095 thumb_display_id = f'{label} thumbnail {t["id"]}'
4096 thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
4097 thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
4098
4099 existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
4100 if existing_thumb:
4101 self.to_screen('[info] %s is already present' % (
4102 thumb_display_id if multiple else f'{label} thumbnail').capitalize())
4103 t['filepath'] = existing_thumb
4104 ret.append((existing_thumb, thumb_filename_final))
4105 else:
4106 self.to_screen(f'[info] Downloading {thumb_display_id} ...')
4107 try:
4108 uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
4109 self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
4110 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
4111 shutil.copyfileobj(uf, thumbf)
4112 ret.append((thumb_filename, thumb_filename_final))
4113 t['filepath'] = thumb_filename
4114 except network_exceptions as err:
4115 if isinstance(err, urllib.error.HTTPError) and err.code == 404:
4116 self.to_screen(f'[info] {thumb_display_id.title()} does not exist')
4117 else:
4118 self.report_warning(f'Unable to download {thumb_display_id}: {err}')
4119 thumbnails.pop(idx)
4120 if ret and not write_all:
4121 break
4122 return ret