import collections
import contextlib
import datetime
import errno
import fileinput
import functools
import io
import itertools
import json
import locale
import operator
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import time
import tokenize
import traceback
import unicodedata
import urllib.request
from string import ascii_letters

from .cache import Cache
from .compat import compat_os_name, compat_shlex_quote
from .cookies import load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.common import UnsupportedURLIE
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .postprocessor import _PLUGIN_CLASSES as plugin_postprocessors
from .postprocessor import (
    EmbedThumbnailPP,
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    FFmpegVideoConvertorPP,
    MoveFilesAfterDownloadPP,
    get_postprocessor,
)
from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
from .update import detect_variant
from .utils import (
    DEFAULT_OUTTMPL,
    IDENTITY,
    LINK_TEMPLATES,
    MEDIA_EXTENSIONS,
    NO_DEFAULT,
    NUMBER_RE,
    OUTTMPL_TYPES,
    POSTPROCESS_WHEN,
    STR_FORMAT_RE_TMPL,
    STR_FORMAT_TYPES,
    ContentTooShortError,
    DateRange,
    DownloadCancelled,
    DownloadError,
    EntryNotInPlaylist,
    ExistingVideoReached,
    ExtractorError,
    GeoRestrictedError,
    HEADRequest,
    ISO3166Utils,
    LazyList,
    MaxDownloadsReached,
    Namespace,
    PagedList,
    PerRequestProxyHandler,
    PlaylistEntries,
    Popen,
    PostProcessingError,
    ReExtractInfo,
    RejectedVideoReached,
    SameFileError,
    UnavailableVideoError,
    UserNotLive,
    YoutubeDLCookieProcessor,
    YoutubeDLHandler,
    YoutubeDLRedirectHandler,
    age_restricted,
    args_to_str,
    bug_reports_message,
    date_from_str,
    determine_ext,
    determine_protocol,
    encode_compat_str,
    encodeFilename,
    error_to_compat_str,
    escapeHTML,
    expand_path,
    filter_dict,
    float_or_none,
    format_bytes,
    format_decimal_suffix,
    format_field,
    formatSeconds,
    get_compatible_ext,
    get_domain,
    int_or_none,
    iri_to_uri,
    join_nonempty,
    locked_file,
    make_archive_id,
    make_dir,
    make_HTTPS_handler,
    merge_headers,
    network_exceptions,
    number_of_digits,
    orderedSet,
    orderedSet_from_options,
    parse_filesize,
    preferredencoding,
    prepend_extension,
    register_socks_protocols,
    remove_terminal_sequences,
    render_table,
    replace_extension,
    sanitize_filename,
    sanitize_path,
    sanitize_url,
    sanitized_Request,
    std_headers,
    str_or_none,
    strftime_or_none,
    subtitles_filename,
    supports_terminal_sequences,
    system_identifier,
    timetuple_from_msec,
    to_high_limit_path,
    traverse_obj,
    try_call,
    try_get,
    url_basename,
    variadic,
    version_tuple,
    windows_enable_vt_mode,
    write_json_file,
    write_string,
)
from .version import RELEASE_GIT_HEAD, VARIANT, __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL:
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Since, given a video URL, the downloader doesn't know how to
    extract all the needed information (that is the task of the
    InfoExtractors), it has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge for the InfoExtractors
    that are added to it, so this is a "mutual registration".
180 Available options:
181
182 username: Username for authentication purposes.
183 password: Password for authentication purposes.
184 videopassword: Password for accessing a video.
185 ap_mso: Adobe Pass multiple-system operator identifier.
186 ap_username: Multiple-system operator account username.
187 ap_password: Multiple-system operator account password.
188 usenetrc: Use netrc for authentication instead.
189 verbose: Print additional info to stdout.
190 quiet: Do not print messages to stdout.
191 no_warnings: Do not print out anything for warnings.
192 forceprint: A dict with keys WHEN mapped to a list of templates to
193 print to stdout. The allowed keys are video or any of the
194 items in utils.POSTPROCESS_WHEN.
195 For compatibility, a single list is also accepted
196 print_to_file: A dict with keys WHEN (same as forceprint) mapped to
197 a list of tuples with (template, filename)
198 forcejson: Force printing info_dict as JSON.
199 dump_single_json: Force printing the info_dict of the whole playlist
200 (or video) as a single JSON line.
201 force_write_download_archive: Force writing download archive regardless
202 of 'skip_download' or 'simulate'.
203 simulate: Do not download the video files. If unset (or None),
204 simulate only if listsubtitles, listformats or list_thumbnails is used
205 format: Video format code. see "FORMAT SELECTION" for more details.
206 You can also pass a function. The function takes 'ctx' as
207 argument and returns the formats to download.
208 See "build_format_selector" for an implementation
209 allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
210 ignore_no_formats_error: Ignore "No video formats" error. Usefull for
211 extracting metadata even if the video is not actually
212 available for download (experimental)
213 format_sort: A list of fields by which to sort the video formats.
214 See "Sorting Formats" for more details.
215 format_sort_force: Force the given format_sort. see "Sorting Formats"
216 for more details.
217 prefer_free_formats: Whether to prefer video formats with free containers
218 over non-free ones of same quality.
219 allow_multiple_video_streams: Allow multiple video streams to be merged
220 into a single file
221 allow_multiple_audio_streams: Allow multiple audio streams to be merged
222 into a single file
223 check_formats Whether to test if the formats are downloadable.
224 Can be True (check all), False (check none),
225 'selected' (check selected formats),
226 or None (check only if requested by extractor)
227 paths: Dictionary of output paths. The allowed keys are 'home'
228 'temp' and the keys of OUTTMPL_TYPES (in utils.py)
229 outtmpl: Dictionary of templates for output names. Allowed keys
230 are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
231 For compatibility with youtube-dl, a single string can also be used
232 outtmpl_na_placeholder: Placeholder for unavailable meta fields.
233 restrictfilenames: Do not allow "&" and spaces in file names
234 trim_file_name: Limit length of filename (extension excluded)
235 windowsfilenames: Force the filenames to be windows compatible
236 ignoreerrors: Do not stop on download/postprocessing errors.
237 Can be 'only_download' to ignore only download errors.
238 Default is 'only_download' for CLI, but False for API
239 skip_playlist_after_errors: Number of allowed failures until the rest of
240 the playlist is skipped
241 allowed_extractors: List of regexes to match against extractor names that are allowed
242 overwrites: Overwrite all video and metadata files if True,
243 overwrite only non-video files if None
244 and don't overwrite any file if False
245 For compatibility with youtube-dl,
246 "nooverwrites" may also be used instead
247 playlist_items: Specific indices of playlist to download.
248 playlistrandom: Download playlist items in random order.
249 lazy_playlist: Process playlist entries as they are received.
250 matchtitle: Download only matching titles.
251 rejecttitle: Reject downloads for matching titles.
252 logger: Log messages to a logging.Logger instance.
253 logtostderr: Log messages to stderr instead of stdout.
254 consoletitle: Display progress in console window's titlebar.
255 writedescription: Write the video description to a .description file
256 writeinfojson: Write the video description to a .info.json file
257 clean_infojson: Remove private fields from the infojson
258 getcomments: Extract video comments. This will not be written to disk
259 unless writeinfojson is also given
260 writeannotations: Write the video annotations to a .annotations.xml file
261 writethumbnail: Write the thumbnail image to a file
262 allow_playlist_files: Whether to write playlists' description, infojson etc
263 also to disk when using the 'write*' options
264 write_all_thumbnails: Write all thumbnail formats to files
265 writelink: Write an internet shortcut file, depending on the
266 current platform (.url/.webloc/.desktop)
267 writeurllink: Write a Windows internet shortcut file (.url)
268 writewebloclink: Write a macOS internet shortcut file (.webloc)
269 writedesktoplink: Write a Linux internet shortcut file (.desktop)
270 writesubtitles: Write the video subtitles to a file
271 writeautomaticsub: Write the automatically generated subtitles to a file
272 listsubtitles: Lists all available subtitles for the video
273 subtitlesformat: The format code for subtitles
274 subtitleslangs: List of languages of the subtitles to download (can be regex).
275 The list may contain "all" to refer to all the available
276 subtitles. The language can be prefixed with a "-" to
277 exclude it from the requested languages, e.g. ['all', '-live_chat']
278 keepvideo: Keep the video file after post-processing
279 daterange: A DateRange object, download only if the upload_date is in the range.
280 skip_download: Skip the actual download of the video file
281 cachedir: Location of the cache files in the filesystem.
282 False to disable filesystem cache.
283 noplaylist: Download single video instead of a playlist if in doubt.
284 age_limit: An integer representing the user's age in years.
285 Unsuitable videos for the given age are skipped.
286 min_views: An integer representing the minimum view count the video
287 must have in order to not be skipped.
288 Videos without view count information are always
289 downloaded. None for no limit.
290 max_views: An integer representing the maximum view count.
291 Videos that are more popular than that are not
292 downloaded.
293 Videos without view count information are always
294 downloaded. None for no limit.
295 download_archive: File name of a file where all downloads are recorded.
296 Videos already present in the file are not downloaded
297 again.
298 break_on_existing: Stop the download process after attempting to download a
299 file that is in the archive.
300 break_on_reject: Stop the download process when encountering a video that
301 has been filtered out.
302 break_per_url: Whether break_on_reject and break_on_existing
303 should act on each input URL as opposed to for the entire queue
304 cookiefile: File name or text stream from where cookies should be read and dumped to
305 cookiesfrombrowser: A tuple containing the name of the browser, the profile
306 name/path from where cookies are loaded, and the name of the
307 keyring, e.g. ('chrome', ) or ('vivaldi', 'default', 'BASICTEXT')
308 legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
309 support RFC 5746 secure renegotiation
310 nocheckcertificate: Do not verify SSL certificates
311 client_certificate: Path to client certificate file in PEM format. May include the private key
312 client_certificate_key: Path to private key file for client certificate
313 client_certificate_password: Password for client certificate private key, if encrypted.
314 If not provided and the key is encrypted, yt-dlp will ask interactively
315 prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
316 (Only supported by some extractors)
317 http_headers: A dictionary of custom headers to be used for all requests
318 proxy: URL of the proxy server to use
319 geo_verification_proxy: URL of the proxy to use for IP address verification
320 on geo-restricted sites.
321 socket_timeout: Time to wait for unresponsive hosts, in seconds
322 bidi_workaround: Work around buggy terminals without bidirectional text
323 support, using fridibi
324 debug_printtraffic:Print out sent and received HTTP traffic
325 default_search: Prepend this string if an input url is not valid.
326 'auto' for elaborate guessing
327 encoding: Use this encoding instead of the system-specified.
328 extract_flat: Whether to resolve and process url_results further
329 * False: Always process (default)
330 * True: Never process
331 * 'in_playlist': Do not process inside playlist/multi_video
332 * 'discard': Always process, but don't return the result
333 from inside playlist/multi_video
334 * 'discard_in_playlist': Same as "discard", but only for
335 playlists (not multi_video)
336 wait_for_video: If given, wait for scheduled streams to become available.
337 The value should be a tuple containing the range
338 (min_secs, max_secs) to wait between retries
339 postprocessors: A list of dictionaries, each with an entry
340 * key: The name of the postprocessor. See
341 yt_dlp/postprocessor/__init__.py for a list.
342 * when: When to run the postprocessor. Allowed values are
343 the entries of utils.POSTPROCESS_WHEN
344 Assumed to be 'post_process' if not given
345 progress_hooks: A list of functions that get called on download
346 progress, with a dictionary with the entries
347 * status: One of "downloading", "error", or "finished".
348 Check this first and ignore unknown values.
349 * info_dict: The extracted info_dict
350
351 If status is one of "downloading", or "finished", the
352 following properties may also be present:
353 * filename: The final filename (always present)
354 * tmpfilename: The filename we're currently writing to
355 * downloaded_bytes: Bytes on disk
356 * total_bytes: Size of the whole file, None if unknown
357 * total_bytes_estimate: Guess of the eventual file size,
358 None if unavailable.
359 * elapsed: The number of seconds since download started.
360 * eta: The estimated time in seconds, None if unknown
361 * speed: The download speed in bytes/second, None if
362 unknown
363 * fragment_index: The counter of the currently
364 downloaded video fragment.
365 * fragment_count: The number of fragments (= individual
366 files that will be merged)
367
368 Progress hooks are guaranteed to be called at least once
369 (with status "finished") if the download is successful.
370 postprocessor_hooks: A list of functions that get called on postprocessing
371 progress, with a dictionary with the entries
372 * status: One of "started", "processing", or "finished".
373 Check this first and ignore unknown values.
374 * postprocessor: Name of the postprocessor
375 * info_dict: The extracted info_dict
376
377 Progress hooks are guaranteed to be called at least twice
378 (with status "started" and "finished") if the processing is successful.
379 merge_output_format: "/" separated list of extensions to use when merging formats.
380 final_ext: Expected final extension; used to detect when the file was
381 already downloaded and converted
382 fixup: Automatically correct known faults of the file.
383 One of:
384 - "never": do nothing
385 - "warn": only emit a warning
386 - "detect_or_warn": check whether we can do anything
387 about it, warn otherwise (default)
388 source_address: Client-side IP address to bind to.
389 sleep_interval_requests: Number of seconds to sleep between requests
390 during extraction
391 sleep_interval: Number of seconds to sleep before each download when
392 used alone or a lower bound of a range for randomized
393 sleep before each download (minimum possible number
394 of seconds to sleep) when used along with
395 max_sleep_interval.
396 max_sleep_interval:Upper bound of a range for randomized sleep before each
397 download (maximum possible number of seconds to sleep).
398 Must only be used along with sleep_interval.
399 Actual sleep time will be a random float from range
400 [sleep_interval; max_sleep_interval].
401 sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
402 listformats: Print an overview of available video formats and exit.
403 list_thumbnails: Print a table of all thumbnails and exit.
404 match_filter: A function that gets called for every video with the signature
405 (info_dict, *, incomplete: bool) -> Optional[str]
406 For backward compatibility with youtube-dl, the signature
407 (info_dict) -> Optional[str] is also allowed.
408 - If it returns a message, the video is ignored.
409 - If it returns None, the video is downloaded.
410 - If it returns utils.NO_DEFAULT, the user is interactively
411 asked whether to download the video.
412 match_filter_func in utils.py is one example for this.
413 no_color: Do not emit color codes in output.
414 geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
415 HTTP header
416 geo_bypass_country:
417 Two-letter ISO 3166-2 country code that will be used for
418 explicit geographic restriction bypassing via faking
419 X-Forwarded-For HTTP header
420 geo_bypass_ip_block:
421 IP range in CIDR notation that will be used similarly to
422 geo_bypass_country
423 external_downloader: A dictionary of protocol keys and the executable of the
424 external downloader to use for it. The allowed protocols
425 are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
426 Set the value to 'native' to use the native downloader
427 compat_opts: Compatibility options. See "Differences in default behavior".
428 The following options do not work when used through the API:
429 filename, abort-on-error, multistreams, no-live-chat, format-sort
430 no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
431 Refer __init__.py for their implementation
432 progress_template: Dictionary of templates for progress outputs.
433 Allowed keys are 'download', 'postprocess',
434 'download-title' (console title) and 'postprocess-title'.
435 The template is mapped on a dictionary with keys 'progress' and 'info'
436 retry_sleep_functions: Dictionary of functions that takes the number of attempts
437 as argument and returns the time to sleep in seconds.
438 Allowed keys are 'http', 'fragment', 'file_access'
439 download_ranges: A callback function that gets called for every video with
440 the signature (info_dict, ydl) -> Iterable[Section].
441 Only the returned sections will be downloaded.
442 Each Section is a dict with the following keys:
443 * start_time: Start time of the section in seconds
444 * end_time: End time of the section in seconds
445 * title: Section title (Optional)
446 * index: Section number (Optional)
447 force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
448 noprogress: Do not print the progress bar
449 live_from_start: Whether to download livestreams videos from the start
450
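    For instance, a sketch combining the 'postprocessors' and 'progress_hooks'
    options above (the 'FFmpegExtractAudio' key and its 'preferredcodec'
    argument exist; the particular values are only illustrative):

        def hook(d):
            if d['status'] == 'finished':
                print('Downloaded', d['filename'])

        params = {
            'postprocessors': [{'key': 'FFmpegExtractAudio',
                                'preferredcodec': 'mp3'}],
            'progress_hooks': [hook],
        }
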
    The following parameters are not used by YoutubeDL itself, they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP
                       For compatibility with youtube-dl, a single list of args
                       can also be used

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists to different formats at
                       discontinuities such as ad breaks (default: False)
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       E.g. {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched:      Mark videos watched (even with --simulate). Only for YouTube

    The following options are deprecated and may be removed in the future:

    force_generic_extractor: Force downloader to use the generic extractor
                       - Use allowed_extractors = ['generic', 'default']
    playliststart:     - Use playlist_items
                       Playlist item to start at.
    playlistend:       - Use playlist_items
                       Playlist item to end at.
    playlistreverse:   - Use playlist_items
                       Download playlist items in reverse order.
    forceurl:          - Use forceprint
                       Force printing final URL.
    forcetitle:        - Use forceprint
                       Force printing title.
    forceid:           - Use forceprint
                       Force printing ID.
    forcethumbnail:    - Use forceprint
                       Force printing thumbnail URL.
    forcedescription:  - Use forceprint
                       Force printing description.
    forcefilename:     - Use forceprint
                       Force printing final filename.
    forceduration:     - Use forceprint
                       Force printing duration.
    allsubtitles:      - Use subtitleslangs = ['all']
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    include_ads:       - Doesn't work
                       Download ads as well
    call_home:         - Not implemented
                       Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging.
    post_hooks:        - Register a custom postprocessor
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.
    prefer_ffmpeg:     - avconv support is deprecated
                       If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: - Use extractor_args
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
    """

    _NUMERIC_FIELDS = {
        'width', 'height', 'asr', 'audio_channels', 'fps',
        'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
    }

    _format_fields = {
        # NB: Keep in sync with the docstring of extractor/common.py
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
        'preference', 'language', 'language_preference', 'quality', 'source_preference',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
    }
    _format_selection_exts = {
        'audio': set(MEDIA_EXTENSIONS.common_audio),
        'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
        'storyboards': set(MEDIA_EXTENSIONS.storyboards),
    }

    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        if params is None:
            params = {}
        self.params = params
        self._ies = {}
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._num_videos = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)

        windows_enable_vt_mode()
        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            out=stdout,
            error=sys.stderr,
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
        )
        self._allow_colors = Namespace(**{
            type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
            for type_, stream in self._out_files.items_ if type_ != 'console'
        })

        # The code is left like this to be reused for future deprecations
        MIN_SUPPORTED, MIN_RECOMMENDED = (3, 7), (3, 7)
        current_version = sys.version_info[:2]
        if current_version < MIN_RECOMMENDED:
            msg = ('Support for Python version %d.%d has been deprecated. '
                   'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details.'
                   '\n You will no longer receive updates on this version')
            if current_version < MIN_SUPPORTED:
                msg = 'Python version %d.%d is no longer supported'
            self.deprecation_warning(
                f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                ' If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecation_warning(msg)

        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}

        if self.params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = shutil.get_terminal_size().columns
                width_args = [] if width is None else ['-w', str(width)]
                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if auto_init:
            if auto_init != 'no_verbose_header':
                self.print_debug_header()
            self.add_default_info_extractors()

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))
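        # e.g. (illustrative): a callable 'format' receives a ctx dict and
        # returns/yields the chosen format dicts from it, such as
        #   lambda ctx: ctx['formats'][-1:]   # keep only the last listed format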

        # Set http_headers defaults according to std_headers
        self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))

        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                when=when)

        self._setup_opener()
        register_socks_protocols()

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            if fn is None:
                return False
            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        self.archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
                return False
            return True

        self.archive = set()
        preload_download_archive(self.params.get('download_archive'))
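        # The archive file is plain text with one '<extractor> <id>' entry per
        # line, e.g. 'youtube dQw4w9WgXcQ' (cf. make_archive_id in utils)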

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key, it will try to get one from
        the _ies list, if there's no instance it will create a new one and add
        it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        all_ies = {ie.IE_NAME.lower(): ie for ie in gen_extractor_classes()}
        all_ies['end'] = UnsupportedURLIE()
        try:
            ie_names = orderedSet_from_options(
                self.params.get('allowed_extractors', ['default']), {
                    'all': list(all_ies),
                    'default': [name for name, ie in all_ies.items() if ie._ENABLED],
                }, use_regex=True)
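            # e.g. (illustrative): allowed_extractors=['default', '-generic'] keeps
            # the default extractors minus the generic one; names match as regexes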
        except re.error as e:
            raise ValueError(f'Wrong regex for allowed_extractors: {e.pattern}')
        for name in ie_names:
            self.add_info_extractor(all_ies[name])
        self.write_debug(f'Loaded {len(ie_names)} extractors')

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
            for pp in pps:
                pp.add_progress_hook(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)

    def to_screen(self, message, skip_eol=False, quiet=None):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            return
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            return
        self._write_string(
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)

    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
            return
        self._write_string(code, self._out_files.console)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        else:
            self._send_console_code(f'\033]0;{message}\007')

    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[22;0t')  # Save the title on stack

    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[23;0t')  # Restore the title from stack

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None, is_error=True):
        """Determine action to take when a download problem appears.

        Depending on if the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        @param tb          If given, is additional traceback information
        @param is_error    Whether to raise error according to ignoreerrors
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not is_error:
            return
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    Styles = Namespace(
        HEADERS='yellow',
        EMPHASIS='light blue',
        FILENAME='green',
        ID='green',
        DELIM='blue',
        ERROR='red',
        WARNING='yellow',
        SUPPRESS='light black',
    )

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        text = str(text)
        if test_encoding:
            original_text = text
            # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
                text = fallback
        return format_text(text, f) if allow_colors else text if fallback is None else fallback

    def _format_out(self, *args, **kwargs):
        return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)

    def _format_screen(self, *args, **kwargs):
        return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)

    def _format_err(self, *args, **kwargs):
        return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'DeprecationWarning: {message}')
        else:
            self.to_stderr(f'{self._format_err("DeprecationWarning:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, *args, **kwargs):
        '''
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

    def write_debug(self, message, only_once=False):
        '''Log debug message or print message to stderr'''
        if not self.params.get('verbose', False):
            return
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']

    def _parse_outtmpl(self):
        sanitize = IDENTITY
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict)
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')
        return sanitize_path(path, force=self.params.get('windowsfilenames'))
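    # e.g. (illustrative): with paths={'home': '~/videos', 'thumbnail': 'thumbs'},
    # get_output_path('thumbnail', 'a.jpg') joins to '<expanded ~/videos>/thumbs/a.jpg'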

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$'
        # correspondingly that is not what we want since we need to keep
        # '%%' intact for template dict substitution step. Working around
        # with boundary-alike separator hack.
        sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. E.g. for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
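        # e.g. (illustrative): '100% done %(title)s' -> '100%% done %(title)s';
        # complete template fields like '%(title)s' are left untouched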
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err

    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)
        return info_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed
        """

        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int or slice
        FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?x)
            (?P<negate>-)?
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<remaining>
                (?P<alternate>(?<!\\),[^|&)]+)?
                (?:&(?P<replacement>.*?))?
                (?:\|(?P<default>.*?))?
            )$''')
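        # Illustrative parses (assumption, based on the regex above):
        #   'duration>%H-%M-%S|NA' -> fields='duration', strf_format='%H-%M-%S', default='NA'
        #   'playlist_index+10'    -> fields='playlist_index', maths='+10'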

        def _traverse_infodict(k):
            k = k.split('.')
            if k[0] == '':
                k.pop(0)
            return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)

        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
            if sanitize and value == '':
                value = None
            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']
                else NO_DEFAULT))

        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            return repr(obj)

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            initial_field = mobj.group('fields') if mobj else ''
            value, replacement, default = None, None, na
            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                replacement = mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
                else:
                    break

            fmt = outer_mobj.group('format')
            if fmt == 's' and value is not None and key in field_size_compat_map.keys():
                fmt = f'0{field_size_compat_map[key]:d}d'

            value = default if value is None else value if replacement is None else replacement

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
            if fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(value, default=_dumpjson_default, indent=4 if '#' in flags else None), str_fmt
            elif fmt[-1] == 'h':  # html
                value, fmt = escapeHTML(value), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode() % str(value).encode()
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                    value), str_fmt
            elif fmt[-1] == 'D':  # decimal suffix
                num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
                value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
                                              factor=1024 if '#' in flags else 1000)
            elif fmt[-1] == 'S':  # filename sanitization
                value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
            elif fmt[-1] == 'c':
                if value:
                    value = str(value)[0]
                else:
                    fmt = str_fmt
            elif fmt[-1] not in 'rs':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'

            if sanitize:
                if fmt[-1] == 'r':
                    # If value is an object, sanitize might convert it to a string
                    # So we convert it to repr first
                    value, fmt = repr(value), str_fmt
                if fmt[-1] in 'csr':
                    value = sanitizer(initial_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT

    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict

    def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
        assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
        if outtmpl is None:
            outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
        try:
            outtmpl = self._outtmpl_expandpath(outtmpl)
            filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
            if not filename:
                return None

            if tmpl_type in ('', 'temp'):
                final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
                if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
                    filename = replace_extension(filename, ext, final_ext)
            elif tmpl_type:
                force_ext = OUTTMPL_TYPES[tmpl_type]
                if force_ext:
                    filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                no_ext, *ext = filename.rsplit('.', 2)
                filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

    def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
        """Generate the output filename"""
        if outtmpl:
            assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
            dir_type = None
        filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
        if not filename and dir_type not in ('', 'temp'):
            return ''

        if warn:
            if not self.params.get('paths'):
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
        if filename == '-' or not filename:
            return filename

        return self.get_output_path(dir_type, filename)

    def _match_entry(self, info_dict, incomplete=False, silent=False):
        """ Returns None if the file should be downloaded """

        video_title = info_dict.get('title', info_dict.get('id', 'entry'))

        def check_filter():
            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title

            match_filter = self.params.get('match_filter')
            if match_filter is not None:
                try:
                    ret = match_filter(info_dict, incomplete=incomplete)
                except TypeError:
                    # For backward compatibility
                    ret = None if incomplete else match_filter(info_dict)
                if ret is NO_DEFAULT:
                    while True:
                        filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
                        reply = input(self._format_screen(
                            f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
                        if reply in {'y', ''}:
                            return None
                        elif reply == 'n':
                            return f'Skipping {video_title}'
                elif ret is not None:
                    return ret
            return None

        if self.in_download_archive(info_dict):
            reason = '%s has already been recorded in the archive' % video_title
            break_opt, break_err = 'break_on_existing', ExistingVideoReached
        else:
            reason = check_filter()
            break_opt, break_err = 'break_on_reject', RejectedVideoReached
        if reason is not None:
            if not silent:
                self.to_screen('[download] ' + reason)
            if self.params.get(break_opt, False):
                raise break_err()
        return reason
1392
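# Sketch of a custom match_filter compatible with _match_entry above
# (hedged example; the field values are hypothetical). Returning a string
# skips the entry with that reason, None accepts it, and NO_DEFAULT
# triggers the interactive prompt.
#
#   def only_short_videos(info_dict, *, incomplete=False):
#       if not incomplete and (info_dict.get('duration') or 0) > 600:
#           return 'Skipping video longer than 10 minutes'
#       return None
#
#   YoutubeDL({'match_filter': only_short_videos})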
1393 @staticmethod
1394 def add_extra_info(info_dict, extra_info):
1395 '''Set the keys from extra_info in info dict if they are missing'''
1396 for key, value in extra_info.items():
1397 info_dict.setdefault(key, value)
1398
1399 def extract_info(self, url, download=True, ie_key=None, extra_info=None,
1400 process=True, force_generic_extractor=False):
1401 """
1402 Return a list with a dictionary for each video extracted.
1403
1404 Arguments:
1405 url -- URL to extract
1406
1407 Keyword arguments:
1408 download -- whether to download videos during extraction
1409 ie_key -- extractor key hint
1410 extra_info -- dictionary containing the extra values to add to each result
1411 process -- whether to resolve all unresolved references (URLs, playlist items),
1412 must be True for download to work.
1413 force_generic_extractor -- force using the generic extractor
1414 """
1415
1416 if extra_info is None:
1417 extra_info = {}
1418
1419 if not ie_key and force_generic_extractor:
1420 ie_key = 'Generic'
1421
1422 if ie_key:
1423 ies = {ie_key: self._ies[ie_key]} if ie_key in self._ies else {}
1424 else:
1425 ies = self._ies
1426
1427 for key, ie in ies.items():
1428 if not ie.suitable(url):
1429 continue
1430
1431 if not ie.working():
1432 self.report_warning('The program functionality for this site has been marked as broken, '
1433 'and will probably not work.')
1434
1435 temp_id = ie.get_temp_id(url)
1436 if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': key}):
1437 self.to_screen(f'[{key}] {temp_id}: has already been recorded in the archive')
1438 if self.params.get('break_on_existing', False):
1439 raise ExistingVideoReached()
1440 break
1441 return self.__extract_info(url, self.get_info_extractor(key), download, extra_info, process)
1442 else:
1443 extractors_restricted = self.params.get('allowed_extractors') not in (None, ['default'])
1444 self.report_error(f'No suitable extractor{format_field(ie_key, None, " (%s)")} found for URL {url}',
1445 tb=False if extractors_restricted else None)
1446
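# Usage sketch (hypothetical URL): passing ie_key pins extraction to a
# single extractor instead of probing every IE with suitable().
#
#   info = ydl.extract_info('https://example.com/watch?v=xyz',
#                           download=False, ie_key='Generic')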
1447 def _handle_extraction_exceptions(func):
1448 @functools.wraps(func)
1449 def wrapper(self, *args, **kwargs):
1450 while True:
1451 try:
1452 return func(self, *args, **kwargs)
1453 except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
1454 raise
1455 except ReExtractInfo as e:
1456 if e.expected:
1457 self.to_screen(f'{e}; Re-extracting data')
1458 else:
1459 self.to_stderr('\r')
1460 self.report_warning(f'{e}; Re-extracting data')
1461 continue
1462 except GeoRestrictedError as e:
1463 msg = e.msg
1464 if e.countries:
1465 msg += '\nThis video is available in %s.' % ', '.join(
1466 map(ISO3166Utils.short2full, e.countries))
1467 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around this.'
1468 self.report_error(msg)
1469 except ExtractorError as e: # An error we somewhat expected
1470 self.report_error(str(e), e.format_traceback())
1471 except Exception as e:
1472 if self.params.get('ignoreerrors'):
1473 self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
1474 else:
1475 raise
1476 break
1477 return wrapper
1478
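# Illustrative use of the decorator above (hypothetical helper): the
# wrapped call is retried whenever it raises ReExtractInfo (e.g. after
# _wait_for_video), while DownloadCancelled and friends propagate.
#
#   @_handle_extraction_exceptions
#   def __try_once(self, url, ie):
#       return ie.extract(url)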
1479 def _wait_for_video(self, ie_result={}):
1480 if (not self.params.get('wait_for_video')
1481 or ie_result.get('_type', 'video') != 'video'
1482 or ie_result.get('formats') or ie_result.get('url')):
1483 return
1484
1485 format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
1486 last_msg = ''
1487
1488 def progress(msg):
1489 nonlocal last_msg
1490 full_msg = f'{msg}\n'
1491 if not self.params.get('noprogress'):
1492 full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
1493 elif last_msg:
1494 return
1495 self.to_screen(full_msg, skip_eol=True)
1496 last_msg = msg
1497
1498 min_wait, max_wait = self.params.get('wait_for_video')
1499 diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
1500 if diff is None and ie_result.get('live_status') == 'is_upcoming':
1501 diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
1502 self.report_warning('Release time of video is not known')
1503 elif ie_result and (diff or 0) <= 0:
1504 self.report_warning('Video should already be available according to extracted info')
1505 diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
1506 self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')
1507
1508 wait_till = time.time() + diff
1509 try:
1510 while True:
1511 diff = wait_till - time.time()
1512 if diff <= 0:
1513 progress('')
1514 raise ReExtractInfo('[wait] Wait period ended', expected=True)
1515 progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
1516 time.sleep(1)
1517 except KeyboardInterrupt:
1518 progress('')
1519 raise ReExtractInfo('[wait] Interrupted by user', expected=True)
1520 except BaseException as e:
1521 if not isinstance(e, ReExtractInfo):
1522 self.to_screen('')
1523 raise
1524
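# Hedged example of the option consumed above: 'wait_for_video' is a
# (min_secs, max_secs) tuple; when the release time is unknown, a wait
# inside that window is chosen at random.
#
#   YoutubeDL({'wait_for_video': (60, 600)})  # poll between 1 and 10 minutes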
1525 @_handle_extraction_exceptions
1526 def __extract_info(self, url, ie, download, extra_info, process):
1527 try:
1528 ie_result = ie.extract(url)
1529 except UserNotLive as e:
1530 if process:
1531 if self.params.get('wait_for_video'):
1532 self.report_warning(e)
1533 self._wait_for_video()
1534 raise
1535 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
1536 self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
1537 return
1538 if isinstance(ie_result, list):
1539 # Backwards compatibility: old IE result format
1540 ie_result = {
1541 '_type': 'compat_list',
1542 'entries': ie_result,
1543 }
1544 if extra_info.get('original_url'):
1545 ie_result.setdefault('original_url', extra_info['original_url'])
1546 self.add_default_extra_info(ie_result, ie, url)
1547 if process:
1548 self._wait_for_video(ie_result)
1549 return self.process_ie_result(ie_result, download, extra_info)
1550 else:
1551 return ie_result
1552
1553 def add_default_extra_info(self, ie_result, ie, url):
1554 if url is not None:
1555 self.add_extra_info(ie_result, {
1556 'webpage_url': url,
1557 'original_url': url,
1558 })
1559 webpage_url = ie_result.get('webpage_url')
1560 if webpage_url:
1561 self.add_extra_info(ie_result, {
1562 'webpage_url_basename': url_basename(webpage_url),
1563 'webpage_url_domain': get_domain(webpage_url),
1564 })
1565 if ie is not None:
1566 self.add_extra_info(ie_result, {
1567 'extractor': ie.IE_NAME,
1568 'extractor_key': ie.ie_key(),
1569 })
1570
1571 def process_ie_result(self, ie_result, download=True, extra_info=None):
1572 """
1573 Take the result of the ie (may be modified) and resolve all unresolved
1574 references (URLs, playlist items).
1575
1576 It will also download the videos if 'download'.
1577 Returns the resolved ie_result.
1578 """
1579 if extra_info is None:
1580 extra_info = {}
1581 result_type = ie_result.get('_type', 'video')
1582
1583 if result_type in ('url', 'url_transparent'):
1584 ie_result['url'] = sanitize_url(
1585 ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
1586 if ie_result.get('original_url'):
1587 extra_info.setdefault('original_url', ie_result['original_url'])
1588
1589 extract_flat = self.params.get('extract_flat', False)
1590 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1591 or extract_flat is True):
1592 info_copy = ie_result.copy()
1593 ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
1594 if ie and not ie_result.get('id'):
1595 info_copy['id'] = ie.get_temp_id(ie_result['url'])
1596 self.add_default_extra_info(info_copy, ie, ie_result['url'])
1597 self.add_extra_info(info_copy, extra_info)
1598 info_copy, _ = self.pre_process(info_copy)
1599 self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
1600 self._raise_pending_errors(info_copy)
1601 if self.params.get('force_write_download_archive', False):
1602 self.record_download_archive(info_copy)
1603 return ie_result
1604
1605 if result_type == 'video':
1606 self.add_extra_info(ie_result, extra_info)
1607 ie_result = self.process_video_result(ie_result, download=download)
1608 self._raise_pending_errors(ie_result)
1609 additional_urls = (ie_result or {}).get('additional_urls')
1610 if additional_urls:
1611 # TODO: Improve MetadataParserPP to allow setting a list
1612 if isinstance(additional_urls, str):
1613 additional_urls = [additional_urls]
1614 self.to_screen(
1615 '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
1616 self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
1617 ie_result['additional_entries'] = [
1618 self.extract_info(
1619 url, download, extra_info=extra_info,
1620 force_generic_extractor=self.params.get('force_generic_extractor'))
1621 for url in additional_urls
1622 ]
1623 return ie_result
1624 elif result_type == 'url':
1625 # We have to add extra_info to the results because it may be
1626 # contained in a playlist
1627 return self.extract_info(
1628 ie_result['url'], download,
1629 ie_key=ie_result.get('ie_key'),
1630 extra_info=extra_info)
1631 elif result_type == 'url_transparent':
1632 # Use the information from the embedding page
1633 info = self.extract_info(
1634 ie_result['url'], ie_key=ie_result.get('ie_key'),
1635 extra_info=extra_info, download=False, process=False)
1636
1637 # extract_info may return None when ignoreerrors is enabled and
1638 # extraction failed with an error, don't crash and return early
1639 # in this case
1640 if not info:
1641 return info
1642
1643 exempted_fields = {'_type', 'url', 'ie_key'}
1644 if not ie_result.get('section_end') and ie_result.get('section_start') is None:
1645 # For video clips, the id etc of the clip extractor should be used
1646 exempted_fields |= {'id', 'extractor', 'extractor_key'}
1647
1648 new_result = info.copy()
1649 new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))
1650
1651 # Extracted info may not be a video result (i.e.
1652 # info.get('_type', 'video') != video) but rather an url or
1653 # url_transparent. In such cases outer metadata (from ie_result)
1654 # should be propagated to inner one (info). For this to happen
1655 # _type of info should be overridden with url_transparent. This
1656 # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
1657 if new_result.get('_type') == 'url':
1658 new_result['_type'] = 'url_transparent'
1659
1660 return self.process_ie_result(
1661 new_result, download=download, extra_info=extra_info)
1662 elif result_type in ('playlist', 'multi_video'):
1663 # Protect from infinite recursion due to recursively nested playlists
1664 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
1665 webpage_url = ie_result['webpage_url']
1666 if webpage_url in self._playlist_urls:
1667 self.to_screen(
1668 '[download] Skipping already downloaded playlist: %s'
1669 % (ie_result.get('title') or ie_result.get('id')))
1670 return
1671
1672 self._playlist_level += 1
1673 self._playlist_urls.add(webpage_url)
1674 self._fill_common_fields(ie_result, False)
1675 self._sanitize_thumbnails(ie_result)
1676 try:
1677 return self.__process_playlist(ie_result, download)
1678 finally:
1679 self._playlist_level -= 1
1680 if not self._playlist_level:
1681 self._playlist_urls.clear()
1682 elif result_type == 'compat_list':
1683 self.report_warning(
1684 'Extractor %s returned a compat_list result. '
1685 'It needs to be updated.' % ie_result.get('extractor'))
1686
1687 def _fixup(r):
1688 self.add_extra_info(r, {
1689 'extractor': ie_result['extractor'],
1690 'webpage_url': ie_result['webpage_url'],
1691 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1692 'webpage_url_domain': get_domain(ie_result['webpage_url']),
1693 'extractor_key': ie_result['extractor_key'],
1694 })
1695 return r
1696 ie_result['entries'] = [
1697 self.process_ie_result(_fixup(r), download, extra_info)
1698 for r in ie_result['entries']
1699 ]
1700 return ie_result
1701 else:
1702 raise Exception('Invalid result type: %s' % result_type)
1703
1704 def _ensure_dir_exists(self, path):
1705 return make_dir(path, self.report_error)
1706
1707 @staticmethod
1708 def _playlist_infodict(ie_result, strict=False, **kwargs):
1709 info = {
1710 'playlist_count': ie_result.get('playlist_count'),
1711 'playlist': ie_result.get('title') or ie_result.get('id'),
1712 'playlist_id': ie_result.get('id'),
1713 'playlist_title': ie_result.get('title'),
1714 'playlist_uploader': ie_result.get('uploader'),
1715 'playlist_uploader_id': ie_result.get('uploader_id'),
1716 **kwargs,
1717 }
1718 if strict:
1719 return info
1720 return {
1721 **info,
1722 'playlist_index': 0,
1723 '__last_playlist_index': max(ie_result['requested_entries'] or (0, 0)),
1724 'extractor': ie_result['extractor'],
1725 'webpage_url': ie_result['webpage_url'],
1726 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1727 'webpage_url_domain': get_domain(ie_result['webpage_url']),
1728 'extractor_key': ie_result['extractor_key'],
1729 }
1730
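# The strict dict above is what playlist-level filtering sees; these
# fields are also usable in output templates, e.g. (illustrative):
#
#   '%(playlist)s/%(playlist_index)03d - %(title)s.%(ext)s'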
1731 def __process_playlist(self, ie_result, download):
1732 """Process each entry in the playlist"""
1733 assert ie_result['_type'] in ('playlist', 'multi_video')
1734
1735 common_info = self._playlist_infodict(ie_result, strict=True)
1736 title = common_info.get('playlist') or '<Untitled>'
1737 if self._match_entry(common_info, incomplete=True) is not None:
1738 return
1739 self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')
1740
1741 all_entries = PlaylistEntries(self, ie_result)
1742 entries = orderedSet(all_entries.get_requested_items(), lazy=True)
1743
1744 lazy = self.params.get('lazy_playlist')
1745 if lazy:
1746 resolved_entries, n_entries = [], 'N/A'
1747 ie_result['requested_entries'], ie_result['entries'] = None, None
1748 else:
1749 entries = resolved_entries = list(entries)
1750 n_entries = len(resolved_entries)
1751 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
1752 if not ie_result.get('playlist_count'):
1753 # Better to do this after potentially exhausting entries
1754 ie_result['playlist_count'] = all_entries.get_full_count()
1755
1756 extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
1757 ie_copy = collections.ChainMap(ie_result, extra)
1758
1759 _infojson_written = False
1760 write_playlist_files = self.params.get('allow_playlist_files', True)
1761 if write_playlist_files and self.params.get('list_thumbnails'):
1762 self.list_thumbnails(ie_result)
1763 if write_playlist_files and not self.params.get('simulate'):
1764 _infojson_written = self._write_info_json(
1765 'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
1766 if _infojson_written is None:
1767 return
1768 if self._write_description('playlist', ie_result,
1769 self.prepare_filename(ie_copy, 'pl_description')) is None:
1770 return
1771 # TODO: This should be passed to ThumbnailsConvertor if necessary
1772 self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))
1773
1774 if lazy:
1775 if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
1776 self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
1777 elif self.params.get('playlistreverse'):
1778 entries.reverse()
1779 elif self.params.get('playlistrandom'):
1780 random.shuffle(entries)
1781
1782 self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} videos'
1783 f'{format_field(ie_result, "playlist_count", " of %s")}')
1784
1785 keep_resolved_entries = self.params.get('extract_flat') != 'discard'
1786 if self.params.get('extract_flat') == 'discard_in_playlist':
1787 keep_resolved_entries = ie_result['_type'] != 'playlist'
1788 if keep_resolved_entries:
1789 self.write_debug('The information of all playlist entries will be held in memory')
1790
1791 failures = 0
1792 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
1793 for i, (playlist_index, entry) in enumerate(entries):
1794 if lazy:
1795 resolved_entries.append((playlist_index, entry))
1796 if not entry:
1797 continue
1798
1799 entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
1800 if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
1801 playlist_index = ie_result['requested_entries'][i]
1802
1803 entry_copy = collections.ChainMap(entry, {
1804 **common_info,
1805 'n_entries': int_or_none(n_entries),
1806 'playlist_index': playlist_index,
1807 'playlist_autonumber': i + 1,
1808 })
1809
1810 if self._match_entry(entry_copy, incomplete=True) is not None:
1811 # For compatibility with youtube-dl. See https://github.com/yt-dlp/yt-dlp/issues/4369
1812 resolved_entries[i] = (playlist_index, NO_DEFAULT)
1813 continue
1814
1815 self.to_screen('[download] Downloading video %s of %s' % (
1816 self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
1817
1818 extra.update({
1819 'playlist_index': playlist_index,
1820 'playlist_autonumber': i + 1,
1821 })
1822 entry_result = self.__process_iterable_entry(entry, download, extra)
1823 if not entry_result:
1824 failures += 1
1825 if failures >= max_failures:
1826 self.report_error(
1827 f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
1828 break
1829 if keep_resolved_entries:
1830 resolved_entries[i] = (playlist_index, entry_result)
1831
1832 # Update with processed data
1833 ie_result['requested_entries'] = [i for i, e in resolved_entries if e is not NO_DEFAULT]
1834 ie_result['entries'] = [e for _, e in resolved_entries if e is not NO_DEFAULT]
1835
1836 # Write the updated info to json
1837 if _infojson_written is True and self._write_info_json(
1838 'updated playlist', ie_result,
1839 self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
1840 return
1841
1842 ie_result = self.run_all_pps('playlist', ie_result)
1843 self.to_screen(f'[download] Finished downloading playlist: {title}')
1844 return ie_result
1845
1846 @_handle_extraction_exceptions
1847 def __process_iterable_entry(self, entry, download, extra_info):
1848 return self.process_ie_result(
1849 entry, download=download, extra_info=extra_info)
1850
1851 def _build_format_filter(self, filter_spec):
1852 " Returns a function to filter the formats according to the filter_spec "
1853
1854 OPERATORS = {
1855 '<': operator.lt,
1856 '<=': operator.le,
1857 '>': operator.gt,
1858 '>=': operator.ge,
1859 '=': operator.eq,
1860 '!=': operator.ne,
1861 }
1862 operator_rex = re.compile(r'''(?x)\s*
1863 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
1864 (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1865 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
1866 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
1867 m = operator_rex.fullmatch(filter_spec)
1868 if m:
1869 try:
1870 comparison_value = int(m.group('value'))
1871 except ValueError:
1872 comparison_value = parse_filesize(m.group('value'))
1873 if comparison_value is None:
1874 comparison_value = parse_filesize(m.group('value') + 'B')
1875 if comparison_value is None:
1876 raise ValueError(
1877 'Invalid value %r in format specification %r' % (
1878 m.group('value'), filter_spec))
1879 op = OPERATORS[m.group('op')]
1880
1881 if not m:
1882 STR_OPERATORS = {
1883 '=': operator.eq,
1884 '^=': lambda attr, value: attr.startswith(value),
1885 '$=': lambda attr, value: attr.endswith(value),
1886 '*=': lambda attr, value: value in attr,
1887 '~=': lambda attr, value: value.search(attr) is not None
1888 }
1889 str_operator_rex = re.compile(r'''(?x)\s*
1890 (?P<key>[a-zA-Z0-9._-]+)\s*
1891 (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
1892 (?P<quote>["'])?
1893 (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
1894 (?(quote)(?P=quote))\s*
1895 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
1896 m = str_operator_rex.fullmatch(filter_spec)
1897 if m:
1898 if m.group('op') == '~=':
1899 comparison_value = re.compile(m.group('value'))
1900 else:
1901 comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
1902 str_op = STR_OPERATORS[m.group('op')]
1903 if m.group('negation'):
1904 op = lambda attr, value: not str_op(attr, value)
1905 else:
1906 op = str_op
1907
1908 if not m:
1909 raise SyntaxError('Invalid filter specification %r' % filter_spec)
1910
1911 def _filter(f):
1912 actual_value = f.get(m.group('key'))
1913 if actual_value is None:
1914 return m.group('none_inclusive')
1915 return op(actual_value, comparison_value)
1916 return _filter
1917
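# Illustrative filter specs accepted above (values hypothetical):
#
#   _build_format_filter('height<=1080')          # numeric comparison
#   _build_format_filter('filesize<50M')          # parsed via parse_filesize
#   _build_format_filter('ext=mp4')               # string equality
#   _build_format_filter('format_note~=premium')  # regex search
#   _build_format_filter('height<=?1080')         # '?' also keeps formats missing the key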
1918 def _check_formats(self, formats):
1919 for f in formats:
1920 self.to_screen('[info] Testing format %s' % f['format_id'])
1921 path = self.get_output_path('temp')
1922 if not self._ensure_dir_exists(f'{path}/'):
1923 continue
1924 temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
1925 temp_file.close()
1926 try:
1927 success, _ = self.dl(temp_file.name, f, test=True)
1928 except (DownloadError, OSError, ValueError) + network_exceptions:
1929 success = False
1930 finally:
1931 if os.path.exists(temp_file.name):
1932 try:
1933 os.remove(temp_file.name)
1934 except OSError:
1935 self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
1936 if success:
1937 yield f
1938 else:
1939 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
1940
1941 def _default_format_spec(self, info_dict, download=True):
1942
1943 def can_merge():
1944 merger = FFmpegMergerPP(self)
1945 return merger.available and merger.can_merge()
1946
1947 prefer_best = (
1948 not self.params.get('simulate')
1949 and download
1950 and (
1951 not can_merge()
1952 or info_dict.get('is_live') and not self.params.get('live_from_start')
1953 or self.params['outtmpl']['default'] == '-'))
1954 compat = (
1955 prefer_best
1956 or self.params.get('allow_multiple_audio_streams', False)
1957 or 'format-spec' in self.params['compat_opts'])
1958
1959 return (
1960 'best/bestvideo+bestaudio' if prefer_best
1961 else 'bestvideo*+bestaudio/best' if not compat
1962 else 'bestvideo+bestaudio/best')
1963
1964 def build_format_selector(self, format_spec):
1965 def syntax_error(note, start):
1966 message = (
1967 'Invalid format specification: '
1968 '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
1969 return SyntaxError(message)
1970
1971 PICKFIRST = 'PICKFIRST'
1972 MERGE = 'MERGE'
1973 SINGLE = 'SINGLE'
1974 GROUP = 'GROUP'
1975 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
1976
1977 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
1978 'video': self.params.get('allow_multiple_video_streams', False)}
1979
1980 check_formats = self.params.get('check_formats') == 'selected'
1981
1982 def _parse_filter(tokens):
1983 filter_parts = []
1984 for type, string, start, _, _ in tokens:
1985 if type == tokenize.OP and string == ']':
1986 return ''.join(filter_parts)
1987 else:
1988 filter_parts.append(string)
1989
1990 def _remove_unused_ops(tokens):
1991 # Remove operators that we don't use and join them with the surrounding strings.
1992 # E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
1993 ALLOWED_OPS = ('/', '+', ',', '(', ')')
1994 last_string, last_start, last_end, last_line = None, None, None, None
1995 for type, string, start, end, line in tokens:
1996 if type == tokenize.OP and string == '[':
1997 if last_string:
1998 yield tokenize.NAME, last_string, last_start, last_end, last_line
1999 last_string = None
2000 yield type, string, start, end, line
2001 # everything inside brackets will be handled by _parse_filter
2002 for type, string, start, end, line in tokens:
2003 yield type, string, start, end, line
2004 if type == tokenize.OP and string == ']':
2005 break
2006 elif type == tokenize.OP and string in ALLOWED_OPS:
2007 if last_string:
2008 yield tokenize.NAME, last_string, last_start, last_end, last_line
2009 last_string = None
2010 yield type, string, start, end, line
2011 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
2012 if not last_string:
2013 last_string = string
2014 last_start = start
2015 last_end = end
2016 else:
2017 last_string += string
2018 if last_string:
2019 yield tokenize.NAME, last_string, last_start, last_end, last_line
2020
2021 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
2022 selectors = []
2023 current_selector = None
2024 for type, string, start, _, _ in tokens:
2025 # ENCODING is only defined in python 3.x
2026 if type == getattr(tokenize, 'ENCODING', None):
2027 continue
2028 elif type in [tokenize.NAME, tokenize.NUMBER]:
2029 current_selector = FormatSelector(SINGLE, string, [])
2030 elif type == tokenize.OP:
2031 if string == ')':
2032 if not inside_group:
2033 # ')' will be handled by the parentheses group
2034 tokens.restore_last_token()
2035 break
2036 elif inside_merge and string in ['/', ',']:
2037 tokens.restore_last_token()
2038 break
2039 elif inside_choice and string == ',':
2040 tokens.restore_last_token()
2041 break
2042 elif string == ',':
2043 if not current_selector:
2044 raise syntax_error('"," must follow a format selector', start)
2045 selectors.append(current_selector)
2046 current_selector = None
2047 elif string == '/':
2048 if not current_selector:
2049 raise syntax_error('"/" must follow a format selector', start)
2050 first_choice = current_selector
2051 second_choice = _parse_format_selection(tokens, inside_choice=True)
2052 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
2053 elif string == '[':
2054 if not current_selector:
2055 current_selector = FormatSelector(SINGLE, 'best', [])
2056 format_filter = _parse_filter(tokens)
2057 current_selector.filters.append(format_filter)
2058 elif string == '(':
2059 if current_selector:
2060 raise syntax_error('Unexpected "("', start)
2061 group = _parse_format_selection(tokens, inside_group=True)
2062 current_selector = FormatSelector(GROUP, group, [])
2063 elif string == '+':
2064 if not current_selector:
2065 raise syntax_error('Unexpected "+"', start)
2066 selector_1 = current_selector
2067 selector_2 = _parse_format_selection(tokens, inside_merge=True)
2068 if not selector_2:
2069 raise syntax_error('Expected a selector', start)
2070 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
2071 else:
2072 raise syntax_error(f'Operator not recognized: "{string}"', start)
2073 elif type == tokenize.ENDMARKER:
2074 break
2075 if current_selector:
2076 selectors.append(current_selector)
2077 return selectors
2078
2079 def _merge(formats_pair):
2080 format_1, format_2 = formats_pair
2081
2082 formats_info = []
2083 formats_info.extend(format_1.get('requested_formats', (format_1,)))
2084 formats_info.extend(format_2.get('requested_formats', (format_2,)))
2085
2086 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
2087 get_no_more = {'video': False, 'audio': False}
2088 for (i, fmt_info) in enumerate(formats_info):
2089 if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
2090 formats_info.pop(i)
2091 continue
2092 for aud_vid in ['audio', 'video']:
2093 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
2094 if get_no_more[aud_vid]:
2095 formats_info.pop(i)
2096 break
2097 get_no_more[aud_vid] = True
2098
2099 if len(formats_info) == 1:
2100 return formats_info[0]
2101
2102 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
2103 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
2104
2105 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
2106 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
2107
2108 output_ext = get_compatible_ext(
2109 vcodecs=[f.get('vcodec') for f in video_fmts],
2110 acodecs=[f.get('acodec') for f in audio_fmts],
2111 vexts=[f['ext'] for f in video_fmts],
2112 aexts=[f['ext'] for f in audio_fmts],
2113 preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
2114 or self.params.get('prefer_free_formats') and ('webm', 'mkv')))
2115
2116 filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
2117
2118 new_dict = {
2119 'requested_formats': formats_info,
2120 'format': '+'.join(filtered('format')),
2121 'format_id': '+'.join(filtered('format_id')),
2122 'ext': output_ext,
2123 'protocol': '+'.join(map(determine_protocol, formats_info)),
2124 'language': '+'.join(orderedSet(filtered('language'))) or None,
2125 'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
2126 'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
2127 'tbr': sum(filtered('tbr', 'vbr', 'abr')),
2128 }
2129
2130 if the_only_video:
2131 new_dict.update({
2132 'width': the_only_video.get('width'),
2133 'height': the_only_video.get('height'),
2134 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
2135 'fps': the_only_video.get('fps'),
2136 'dynamic_range': the_only_video.get('dynamic_range'),
2137 'vcodec': the_only_video.get('vcodec'),
2138 'vbr': the_only_video.get('vbr'),
2139 'stretched_ratio': the_only_video.get('stretched_ratio'),
2140 })
2141
2142 if the_only_audio:
2143 new_dict.update({
2144 'acodec': the_only_audio.get('acodec'),
2145 'abr': the_only_audio.get('abr'),
2146 'asr': the_only_audio.get('asr'),
2147 'audio_channels': the_only_audio.get('audio_channels')
2148 })
2149
2150 return new_dict
2151
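# e.g. (hypothetical ids) merging a video-only format '137' with an
# audio-only format '140' yields format_id '137+140', an ext chosen by
# get_compatible_ext and tbr summed across both streams.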
2152 def _check_formats(formats):
2153 if not check_formats:
2154 yield from formats
2155 return
2156 yield from self._check_formats(formats)
2157
2158 def _build_selector_function(selector):
2159 if isinstance(selector, list): # ,
2160 fs = [_build_selector_function(s) for s in selector]
2161
2162 def selector_function(ctx):
2163 for f in fs:
2164 yield from f(ctx)
2165 return selector_function
2166
2167 elif selector.type == GROUP: # ()
2168 selector_function = _build_selector_function(selector.selector)
2169
2170 elif selector.type == PICKFIRST: # /
2171 fs = [_build_selector_function(s) for s in selector.selector]
2172
2173 def selector_function(ctx):
2174 for f in fs:
2175 picked_formats = list(f(ctx))
2176 if picked_formats:
2177 return picked_formats
2178 return []
2179
2180 elif selector.type == MERGE: # +
2181 selector_1, selector_2 = map(_build_selector_function, selector.selector)
2182
2183 def selector_function(ctx):
2184 for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
2185 yield _merge(pair)
2186
2187 elif selector.type == SINGLE: # atom
2188 format_spec = selector.selector or 'best'
2189
2190 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
2191 if format_spec == 'all':
2192 def selector_function(ctx):
2193 yield from _check_formats(ctx['formats'][::-1])
2194 elif format_spec == 'mergeall':
2195 def selector_function(ctx):
2196 formats = list(_check_formats(
2197 f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
2198 if not formats:
2199 return
2200 merged_format = formats[-1]
2201 for f in formats[-2::-1]:
2202 merged_format = _merge((merged_format, f))
2203 yield merged_format
2204
2205 else:
2206 format_fallback, separate_fallback, format_reverse, format_idx = False, None, True, 1
2207 mobj = re.match(
2208 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
2209 format_spec)
2210 if mobj is not None:
2211 format_idx = int_or_none(mobj.group('n'), default=1)
2212 format_reverse = mobj.group('bw')[0] == 'b'
2213 format_type = (mobj.group('type') or [None])[0]
2214 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
2215 format_modified = mobj.group('mod') is not None
2216
2217 format_fallback = not format_type and not format_modified # for b, w
2218 _filter_f = (
2219 (lambda f: f.get('%scodec' % format_type) != 'none')
2220 if format_type and format_modified # bv*, ba*, wv*, wa*
2221 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
2222 if format_type # bv, ba, wv, wa
2223 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
2224 if not format_modified # b, w
2225 else lambda f: True) # b*, w*
2226 filter_f = lambda f: _filter_f(f) and (
2227 f.get('vcodec') != 'none' or f.get('acodec') != 'none')
2228 else:
2229 if format_spec in self._format_selection_exts['audio']:
2230 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
2231 elif format_spec in self._format_selection_exts['video']:
2232 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
2233 separate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
2234 elif format_spec in self._format_selection_exts['storyboards']:
2235 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
2236 else:
2237 filter_f = lambda f: f.get('format_id') == format_spec # id
2238
2239 def selector_function(ctx):
2240 formats = list(ctx['formats'])
2241 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
2242 if not matches:
2243 if format_fallback and ctx['incomplete_formats']:
2244 # for extractors with incomplete formats (audio-only (soundcloud)
2245 # or video-only (imgur)), best/worst will fall back to the
2246 # best/worst {video,audio}-only format
2247 matches = formats
2248 elif separate_fallback and not ctx['has_merged_format']:
2249 # for compatibility with youtube-dl when there is no pre-merged format
2250 matches = list(filter(separate_fallback, formats))
2251 matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
2252 try:
2253 yield matches[format_idx - 1]
2254 except LazyList.IndexError:
2255 return
2256
2257 filters = [self._build_format_filter(f) for f in selector.filters]
2258
2259 def final_selector(ctx):
2260 ctx_copy = dict(ctx)
2261 for _filter in filters:
2262 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
2263 return selector_function(ctx_copy)
2264 return final_selector
2265
2266 stream = io.BytesIO(format_spec.encode())
2267 try:
2268 tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
2269 except tokenize.TokenError:
2270 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
2271
2272 class TokenIterator:
2273 def __init__(self, tokens):
2274 self.tokens = tokens
2275 self.counter = 0
2276
2277 def __iter__(self):
2278 return self
2279
2280 def __next__(self):
2281 if self.counter >= len(self.tokens):
2282 raise StopIteration()
2283 value = self.tokens[self.counter]
2284 self.counter += 1
2285 return value
2286
2287 next = __next__
2288
2289 def restore_last_token(self):
2290 self.counter -= 1
2291
2292 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
2293 return _build_selector_function(parsed_selector)
2294
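# Illustrative selector strings the parser above accepts:
#
#   build_format_selector('bv*+ba/b')                   # merge, then fallback
#   build_format_selector('bestvideo[height<=720]+bestaudio')
#   build_format_selector('(mp4,webm)[filesize<100M]')  # grouped + filtered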
2295 def _calc_headers(self, info_dict):
2296 res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})
2297
2298 cookies = self._calc_cookies(info_dict['url'])
2299 if cookies:
2300 res['Cookie'] = cookies
2301
2302 if 'X-Forwarded-For' not in res:
2303 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
2304 if x_forwarded_for_ip:
2305 res['X-Forwarded-For'] = x_forwarded_for_ip
2306
2307 return res
2308
2309 def _calc_cookies(self, url):
2310 pr = sanitized_Request(url)
2311 self.cookiejar.add_cookie_header(pr)
2312 return pr.get_header('Cookie')
2313
2314 def _sort_thumbnails(self, thumbnails):
2315 thumbnails.sort(key=lambda t: (
2316 t.get('preference') if t.get('preference') is not None else -1,
2317 t.get('width') if t.get('width') is not None else -1,
2318 t.get('height') if t.get('height') is not None else -1,
2319 t.get('id') if t.get('id') is not None else '',
2320 t.get('url')))
2321
2322 def _sanitize_thumbnails(self, info_dict):
2323 thumbnails = info_dict.get('thumbnails')
2324 if thumbnails is None:
2325 thumbnail = info_dict.get('thumbnail')
2326 if thumbnail:
2327 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
2328 if not thumbnails:
2329 return
2330
2331 def check_thumbnails(thumbnails):
2332 for t in thumbnails:
2333 self.to_screen(f'[info] Testing thumbnail {t["id"]}')
2334 try:
2335 self.urlopen(HEADRequest(t['url']))
2336 except network_exceptions as err:
2337 self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
2338 continue
2339 yield t
2340
2341 self._sort_thumbnails(thumbnails)
2342 for i, t in enumerate(thumbnails):
2343 if t.get('id') is None:
2344 t['id'] = '%d' % i
2345 if t.get('width') and t.get('height'):
2346 t['resolution'] = '%dx%d' % (t['width'], t['height'])
2347 t['url'] = sanitize_url(t['url'])
2348
2349 if self.params.get('check_formats') is True:
2350 info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
2351 else:
2352 info_dict['thumbnails'] = thumbnails
2353
2354 def _fill_common_fields(self, info_dict, is_video=True):
2355 # TODO: move sanitization here
2356 if is_video:
2357 # playlists are allowed to lack "title"
2358 title = info_dict.get('title', NO_DEFAULT)
2359 if title is NO_DEFAULT:
2360 raise ExtractorError('Missing "title" field in extractor result',
2361 video_id=info_dict['id'], ie=info_dict['extractor'])
2362 info_dict['fulltitle'] = title
2363 if not title:
2364 if title == '':
2365 self.write_debug('Extractor gave empty title. Creating a generic title')
2366 else:
2367 self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
2368 info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'
2369
2370 if info_dict.get('duration') is not None:
2371 info_dict['duration_string'] = formatSeconds(info_dict['duration'])
2372
2373 for ts_key, date_key in (
2374 ('timestamp', 'upload_date'),
2375 ('release_timestamp', 'release_date'),
2376 ('modified_timestamp', 'modified_date'),
2377 ):
2378 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
2379 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2380 # see http://bugs.python.org/issue1646728)
2381 with contextlib.suppress(ValueError, OverflowError, OSError):
2382 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
2383 info_dict[date_key] = upload_date.strftime('%Y%m%d')
2384
2385 live_keys = ('is_live', 'was_live')
2386 live_status = info_dict.get('live_status')
2387 if live_status is None:
2388 for key in live_keys:
2389 if info_dict.get(key) is False:
2390 continue
2391 if info_dict.get(key):
2392 live_status = key
2393 break
2394 if all(info_dict.get(key) is False for key in live_keys):
2395 live_status = 'not_live'
2396 if live_status:
2397 info_dict['live_status'] = live_status
2398 for key in live_keys:
2399 if info_dict.get(key) is None:
2400 info_dict[key] = (live_status == key)
2401
2402 # Auto generate title fields corresponding to the *_number fields when missing
2403 # in order to always have clean titles. This is very common for TV series.
2404 for field in ('chapter', 'season', 'episode'):
2405 if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
2406 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
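# e.g. (illustrative) {'episode_number': 3} with no 'episode' field
# becomes {'episode_number': 3, 'episode': 'Episode 3'}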
2407
2408 def _raise_pending_errors(self, info):
2409 err = info.pop('__pending_error', None)
2410 if err:
2411 self.report_error(err, tb=False)
2412
2413 def process_video_result(self, info_dict, download=True):
2414 assert info_dict.get('_type', 'video') == 'video'
2415 self._num_videos += 1
2416
2417 if 'id' not in info_dict:
2418 raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
2419 elif not info_dict.get('id'):
2420 raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])
2421
2422 def report_force_conversion(field, field_not, conversion):
2423 self.report_warning(
2424 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2425 % (field, field_not, conversion))
2426
2427 def sanitize_string_field(info, string_field):
2428 field = info.get(string_field)
2429 if field is None or isinstance(field, str):
2430 return
2431 report_force_conversion(string_field, 'a string', 'string')
2432 info[string_field] = str(field)
2433
2434 def sanitize_numeric_fields(info):
2435 for numeric_field in self._NUMERIC_FIELDS:
2436 field = info.get(numeric_field)
2437 if field is None or isinstance(field, (int, float)):
2438 continue
2439 report_force_conversion(numeric_field, 'numeric', 'int')
2440 info[numeric_field] = int_or_none(field)
2441
2442 sanitize_string_field(info_dict, 'id')
2443 sanitize_numeric_fields(info_dict)
2444 if info_dict.get('section_end') and info_dict.get('section_start') is not None:
2445 info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
2446 if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
2447 self.report_warning('"duration" field is negative, there is an error in extractor')
2448
2449 chapters = info_dict.get('chapters') or []
2450 if chapters and chapters[0].get('start_time'):
2451 chapters.insert(0, {'start_time': 0})
2452
2453 dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
2454 for idx, (prev, current, next_) in enumerate(zip(
2455 (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
2456 if current.get('start_time') is None:
2457 current['start_time'] = prev.get('end_time')
2458 if not current.get('end_time'):
2459 current['end_time'] = next_.get('start_time')
2460 if not current.get('title'):
2461 current['title'] = f'<Untitled Chapter {idx}>'
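# Illustrative: chapters missing boundaries are stitched from their
# neighbours, so [{'start_time': 0}, {'start_time': 60}] on a 120s
# video becomes two fully-bounded, auto-titled chapters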
2462
2463 if 'playlist' not in info_dict:
2464 # It isn't part of a playlist
2465 info_dict['playlist'] = None
2466 info_dict['playlist_index'] = None
2467
2468 self._sanitize_thumbnails(info_dict)
2469
2470 thumbnail = info_dict.get('thumbnail')
2471 thumbnails = info_dict.get('thumbnails')
2472 if thumbnail:
2473 info_dict['thumbnail'] = sanitize_url(thumbnail)
2474 elif thumbnails:
2475 info_dict['thumbnail'] = thumbnails[-1]['url']
2476
2477 if info_dict.get('display_id') is None and 'id' in info_dict:
2478 info_dict['display_id'] = info_dict['id']
2479
2480 self._fill_common_fields(info_dict)
2481
2482 for cc_kind in ('subtitles', 'automatic_captions'):
2483 cc = info_dict.get(cc_kind)
2484 if cc:
2485 for _, subtitle in cc.items():
2486 for subtitle_format in subtitle:
2487 if subtitle_format.get('url'):
2488 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
2489 if subtitle_format.get('ext') is None:
2490 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
2491
2492 automatic_captions = info_dict.get('automatic_captions')
2493 subtitles = info_dict.get('subtitles')
2494
2495 info_dict['requested_subtitles'] = self.process_subtitles(
2496 info_dict['id'], subtitles, automatic_captions)
2497
2498 if info_dict.get('formats') is None:
2499 # There's only one format available
2500 formats = [info_dict]
2501 else:
2502 formats = info_dict['formats']
2503
2504 # or None ensures --clean-infojson removes it
2505 info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
2506 if not self.params.get('allow_unplayable_formats'):
2507 formats = [f for f in formats if not f.get('has_drm')]
2508 if info_dict['_has_drm'] and formats and all(
2509 f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
2510 self.report_warning(
2511 'This video is DRM protected and only images are available for download. '
2512 'Use --list-formats to see them')
2513
2514 get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
2515 if not get_from_start:
2516 info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
2517 if info_dict.get('is_live') and formats:
2518 formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
2519 if get_from_start and not formats:
2520 self.raise_no_formats(info_dict, msg=(
2521 '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
2522 'If you want to download from the current time, use --no-live-from-start'))
2523
2524 if not formats:
2525 self.raise_no_formats(info_dict)
2526
2527 def is_wellformed(f):
2528 url = f.get('url')
2529 if not url:
2530 self.report_warning(
2531 '"url" field is missing or empty - skipping format, '
2532 'there is an error in extractor')
2533 return False
2534 if isinstance(url, bytes):
2535 sanitize_string_field(f, 'url')
2536 return True
2537
2538 # Filter out malformed formats for better extraction robustness
2539 formats = list(filter(is_wellformed, formats))
2540
2541 formats_dict = {}
2542
2543 # We check that all the formats have the format and format_id fields
2544 for i, format in enumerate(formats):
2545 sanitize_string_field(format, 'format_id')
2546 sanitize_numeric_fields(format)
2547 format['url'] = sanitize_url(format['url'])
2548 if not format.get('format_id'):
2549 format['format_id'] = str(i)
2550 else:
2551 # Sanitize format_id from characters used in format selector expression
2552 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
2553 format_id = format['format_id']
2554 if format_id not in formats_dict:
2555 formats_dict[format_id] = []
2556 formats_dict[format_id].append(format)
2557
2558 # Make sure all formats have unique format_id
2559 common_exts = set(itertools.chain(*self._format_selection_exts.values()))
2560 for format_id, ambiguous_formats in formats_dict.items():
2561 ambiguous_id = len(ambiguous_formats) > 1
2562 for i, format in enumerate(ambiguous_formats):
2563 if ambiguous_id:
2564 format['format_id'] = '%s-%d' % (format_id, i)
2565 if format.get('ext') is None:
2566 format['ext'] = determine_ext(format['url']).lower()
2567 # Ensure there is no conflict between id and ext in format selection
2568 # See https://github.com/yt-dlp/yt-dlp/issues/1282
2569 if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
2570 format['format_id'] = 'f%s' % format['format_id']
2571
2572 for i, format in enumerate(formats):
2573 if format.get('format') is None:
2574 format['format'] = '{id} - {res}{note}'.format(
2575 id=format['format_id'],
2576 res=self.format_resolution(format),
2577 note=format_field(format, 'format_note', ' (%s)'),
2578 )
2579 if format.get('protocol') is None:
2580 format['protocol'] = determine_protocol(format)
2581 if format.get('resolution') is None:
2582 format['resolution'] = self.format_resolution(format, default=None)
2583 if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
2584 format['dynamic_range'] = 'SDR'
2585 if (info_dict.get('duration') and format.get('tbr')
2586 and not format.get('filesize') and not format.get('filesize_approx')):
2587 format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
2588
2589 # Add HTTP headers, so that external programs can use them from the
2590 # json output
2591 full_format_info = info_dict.copy()
2592 full_format_info.update(format)
2593 format['http_headers'] = self._calc_headers(full_format_info)
2594 # Remove private housekeeping stuff
2595 if '__x_forwarded_for_ip' in info_dict:
2596 del info_dict['__x_forwarded_for_ip']
2597
2598 if self.params.get('check_formats') is True:
2599 formats = LazyList(self._check_formats(formats[::-1]), reverse=True)
2600
2601 if not formats or formats[0] is not info_dict:
2602 # only set the 'formats' field if the original info_dict lists them;
2603 # otherwise we end up with a circular reference: the first (and unique)
2604 # element of the 'formats' field in info_dict is info_dict itself,
2605 # which can't be exported to json
2606 info_dict['formats'] = formats
2607
2608 info_dict, _ = self.pre_process(info_dict)
2609
2610 if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
2611 return info_dict
2612
2613 self.post_extract(info_dict)
2614 info_dict, _ = self.pre_process(info_dict, 'after_filter')
2615
2616 # The pre-processors may have modified the formats
2617 formats = info_dict.get('formats', [info_dict])
2618
2619 list_only = self.params.get('simulate') is None and (
2620 self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
2621 interactive_format_selection = not list_only and self.format_selector == '-'
2622 if self.params.get('list_thumbnails'):
2623 self.list_thumbnails(info_dict)
2624 if self.params.get('listsubtitles'):
2625 if 'automatic_captions' in info_dict:
2626 self.list_subtitles(
2627 info_dict['id'], automatic_captions, 'automatic captions')
2628 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
2629 if self.params.get('listformats') or interactive_format_selection:
2630 self.list_formats(info_dict)
2631 if list_only:
2632 # Without this printing, -F --print-json will not work
2633 self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
2634 return info_dict
2635
2636 format_selector = self.format_selector
2637 if format_selector is None:
2638 req_format = self._default_format_spec(info_dict, download=download)
2639 self.write_debug('Default format spec: %s' % req_format)
2640 format_selector = self.build_format_selector(req_format)
2641
2642 while True:
2643 if interactive_format_selection:
2644 req_format = input(
2645 self._format_screen('\nEnter format selector: ', self.Styles.EMPHASIS))
2646 try:
2647 format_selector = self.build_format_selector(req_format)
2648 except SyntaxError as err:
2649 self.report_error(err, tb=False, is_error=False)
2650 continue
2651
2652 formats_to_download = list(format_selector({
2653 'formats': formats,
2654 'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
2655 'incomplete_formats': (
2656 # All formats are video-only or
2657 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
2658 # all formats are audio-only
2659 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
2660 }))
2661 if interactive_format_selection and not formats_to_download:
2662 self.report_error('Requested format is not available', tb=False, is_error=False)
2663 continue
2664 break
2665
2666 if not formats_to_download:
2667 if not self.params.get('ignore_no_formats_error'):
2668 raise ExtractorError(
2669 'Requested format is not available. Use --list-formats for a list of available formats',
2670 expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
2671 self.report_warning('Requested format is not available')
2672 # Process what we can, even without any available formats.
2673 formats_to_download = [{}]
2674
2675 requested_ranges = self.params.get('download_ranges')
2676 if requested_ranges:
2677 requested_ranges = tuple(requested_ranges(info_dict, self))
2678
2679 best_format, downloaded_formats = formats_to_download[-1], []
2680 if download:
2681 if best_format:
2682 def to_screen(*msg):
2683 self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')
2684
2685 to_screen(f'Downloading {len(formats_to_download)} format(s):',
2686 (f['format_id'] for f in formats_to_download))
2687 if requested_ranges:
2688 to_screen(f'Downloading {len(requested_ranges)} time ranges:',
2689 (f'{int(c["start_time"])}-{int(c["end_time"])}' for c in requested_ranges))
2690 max_downloads_reached = False
2691
2692 for fmt, chapter in itertools.product(formats_to_download, requested_ranges or [{}]):
2693 new_info = self._copy_infodict(info_dict)
2694 new_info.update(fmt)
2695 offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
2696 if chapter or offset:
2697 new_info.update({
2698 'section_start': offset + chapter.get('start_time', 0),
2699 'section_end': offset + min(chapter.get('end_time', duration), duration),
2700 'section_title': chapter.get('title'),
2701 'section_number': chapter.get('index'),
2702 })
2703 downloaded_formats.append(new_info)
2704 try:
2705 self.process_info(new_info)
2706 except MaxDownloadsReached:
2707 max_downloads_reached = True
2708 self._raise_pending_errors(new_info)
2709 # Remove copied info
2710 for key, val in tuple(new_info.items()):
2711 if info_dict.get(key) == val:
2712 new_info.pop(key)
2713 if max_downloads_reached:
2714 break
2715
2716 write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
2717 assert write_archive.issubset({True, False, 'ignore'})
2718 if True in write_archive and False not in write_archive:
2719 self.record_download_archive(info_dict)
2720
2721 info_dict['requested_downloads'] = downloaded_formats
2722 info_dict = self.run_all_pps('after_video', info_dict)
2723 if max_downloads_reached:
2724 raise MaxDownloadsReached()
2725
2726 # We update the info dict with the selected best quality format (backwards compatibility)
2727 info_dict.update(best_format)
2728 return info_dict
2729
2730 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
2731 """Select the requested subtitles and their format"""
2732 available_subs, normal_sub_langs = {}, []
2733 if normal_subtitles and self.params.get('writesubtitles'):
2734 available_subs.update(normal_subtitles)
2735 normal_sub_langs = tuple(normal_subtitles.keys())
2736 if automatic_captions and self.params.get('writeautomaticsub'):
2737 for lang, cap_info in automatic_captions.items():
2738 if lang not in available_subs:
2739 available_subs[lang] = cap_info
2740
2741 if (not self.params.get('writesubtitles')
2742 and not self.params.get('writeautomaticsub')
2743 or not available_subs):
2744 return None
2745
2746 all_sub_langs = tuple(available_subs.keys())
2747 if self.params.get('allsubtitles', False):
2748 requested_langs = all_sub_langs
2749 elif self.params.get('subtitleslangs', False):
2750 try:
2751 requested_langs = orderedSet_from_options(
2752 self.params.get('subtitleslangs'), {'all': all_sub_langs}, use_regex=True)
2753 except re.error as e:
2754 raise ValueError(f'Wrong regex for subtitleslangs: {e.pattern}')
2755 elif normal_sub_langs:
2756 requested_langs = ['en'] if 'en' in normal_sub_langs else normal_sub_langs[:1]
2757 else:
2758 requested_langs = ['en'] if 'en' in all_sub_langs else all_sub_langs[:1]
2759 if requested_langs:
2760 self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))
2761
2762 formats_query = self.params.get('subtitlesformat', 'best')
2763 formats_preference = formats_query.split('/') if formats_query else []
2764 subs = {}
2765 for lang in requested_langs:
2766 formats = available_subs.get(lang)
2767 if formats is None:
2768 self.report_warning(f'{lang} subtitles not available for {video_id}')
2769 continue
2770 for ext in formats_preference:
2771 if ext == 'best':
2772 f = formats[-1]
2773 break
2774 matches = list(filter(lambda f: f['ext'] == ext, formats))
2775 if matches:
2776 f = matches[-1]
2777 break
2778 else:
2779 f = formats[-1]
2780 self.report_warning(
2781 'No subtitle format found matching "%s" for language %s, '
2782 'using %s' % (formats_query, lang, f['ext']))
2783 subs[lang] = f
2784 return subs
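# --- Editor's example (a sketch; not part of the original source) ---
# How subtitle selection behaves for a hypothetical video 'xyz' (the param
# names are real, the subtitle data is made up):
#
#     ydl = YoutubeDL({'writesubtitles': True, 'subtitlesformat': 'vtt/srt/best'})
#     subs = ydl.process_subtitles('xyz', {
#         'en': [{'ext': 'srt', 'url': '...'}, {'ext': 'vtt', 'url': '...'}],
#         'fr': [{'ext': 'srt', 'url': '...'}],
#     }, automatic_captions={})
#
# Since no 'subtitleslangs' is given and 'en' exists among the normal
# subtitles, only English is picked; 'vtt' wins as the first matching entry
# in the format preference: subs == {'en': {'ext': 'vtt', 'url': '...'}}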
2785
2786 def _forceprint(self, key, info_dict):
2787 if info_dict is None:
2788 return
2789 info_copy = info_dict.copy()
2790 info_copy['formats_table'] = self.render_formats_table(info_dict)
2791 info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
2792 info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
2793 info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))
2794
2795 def format_tmpl(tmpl):
2796 mobj = re.match(r'\w+(=?)$', tmpl)
2797 if mobj and mobj.group(1):
2798 return f'{tmpl[:-1]} = %({tmpl[:-1]})r'
2799 elif mobj:
2800 return f'%({tmpl})s'
2801 return tmpl
2802
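# --- Editor's note: format_tmpl() shorthands, illustrated ---
#     format_tmpl('id=')                -> 'id = %(id)r'
#     format_tmpl('id')                 -> '%(id)s'
#     format_tmpl('%(id)s - %(title)s') -> unchanged (not a bare field name)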
2803 for tmpl in self.params['forceprint'].get(key, []):
2804 self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))
2805
2806 for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
2807 filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
2808 tmpl = format_tmpl(tmpl)
2809 self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
2810 if self._ensure_dir_exists(filename):
2811 with open(filename, 'a', encoding='utf-8') as f:
2812 f.write(self.evaluate_outtmpl(tmpl, info_copy) + '\n')
2813
2814 def __forced_printings(self, info_dict, filename, incomplete):
2815 def print_mandatory(field, actual_field=None):
2816 if actual_field is None:
2817 actual_field = field
2818 if (self.params.get('force%s' % field, False)
2819 and (not incomplete or info_dict.get(actual_field) is not None)):
2820 self.to_stdout(info_dict[actual_field])
2821
2822 def print_optional(field):
2823 if (self.params.get('force%s' % field, False)
2824 and info_dict.get(field) is not None):
2825 self.to_stdout(info_dict[field])
2826
2827 info_dict = info_dict.copy()
2828 if filename is not None:
2829 info_dict['filename'] = filename
2830 if info_dict.get('requested_formats') is not None:
2831 # For RTMP URLs, also include the playpath
2832 info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
2833 elif info_dict.get('url'):
2834 info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')
2835
2836 if (self.params.get('forcejson')
2837 or self.params['forceprint'].get('video')
2838 or self.params['print_to_file'].get('video')):
2839 self.post_extract(info_dict)
2840 self._forceprint('video', info_dict)
2841
2842 print_mandatory('title')
2843 print_mandatory('id')
2844 print_mandatory('url', 'urls')
2845 print_optional('thumbnail')
2846 print_optional('description')
2847 print_optional('filename')
2848 if self.params.get('forceduration') and info_dict.get('duration') is not None:
2849 self.to_stdout(formatSeconds(info_dict['duration']))
2850 print_mandatory('format')
2851
2852 if self.params.get('forcejson'):
2853 self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
2854
2855 def dl(self, name, info, subtitle=False, test=False):
2856 if not info.get('url'):
2857 self.raise_no_formats(info, True)
2858
2859 if test:
2860 verbose = self.params.get('verbose')
2861 params = {
2862 'test': True,
2863 'quiet': self.params.get('quiet') or not verbose,
2864 'verbose': verbose,
2865 'noprogress': not verbose,
2866 'nopart': True,
2867 'skip_unavailable_fragments': False,
2868 'keep_fragments': False,
2869 'overwrites': True,
2870 '_no_ytdl_file': True,
2871 }
2872 else:
2873 params = self.params
2874 fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
2875 if not test:
2876 for ph in self._progress_hooks:
2877 fd.add_progress_hook(ph)
2878 urls = '", "'.join(
2879 (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
2880 for f in info.get('requested_formats', []) or [info])
2881 self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')
2882
2883 # Note: Ideally, info should be deep-copied so that hooks cannot modify it,
2884 # but it may contain objects that are not deep-copyable
2885 new_info = self._copy_infodict(info)
2886 if new_info.get('http_headers') is None:
2887 new_info['http_headers'] = self._calc_headers(new_info)
2888 return fd.download(name, new_info, subtitle)
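# --- Editor's note (illustrative; not part of the original source) ---
# With test=True, dl() swaps in a throwaway parameter set (a small test
# download, no .part/.ytdl files, overwrites allowed); this is how format
# probing such as --check-formats works. A hedged usage sketch:
#
#     success, real_download = ydl.dl(tmp_name, fmt_info, test=True)  # names hypothetical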
2889
2890 def existing_file(self, filepaths, *, default_overwrite=True):
2891 existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
2892 if existing_files and not self.params.get('overwrites', default_overwrite):
2893 return existing_files[0]
2894
2895 for file in existing_files:
2896 self.report_file_delete(file)
2897 os.remove(file)
2898 return None
2899
2900 def process_info(self, info_dict):
2901 """Process a single resolved IE result. (Modifies it in-place)"""
2902
2903 assert info_dict.get('_type', 'video') == 'video'
2904 original_infodict = info_dict
2905
2906 if 'format' not in info_dict and 'ext' in info_dict:
2907 info_dict['format'] = info_dict['ext']
2908
2909 # This is mostly just for backward compatibility of process_info
2910 # As a side-effect, this allows for format-specific filters
2911 if self._match_entry(info_dict) is not None:
2912 info_dict['__write_download_archive'] = 'ignore'
2913 return
2914
2915 # Does nothing under normal operation - for backward compatibility of process_info
2916 self.post_extract(info_dict)
2917 self._num_downloads += 1
2918
2919 # info_dict['_filename'] needs to be set for backward compatibility
2920 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2921 temp_filename = self.prepare_filename(info_dict, 'temp')
2922 files_to_move = {}
2923
2924 # Forced printings
2925 self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
2926
2927 def check_max_downloads():
2928 if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
2929 raise MaxDownloadsReached()
2930
2931 if self.params.get('simulate'):
2932 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
2933 check_max_downloads()
2934 return
2935
2936 if full_filename is None:
2937 return
2938 if not self._ensure_dir_exists(encodeFilename(full_filename)):
2939 return
2940 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
2941 return
2942
2943 if self._write_description('video', info_dict,
2944 self.prepare_filename(info_dict, 'description')) is None:
2945 return
2946
2947 sub_files = self._write_subtitles(info_dict, temp_filename)
2948 if sub_files is None:
2949 return
2950 files_to_move.update(dict(sub_files))
2951
2952 thumb_files = self._write_thumbnails(
2953 'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
2954 if thumb_files is None:
2955 return
2956 files_to_move.update(dict(thumb_files))
2957
2958 infofn = self.prepare_filename(info_dict, 'infojson')
2959 _infojson_written = self._write_info_json('video', info_dict, infofn)
2960 if _infojson_written:
2961 info_dict['infojson_filename'] = infofn
2962 # For backward compatibility, even though it was a private field
2963 info_dict['__infojson_filename'] = infofn
2964 elif _infojson_written is None:
2965 return
2966
2967 # Note: Annotations are deprecated
2968 annofn = None
2969 if self.params.get('writeannotations', False):
2970 annofn = self.prepare_filename(info_dict, 'annotation')
2971 if annofn:
2972 if not self._ensure_dir_exists(encodeFilename(annofn)):
2973 return
2974 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
2975 self.to_screen('[info] Video annotations are already present')
2976 elif not info_dict.get('annotations'):
2977 self.report_warning('There are no annotations to write.')
2978 else:
2979 try:
2980 self.to_screen('[info] Writing video annotations to: ' + annofn)
2981 with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
2982 annofile.write(info_dict['annotations'])
2983 except (KeyError, TypeError):
2984 self.report_warning('There are no annotations to write.')
2985 except OSError:
2986 self.report_error('Cannot write annotations file: ' + annofn)
2987 return
2988
2989 # Write internet shortcut files
2990 def _write_link_file(link_type):
2991 url = try_get(info_dict['webpage_url'], iri_to_uri)
2992 if not url:
2993 self.report_warning(
2994 f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
2995 return True
2996 linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
2997 if not self._ensure_dir_exists(encodeFilename(linkfn)):
2998 return False
2999 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
3000 self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
3001 return True
3002 try:
3003 self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
3004 with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
3005 newline='\r\n' if link_type == 'url' else '\n') as linkfile:
3006 template_vars = {'url': url}
3007 if link_type == 'desktop':
3008 template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
3009 linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
3010 except OSError:
3011 self.report_error(f'Cannot write internet shortcut {linkfn}')
3012 return False
3013 return True
3014
3015 write_links = {
3016 'url': self.params.get('writeurllink'),
3017 'webloc': self.params.get('writewebloclink'),
3018 'desktop': self.params.get('writedesktoplink'),
3019 }
3020 if self.params.get('writelink'):
3021 link_type = ('webloc' if sys.platform == 'darwin'
3022 else 'desktop' if sys.platform.startswith('linux')
3023 else 'url')
3024 write_links[link_type] = True
3025
3026 if any(should_write and not _write_link_file(link_type)
3027 for link_type, should_write in write_links.items()):
3028 return
3029
3030 def replace_info_dict(new_info):
3031 nonlocal info_dict
3032 if new_info == info_dict:
3033 return
3034 info_dict.clear()
3035 info_dict.update(new_info)
3036
3037 new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
3038 replace_info_dict(new_info)
3039
3040 if self.params.get('skip_download'):
3041 info_dict['filepath'] = temp_filename
3042 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3043 info_dict['__files_to_move'] = files_to_move
3044 replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
3045 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
3046 else:
3047 # Download
3048 info_dict.setdefault('__postprocessors', [])
3049 try:
3050
3051 def existing_video_file(*filepaths):
3052 ext = info_dict.get('ext')
3053 converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
3054 file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
3055 default_overwrite=False)
3056 if file:
3057 info_dict['ext'] = os.path.splitext(file)[1][1:]
3058 return file
3059
3060 fd, success = None, True
3061 if info_dict.get('protocol') or info_dict.get('url'):
3062 fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
3063 if fd is not FFmpegFD and (
3064 info_dict.get('section_start') or info_dict.get('section_end')):
3065 msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
3066 else 'You have requested downloading the video partially, but ffmpeg is not installed')
3067 self.report_error(f'{msg}. Aborting')
3068 return
3069
3070 if info_dict.get('requested_formats') is not None:
3071 requested_formats = info_dict['requested_formats']
3072 old_ext = info_dict['ext']
3073 if self.params.get('merge_output_format') is None:
3074 if (info_dict['ext'] == 'webm'
3075 and info_dict.get('thumbnails')
3076 # check with type instead of pp_key, __name__, or isinstance
3077 # since we don't want any custom PPs to trigger this
3078 and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])): # noqa: E721
3079 info_dict['ext'] = 'mkv'
3080 self.report_warning(
3081 'webm doesn\'t support embedding a thumbnail, mkv will be used')
3082 new_ext = info_dict['ext']
3083
3084 def correct_ext(filename, ext=new_ext):
3085 if filename == '-':
3086 return filename
3087 filename_real_ext = os.path.splitext(filename)[1][1:]
3088 filename_wo_ext = (
3089 os.path.splitext(filename)[0]
3090 if filename_real_ext in (old_ext, new_ext)
3091 else filename)
3092 return f'{filename_wo_ext}.{ext}'
3093
3094 # Ensure filename always has a correct extension for successful merge
3095 full_filename = correct_ext(full_filename)
3096 temp_filename = correct_ext(temp_filename)
3097 dl_filename = existing_video_file(full_filename, temp_filename)
3098 info_dict['__real_download'] = False
3099
3100 merger = FFmpegMergerPP(self)
3101 downloaded = []
3102 if dl_filename is not None:
3103 self.report_file_already_downloaded(dl_filename)
3104 elif fd:
3105 for f in requested_formats if fd is not FFmpegFD else []:
3106 f['filepath'] = fname = prepend_extension(
3107 correct_ext(temp_filename, info_dict['ext']),
3108 'f%s' % f['format_id'], info_dict['ext'])
3109 downloaded.append(fname)
3110 info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
3111 success, real_download = self.dl(temp_filename, info_dict)
3112 info_dict['__real_download'] = real_download
3113 else:
3114 if self.params.get('allow_unplayable_formats'):
3115 self.report_warning(
3116 'You have requested merging of multiple formats '
3117 'while also allowing unplayable formats to be downloaded. '
3118 'The formats won\'t be merged to prevent data corruption.')
3119 elif not merger.available:
3120 msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
3121 if not self.params.get('ignoreerrors'):
3122 self.report_error(f'{msg}. Aborting due to --abort-on-error')
3123 return
3124 self.report_warning(f'{msg}. The formats won\'t be merged')
3125
3126 if temp_filename == '-':
3127 reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
3128 else 'but the formats are incompatible for simultaneous download' if merger.available
3129 else 'but ffmpeg is not installed')
3130 self.report_warning(
3131 f'You have requested downloading multiple formats to stdout {reason}. '
3132 'The formats will be streamed one after the other')
3133 fname = temp_filename
3134 for f in requested_formats:
3135 new_info = dict(info_dict)
3136 del new_info['requested_formats']
3137 new_info.update(f)
3138 if temp_filename != '-':
3139 fname = prepend_extension(
3140 correct_ext(temp_filename, new_info['ext']),
3141 'f%s' % f['format_id'], new_info['ext'])
3142 if not self._ensure_dir_exists(fname):
3143 return
3144 f['filepath'] = fname
3145 downloaded.append(fname)
3146 partial_success, real_download = self.dl(fname, new_info)
3147 info_dict['__real_download'] = info_dict['__real_download'] or real_download
3148 success = success and partial_success
3149
3150 if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
3151 info_dict['__postprocessors'].append(merger)
3152 info_dict['__files_to_merge'] = downloaded
3153 # Even if there were no downloads, the file is only being merged now
3154 info_dict['__real_download'] = True
3155 else:
3156 for file in downloaded:
3157 files_to_move[file] = None
3158 else:
3159 # Just a single file
3160 dl_filename = existing_video_file(full_filename, temp_filename)
3161 if dl_filename is None or dl_filename == temp_filename:
3162 # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
3163 # So we should try to resume the download
3164 success, real_download = self.dl(temp_filename, info_dict)
3165 info_dict['__real_download'] = real_download
3166 else:
3167 self.report_file_already_downloaded(dl_filename)
3168
3169 dl_filename = dl_filename or temp_filename
3170 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3171
3172 except network_exceptions as err:
3173 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
3174 return
3175 except OSError as err:
3176 raise UnavailableVideoError(err)
3177 except (ContentTooShortError, ) as err:
3178 self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
3179 return
3180
3181 self._raise_pending_errors(info_dict)
3182 if success and full_filename != '-':
3183
3184 def fixup():
3185 do_fixup = True
3186 fixup_policy = self.params.get('fixup')
3187 vid = info_dict['id']
3188
3189 if fixup_policy in ('ignore', 'never'):
3190 return
3191 elif fixup_policy == 'warn':
3192 do_fixup = 'warn'
3193 elif fixup_policy != 'force':
3194 assert fixup_policy in ('detect_or_warn', None)
3195 if not info_dict.get('__real_download'):
3196 do_fixup = False
3197
3198 def ffmpeg_fixup(cndn, msg, cls):
3199 if not (do_fixup and cndn):
3200 return
3201 elif do_fixup == 'warn':
3202 self.report_warning(f'{vid}: {msg}')
3203 return
3204 pp = cls(self)
3205 if pp.available:
3206 info_dict['__postprocessors'].append(pp)
3207 else:
3208 self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
3209
3210 stretched_ratio = info_dict.get('stretched_ratio')
3211 ffmpeg_fixup(stretched_ratio not in (1, None),
3212 f'Non-uniform pixel ratio {stretched_ratio}',
3213 FFmpegFixupStretchedPP)
3214
3215 downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
3216 downloader = downloader.FD_NAME if downloader else None
3217
3218 ext = info_dict.get('ext')
3219 postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
3220 isinstance(pp, FFmpegVideoConvertorPP)
3221 and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
3222 ) for pp in self._pps['post_process'])
3223
3224 if not postprocessed_by_ffmpeg:
3225 ffmpeg_fixup(ext == 'm4a' and info_dict.get('container') == 'm4a_dash',
3226 'writing DASH m4a. Only some players support this container',
3227 FFmpegFixupM4aPP)
3228 ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
3229 or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
3230 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
3231 FFmpegFixupM3u8PP)
3232 ffmpeg_fixup(info_dict.get('is_live') and downloader == 'DashSegmentsFD',
3233 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)
3234
3235 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
3236 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)
3237
3238 fixup()
3239 try:
3240 replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
3241 except PostProcessingError as err:
3242 self.report_error('Postprocessing: %s' % str(err))
3243 return
3244 try:
3245 for ph in self._post_hooks:
3246 ph(info_dict['filepath'])
3247 except Exception as err:
3248 self.report_error('post hooks: %s' % str(err))
3249 return
3250 info_dict['__write_download_archive'] = True
3251
3252 assert info_dict is original_infodict # Make sure the info_dict was modified in-place
3253 if self.params.get('force_write_download_archive'):
3254 info_dict['__write_download_archive'] = True
3255 check_max_downloads()
3256
3257 def __download_wrapper(self, func):
3258 @functools.wraps(func)
3259 def wrapper(*args, **kwargs):
3260 try:
3261 res = func(*args, **kwargs)
3262 except UnavailableVideoError as e:
3263 self.report_error(e)
3264 except DownloadCancelled as e:
3265 self.to_screen(f'[info] {e}')
3266 if not self.params.get('break_per_url'):
3267 raise
3268 else:
3269 if self.params.get('dump_single_json', False):
3270 self.post_extract(res)
3271 self.to_stdout(json.dumps(self.sanitize_info(res)))
3272 return wrapper
3273
3274 def download(self, url_list):
3275 """Download a given list of URLs."""
3276 url_list = variadic(url_list) # Passing a single URL is a common mistake
3277 outtmpl = self.params['outtmpl']['default']
3278 if (len(url_list) > 1
3279 and outtmpl != '-'
3280 and '%' not in outtmpl
3281 and self.params.get('max_downloads') != 1):
3282 raise SameFileError(outtmpl)
3283
3284 for url in url_list:
3285 self.__download_wrapper(self.extract_info)(
3286 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
3287
3288 return self._download_retcode
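# --- Editor's example (a sketch; not part of the original source) ---
# Typical embedding usage (the URL is hypothetical):
#
#     with YoutubeDL({'outtmpl': '%(title)s.%(ext)s'}) as ydl:
#         retcode = ydl.download(['https://example.com/watch?v=xyz'])
#
# Passing several URLs with a fixed, template-free outtmpl raises
# SameFileError, since every download would overwrite the same file.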
3289
3290 def download_with_info_file(self, info_filename):
3291 with contextlib.closing(fileinput.FileInput(
3292 [info_filename], mode='r',
3293 openhook=fileinput.hook_encoded('utf-8'))) as f:
3294 # FileInput doesn't have a read method, so we can't call json.load
3295 info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
3296 try:
3297 self.__download_wrapper(self.process_ie_result)(info, download=True)
3298 except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
3299 if not isinstance(e, EntryNotInPlaylist):
3300 self.to_stderr('\r')
3301 webpage_url = info.get('webpage_url')
3302 if webpage_url is not None:
3303 self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
3304 return self.download([webpage_url])
3305 else:
3306 raise
3307 return self._download_retcode
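# --- Editor's example (a sketch; not part of the original source) ---
# Re-run downloads from a previously written info json (the filename is
# hypothetical); this is roughly what --load-info-json does:
#
#     ydl.download_with_info_file('My Video [xyz].info.json')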
3308
3309 @staticmethod
3310 def sanitize_info(info_dict, remove_private_keys=False):
3311 ''' Sanitize the infodict for converting to json '''
3312 if info_dict is None:
3313 return info_dict
3314 info_dict.setdefault('epoch', int(time.time()))
3315 info_dict.setdefault('_type', 'video')
3316
3317 if remove_private_keys:
3318 reject = lambda k, v: v is None or k.startswith('__') or k in {
3319 'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
3320 'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber',
3321 }
3322 else:
3323 reject = lambda k, v: False
3324
3325 def filter_fn(obj):
3326 if isinstance(obj, dict):
3327 return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
3328 elif isinstance(obj, (list, tuple, set, LazyList)):
3329 return list(map(filter_fn, obj))
3330 elif obj is None or isinstance(obj, (str, int, float, bool)):
3331 return obj
3332 else:
3333 return repr(obj)
3334
3335 return filter_fn(info_dict)
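# --- Editor's example (a sketch; not part of the original source) ---
# With remove_private_keys=True, None values, dunder keys and the listed
# bookkeeping keys are dropped, and non-JSON-friendly objects are repr()'d:
#
#     YoutubeDL.sanitize_info(
#         {'id': 'xyz', 'title': None, '_filename': 'a.mp4',
#          '__real_download': True, 'duration': 42},
#         remove_private_keys=True)
#     # -> {'id': 'xyz', 'duration': 42, 'epoch': ..., '_type': 'video'}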
3336
3337 @staticmethod
3338 def filter_requested_info(info_dict, actually_filter=True):
3339 ''' Alias of sanitize_info for backward compatibility '''
3340 return YoutubeDL.sanitize_info(info_dict, actually_filter)
3341
3342 def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
3343 for filename in set(filter(None, files_to_delete)):
3344 if msg:
3345 self.to_screen(msg % filename)
3346 try:
3347 os.remove(filename)
3348 except OSError:
3349 self.report_warning(f'Unable to delete file {filename}')
3350 if filename in info.get('__files_to_move', []): # NB: Delete even if None
3351 del info['__files_to_move'][filename]
3352
3353 @staticmethod
3354 def post_extract(info_dict):
3355 def actual_post_extract(info_dict):
3356 if info_dict.get('_type') in ('playlist', 'multi_video'):
3357 for video_dict in info_dict.get('entries', {}):
3358 actual_post_extract(video_dict or {})
3359 return
3360
3361 post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
3362 info_dict.update(post_extractor())
3363
3364 actual_post_extract(info_dict or {})
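# --- Editor's note (illustrative; not part of the original source) ---
# Extractors can defer expensive fields by stashing a callable under
# '__post_extractor'; post_extract() pops it and merges the result in:
#
#     info['__post_extractor'] = lambda: {'comments': fetch_comments()}  # hypothetical helper
#     YoutubeDL.post_extract(info)  # info now contains 'comments'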
3365
3366 def run_pp(self, pp, infodict):
3367 files_to_delete = []
3368 if '__files_to_move' not in infodict:
3369 infodict['__files_to_move'] = {}
3370 try:
3371 files_to_delete, infodict = pp.run(infodict)
3372 except PostProcessingError as e:
3373 # Must be True and not 'only_download'
3374 if self.params.get('ignoreerrors') is True:
3375 self.report_error(e)
3376 return infodict
3377 raise
3378
3379 if not files_to_delete:
3380 return infodict
3381 if self.params.get('keepvideo', False):
3382 for f in files_to_delete:
3383 infodict['__files_to_move'].setdefault(f, '')
3384 else:
3385 self._delete_downloaded_files(
3386 *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
3387 return infodict
3388
3389 def run_all_pps(self, key, info, *, additional_pps=None):
3390 self._forceprint(key, info)
3391 for pp in (additional_pps or []) + self._pps[key]:
3392 info = self.run_pp(pp, info)
3393 return info
3394
3395 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
3396 info = dict(ie_info)
3397 info['__files_to_move'] = files_to_move or {}
3398 try:
3399 info = self.run_all_pps(key, info)
3400 except PostProcessingError as err:
3401 msg = f'Preprocessing: {err}'
3402 info.setdefault('__pending_error', msg)
3403 self.report_error(msg, is_error=False)
3404 return info, info.pop('__files_to_move', None)
3405
3406 def post_process(self, filename, info, files_to_move=None):
3407 """Run all the postprocessors on the given file."""
3408 info['filepath'] = filename
3409 info['__files_to_move'] = files_to_move or {}
3410 info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
3411 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
3412 del info['__files_to_move']
3413 return self.run_all_pps('after_move', info)
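# --- Editor's example (a sketch; not part of the original source) ---
# Postprocessors are grouped by the "when" key they are registered under
# (e.g. 'pre_process', 'before_dl', 'post_process', 'after_move'), and
# run_all_pps() runs one such group. A minimal custom PP:
#
#     from yt_dlp.postprocessor.common import PostProcessor
#
#     class MyLoggerPP(PostProcessor):  # hypothetical
#         def run(self, info):
#             self.to_screen(f'Finished {info.get("title")!r}')
#             return [], info  # (files to delete, updated info dict)
#
#     ydl.add_post_processor(MyLoggerPP(), when='after_move')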
3414
3415 def _make_archive_id(self, info_dict):
3416 video_id = info_dict.get('id')
3417 if not video_id:
3418 return
3419 # Future-proof against any change in case
3420 # and for backwards compatibility with prior versions
3421 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
3422 if extractor is None:
3423 url = str_or_none(info_dict.get('url'))
3424 if not url:
3425 return
3426 # Try to find matching extractor for the URL and take its ie_key
3427 for ie_key, ie in self._ies.items():
3428 if ie.suitable(url):
3429 extractor = ie_key
3430 break
3431 else:
3432 return
3433 return make_archive_id(extractor, video_id)
3434
3435 def in_download_archive(self, info_dict):
3436 fn = self.params.get('download_archive')
3437 if fn is None:
3438 return False
3439
3440 vid_ids = [self._make_archive_id(info_dict)]
3441 vid_ids.extend(info_dict.get('_old_archive_ids') or [])
3442 return any(id_ in self.archive for id_ in vid_ids)
3443
3444 def record_download_archive(self, info_dict):
3445 fn = self.params.get('download_archive')
3446 if fn is None:
3447 return
3448 vid_id = self._make_archive_id(info_dict)
3449 assert vid_id
3450 self.write_debug(f'Adding to archive: {vid_id}')
3451 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
3452 archive_file.write(vid_id + '\n')
3453 self.archive.add(vid_id)
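# --- Editor's note (illustrative; not part of the original source) ---
# Archive entries are '<lowercased extractor key> <video id>' lines, e.g.:
#
#     make_archive_id('Youtube', 'dQw4w9WgXcQ')  # -> 'youtube dQw4w9WgXcQ'
#
# so a download_archive file is simply one such line per finished video.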
3454
3455 @staticmethod
3456 def format_resolution(format, default='unknown'):
3457 if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
3458 return 'audio only'
3459 if format.get('resolution') is not None:
3460 return format['resolution']
3461 if format.get('width') and format.get('height'):
3462 return '%dx%d' % (format['width'], format['height'])
3463 elif format.get('height'):
3464 return '%sp' % format['height']
3465 elif format.get('width'):
3466 return '%dx?' % format['width']
3467 return default
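# --- Editor's note: illustrative inputs/outputs for format_resolution() ---
#     format_resolution({'vcodec': 'none', 'acodec': 'mp4a.40.2'})  # 'audio only'
#     format_resolution({'width': 1280, 'height': 720})             # '1280x720'
#     format_resolution({'height': 720})                            # '720p'
#     format_resolution({'width': 1280})                            # '1280x?'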
3468
3469 def _list_format_headers(self, *headers):
3470 if self.params.get('listformats_table', True) is not False:
3471 return [self._format_out(header, self.Styles.HEADERS) for header in headers]
3472 return headers
3473
3474 def _format_note(self, fdict):
3475 res = ''
3476 if fdict.get('ext') in ['f4f', 'f4m']:
3477 res += '(unsupported)'
3478 if fdict.get('language'):
3479 if res:
3480 res += ' '
3481 res += '[%s]' % fdict['language']
3482 if fdict.get('format_note') is not None:
3483 if res:
3484 res += ' '
3485 res += fdict['format_note']
3486 if fdict.get('tbr') is not None:
3487 if res:
3488 res += ', '
3489 res += '%4dk' % fdict['tbr']
3490 if fdict.get('container') is not None:
3491 if res:
3492 res += ', '
3493 res += '%s container' % fdict['container']
3494 if (fdict.get('vcodec') is not None
3495 and fdict.get('vcodec') != 'none'):
3496 if res:
3497 res += ', '
3498 res += fdict['vcodec']
3499 if fdict.get('vbr') is not None:
3500 res += '@'
3501 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
3502 res += 'video@'
3503 if fdict.get('vbr') is not None:
3504 res += '%4dk' % fdict['vbr']
3505 if fdict.get('fps') is not None:
3506 if res:
3507 res += ', '
3508 res += '%sfps' % fdict['fps']
3509 if fdict.get('acodec') is not None:
3510 if res:
3511 res += ', '
3512 if fdict['acodec'] == 'none':
3513 res += 'video only'
3514 else:
3515 res += '%-5s' % fdict['acodec']
3516 elif fdict.get('abr') is not None:
3517 if res:
3518 res += ', '
3519 res += 'audio'
3520 if fdict.get('abr') is not None:
3521 res += '@%3dk' % fdict['abr']
3522 if fdict.get('asr') is not None:
3523 res += ' (%5dHz)' % fdict['asr']
3524 if fdict.get('filesize') is not None:
3525 if res:
3526 res += ', '
3527 res += format_bytes(fdict['filesize'])
3528 elif fdict.get('filesize_approx') is not None:
3529 if res:
3530 res += ', '
3531 res += '~' + format_bytes(fdict['filesize_approx'])
3532 return res
3533
3534 def render_formats_table(self, info_dict):
3535 if not info_dict.get('formats') and not info_dict.get('url'):
3536 return None
3537
3538 formats = info_dict.get('formats', [info_dict])
3539 if self.params.get('listformats_table', True) is False:
3540 table = [
3541 [
3542 format_field(f, 'format_id'),
3543 format_field(f, 'ext'),
3544 self.format_resolution(f),
3545 self._format_note(f)
3546 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3547 return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
3548
3549 def simplified_codec(f, field):
3550 assert field in ('acodec', 'vcodec')
3551 codec = f.get(field, 'unknown')
3552 if not codec:
3553 return 'unknown'
3554 elif codec != 'none':
3555 return '.'.join(codec.split('.')[:4])
3556
3557 if field == 'vcodec' and f.get('acodec') == 'none':
3558 return 'images'
3559 elif field == 'acodec' and f.get('vcodec') == 'none':
3560 return ''
3561 return self._format_out('audio only' if field == 'vcodec' else 'video only',
3562 self.Styles.SUPPRESS)
3563
3564 delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
3565 table = [
3566 [
3567 self._format_out(format_field(f, 'format_id'), self.Styles.ID),
3568 format_field(f, 'ext'),
3569 format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
3570 format_field(f, 'fps', '\t%d', func=round),
3571 format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
3572 format_field(f, 'audio_channels', '\t%s'),
3573 delim,
3574 format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
3575 format_field(f, 'tbr', '\t%dk', func=round),
3576 shorten_protocol_name(f.get('protocol', '')),
3577 delim,
3578 simplified_codec(f, 'vcodec'),
3579 format_field(f, 'vbr', '\t%dk', func=round),
3580 simplified_codec(f, 'acodec'),
3581 format_field(f, 'abr', '\t%dk', func=round),
3582 format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
3583 join_nonempty(
3584 self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
3585 format_field(f, 'language', '[%s]'),
3586 join_nonempty(format_field(f, 'format_note'),
3587 format_field(f, 'container', ignore=(None, f.get('ext'))),
3588 delim=', '),
3589 delim=' '),
3590 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3591 header_line = self._list_format_headers(
3592 'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', 'CH', delim, '\tFILESIZE', '\tTBR', 'PROTO',
3593 delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')
3594
3595 return render_table(
3596 header_line, table, hide_empty=True,
3597 delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
3598
3599 def render_thumbnails_table(self, info_dict):
3600 thumbnails = list(info_dict.get('thumbnails') or [])
3601 if not thumbnails:
3602 return None
3603 return render_table(
3604 self._list_format_headers('ID', 'Width', 'Height', 'URL'),
3605 [[t.get('id'), t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])
3606
3607 def render_subtitles_table(self, video_id, subtitles):
3608 def _row(lang, formats):
3609 exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
3610 if len(set(names)) == 1:
3611 names = [] if names[0] == 'unknown' else names[:1]
3612 return [lang, ', '.join(names), ', '.join(exts)]
3613
3614 if not subtitles:
3615 return None
3616 return render_table(
3617 self._list_format_headers('Language', 'Name', 'Formats'),
3618 [_row(lang, formats) for lang, formats in subtitles.items()],
3619 hide_empty=True)
3620
3621 def __list_table(self, video_id, name, func, *args):
3622 table = func(*args)
3623 if not table:
3624 self.to_screen(f'{video_id} has no {name}')
3625 return
3626 self.to_screen(f'[info] Available {name} for {video_id}:')
3627 self.to_stdout(table)
3628
3629 def list_formats(self, info_dict):
3630 self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)
3631
3632 def list_thumbnails(self, info_dict):
3633 self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)
3634
3635 def list_subtitles(self, video_id, subtitles, name='subtitles'):
3636 self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
3637
3638 def urlopen(self, req):
3639 """ Start an HTTP download """
3640 if isinstance(req, str):
3641 req = sanitized_Request(req)
3642 return self._opener.open(req, timeout=self._socket_timeout)
3643
3644 def print_debug_header(self):
3645 if not self.params.get('verbose'):
3646 return
3647
3648 # These imports can be slow. So import them only as needed
3649 from .extractor.extractors import _LAZY_LOADER
3650 from .extractor.extractors import _PLUGIN_CLASSES as plugin_extractors
3651
3652 def get_encoding(stream):
3653 ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
3654 if not supports_terminal_sequences(stream):
3655 from .utils import WINDOWS_VT_MODE # Must be imported locally
3656 ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
3657 return ret
3658
3659 encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
3660 locale.getpreferredencoding(),
3661 sys.getfilesystemencoding(),
3662 self.get_encoding(),
3663 ', '.join(
3664 f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
3665 if stream is not None and key != 'console')
3666 )
3667
3668 logger = self.params.get('logger')
3669 if logger:
3670 write_debug = lambda msg: logger.debug(f'[debug] {msg}')
3671 write_debug(encoding_str)
3672 else:
3673 write_string(f'[debug] {encoding_str}\n', encoding=None)
3674 write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')
3675
3676 source = detect_variant()
3677 if VARIANT not in (None, 'pip'):
3678 source += '*'
3679 write_debug(join_nonempty(
3680 'yt-dlp version', __version__,
3681 f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
3682 '' if source == 'unknown' else f'({source})',
3683 delim=' '))
3684 if not _LAZY_LOADER:
3685 if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
3686 write_debug('Lazy loading extractors is forcibly disabled')
3687 else:
3688 write_debug('Lazy loading extractors is disabled')
3689 if plugin_extractors or plugin_postprocessors:
3690 write_debug('Plugins: %s' % [
3691 '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
3692 for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
3693 if self.params['compat_opts']:
3694 write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
3695
3696 if source == 'source':
3697 try:
3698 stdout, _, _ = Popen.run(
3699 ['git', 'rev-parse', '--short', 'HEAD'],
3700 text=True, cwd=os.path.dirname(os.path.abspath(__file__)),
3701 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
3702 if re.fullmatch('[0-9a-f]+', stdout.strip()):
3703 write_debug(f'Git HEAD: {stdout.strip()}')
3704 except Exception:
3705 with contextlib.suppress(Exception):
3706 sys.exc_clear()
3707
3708 write_debug(system_identifier())
3709
3710 exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
3711 ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
3712 if ffmpeg_features:
3713 exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))
3714
3715 exe_versions['rtmpdump'] = rtmpdump_version()
3716 exe_versions['phantomjs'] = PhantomJSwrapper._version()
3717 exe_str = ', '.join(
3718 f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
3719 ) or 'none'
3720 write_debug('exe versions: %s' % exe_str)
3721
3722 from .compat.compat_utils import get_package_info
3723 from .dependencies import available_dependencies
3724
3725 write_debug('Optional libraries: %s' % (', '.join(sorted({
3726 join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
3727 })) or 'none'))
3728
3729 self._setup_opener()
3730 proxy_map = {}
3731 for handler in self._opener.handlers:
3732 if hasattr(handler, 'proxies'):
3733 proxy_map.update(handler.proxies)
3734 write_debug(f'Proxy map: {proxy_map}')
3735
3736 # Not implemented
3737 if False and self.params.get('call_home'):
3738 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
3739 write_debug('Public IP address: %s' % ipaddr)
3740 latest_version = self.urlopen(
3741 'https://yt-dl.org/latest/version').read().decode()
3742 if version_tuple(latest_version) > version_tuple(__version__):
3743 self.report_warning(
3744 'You are using an outdated version (newest version: %s)! '
3745 'See https://yt-dl.org/update if you need help updating.' %
3746 latest_version)
3747
3748 def _setup_opener(self):
3749 if hasattr(self, '_opener'):
3750 return
3751 timeout_val = self.params.get('socket_timeout')
3752 self._socket_timeout = 20 if timeout_val is None else float(timeout_val)
3753
3754 opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
3755 opts_cookiefile = self.params.get('cookiefile')
3756 opts_proxy = self.params.get('proxy')
3757
3758 self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
3759
3760 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
3761 if opts_proxy is not None:
3762 if opts_proxy == '':
3763 proxies = {}
3764 else:
3765 proxies = {'http': opts_proxy, 'https': opts_proxy}
3766 else:
3767 proxies = urllib.request.getproxies()
3768 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
3769 if 'http' in proxies and 'https' not in proxies:
3770 proxies['https'] = proxies['http']
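# --- Editor's note (illustrative): e.g. proxy='socks5://127.0.0.1:1080'
# yields proxies == {'http': ..., 'https': ...} (both set to that URL),
# while proxy='' disables proxying entirely.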
3771 proxy_handler = PerRequestProxyHandler(proxies)
3772
3773 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
3774 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3775 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
3776 redirect_handler = YoutubeDLRedirectHandler()
3777 data_handler = urllib.request.DataHandler()
3778
3779 # When passing our own FileHandler instance, build_opener won't add the
3780 # default FileHandler and allows us to disable the file protocol, which
3781 # can be used for malicious purposes (see
3782 # https://github.com/ytdl-org/youtube-dl/issues/8227)
3783 file_handler = urllib.request.FileHandler()
3784
3785 def file_open(*args, **kwargs):
3786 raise urllib.error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
3787 file_handler.file_open = file_open
3788
3789 opener = urllib.request.build_opener(
3790 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
3791
3792 # Delete the default user-agent header, which would otherwise apply in
3793 # cases where our custom HTTP handler doesn't come into play
3794 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
3795 opener.addheaders = []
3796 self._opener = opener
3797
3798 def encode(self, s):
3799 if isinstance(s, bytes):
3800 return s # Already encoded
3801
3802 try:
3803 return s.encode(self.get_encoding())
3804 except UnicodeEncodeError as err:
3805 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3806 raise
3807
3808 def get_encoding(self):
3809 encoding = self.params.get('encoding')
3810 if encoding is None:
3811 encoding = preferredencoding()
3812 return encoding
3813
3814 def _write_info_json(self, label, ie_result, infofn, overwrite=None):
3815 ''' Write infojson and return True = written, 'exists' = already exists, False = skipped, None = error '''
3816 if overwrite is None:
3817 overwrite = self.params.get('overwrites', True)
3818 if not self.params.get('writeinfojson'):
3819 return False
3820 elif not infofn:
3821 self.write_debug(f'Skipping writing {label} infojson')
3822 return False
3823 elif not self._ensure_dir_exists(infofn):
3824 return None
3825 elif not overwrite and os.path.exists(infofn):
3826 self.to_screen(f'[info] {label.title()} metadata is already present')
3827 return 'exists'
3828
3829 self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
3830 try:
3831 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
3832 return True
3833 except OSError:
3834 self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
3835 return None
3836
3837 def _write_description(self, label, ie_result, descfn):
3838 ''' Write description and return True = written, False = skipped, None = error '''
3839 if not self.params.get('writedescription'):
3840 return False
3841 elif not descfn:
3842 self.write_debug(f'Skipping writing {label} description')
3843 return False
3844 elif not self._ensure_dir_exists(descfn):
3845 return None
3846 elif not self.params.get('overwrites', True) and os.path.exists(descfn):
3847 self.to_screen(f'[info] {label.title()} description is already present')
3848 elif ie_result.get('description') is None:
3849 self.report_warning(f'There\'s no {label} description to write')
3850 return False
3851 else:
3852 try:
3853 self.to_screen(f'[info] Writing {label} description to: {descfn}')
3854 with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
3855 descfile.write(ie_result['description'])
3856 except OSError:
3857 self.report_error(f'Cannot write {label} description file {descfn}')
3858 return None
3859 return True
3860
3861 def _write_subtitles(self, info_dict, filename):
3862 ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
3863 ret = []
3864 subtitles = info_dict.get('requested_subtitles')
3865 if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
3866 # Subtitle download errors are already handled in the relevant IE;
3867 # that way, this silently continues when an IE doesn't support subtitles
3868 return ret
3869
3870 sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
3871 if not sub_filename_base:
3872 self.to_screen('[info] Skipping writing video subtitles')
3873 return ret
3874 for sub_lang, sub_info in subtitles.items():
3875 sub_format = sub_info['ext']
3876 sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
3877 sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
3878 existing_sub = self.existing_file((sub_filename_final, sub_filename))
3879 if existing_sub:
3880 self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
3881 sub_info['filepath'] = existing_sub
3882 ret.append((existing_sub, sub_filename_final))
3883 continue
3884
3885 self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
3886 if sub_info.get('data') is not None:
3887 try:
3888 # Use newline='' to prevent conversion of newline characters
3889 # See https://github.com/ytdl-org/youtube-dl/issues/10268
3890 with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
3891 subfile.write(sub_info['data'])
3892 sub_info['filepath'] = sub_filename
3893 ret.append((sub_filename, sub_filename_final))
3894 continue
3895 except OSError:
3896 self.report_error(f'Cannot write video subtitles file {sub_filename}')
3897 return None
3898
3899 try:
3900 sub_copy = sub_info.copy()
3901 sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
3902 self.dl(sub_filename, sub_copy, subtitle=True)
3903 sub_info['filepath'] = sub_filename
3904 ret.append((sub_filename, sub_filename_final))
3905 except (DownloadError, ExtractorError, OSError, ValueError) + network_exceptions as err:
3906 msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
3907 if self.params.get('ignoreerrors') is not True: # False or 'only_download'
3908 if not self.params.get('ignoreerrors'):
3909 self.report_error(msg)
3910 raise DownloadError(msg)
3911 self.report_warning(msg)
3912 return ret
3913
3914 def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
3915 ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
3916 write_all = self.params.get('write_all_thumbnails', False)
3917 thumbnails, ret = [], []
3918 if write_all or self.params.get('writethumbnail', False):
3919 thumbnails = info_dict.get('thumbnails') or []
3920 multiple = write_all and len(thumbnails) > 1
3921
3922 if thumb_filename_base is None:
3923 thumb_filename_base = filename
3924 if thumbnails and not thumb_filename_base:
3925 self.write_debug(f'Skipping writing {label} thumbnail')
3926 return ret
3927
3928 for idx, t in list(enumerate(thumbnails))[::-1]:
3929 thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
3930 thumb_display_id = f'{label} thumbnail {t["id"]}'
3931 thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
3932 thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
3933
3934 existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
3935 if existing_thumb:
3936 self.to_screen('[info] %s is already present' % (
3937 thumb_display_id if multiple else f'{label} thumbnail').capitalize())
3938 t['filepath'] = existing_thumb
3939 ret.append((existing_thumb, thumb_filename_final))
3940 else:
3941 self.to_screen(f'[info] Downloading {thumb_display_id} ...')
3942 try:
3943 uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
3944 self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
3945 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
3946 shutil.copyfileobj(uf, thumbf)
3947 ret.append((thumb_filename, thumb_filename_final))
3948 t['filepath'] = thumb_filename
3949 except network_exceptions as err:
3950 thumbnails.pop(idx)
3951 self.report_warning(f'Unable to download {thumb_display_id}: {err}')
3952 if ret and not write_all:
3953 break
3954 return ret