import collections
import contextlib
import datetime
import errno
import fileinput
import functools
import io
import itertools
import json
import locale
import operator
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import time
import tokenize
import traceback
import unicodedata
import urllib.request
from string import ascii_letters

from .cache import Cache
from .compat import compat_os_name, compat_shlex_quote
from .cookies import load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .postprocessor import _PLUGIN_CLASSES as plugin_postprocessors
from .postprocessor import (
    EmbedThumbnailPP,
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    FFmpegVideoConvertorPP,
    MoveFilesAfterDownloadPP,
    get_postprocessor,
)
from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
from .update import detect_variant
from .utils import (
    DEFAULT_OUTTMPL,
    IDENTITY,
    LINK_TEMPLATES,
    MEDIA_EXTENSIONS,
    NO_DEFAULT,
    NUMBER_RE,
    OUTTMPL_TYPES,
    POSTPROCESS_WHEN,
    STR_FORMAT_RE_TMPL,
    STR_FORMAT_TYPES,
    ContentTooShortError,
    DateRange,
    DownloadCancelled,
    DownloadError,
    EntryNotInPlaylist,
    ExistingVideoReached,
    ExtractorError,
    GeoRestrictedError,
    HEADRequest,
    ISO3166Utils,
    LazyList,
    MaxDownloadsReached,
    Namespace,
    PagedList,
    PerRequestProxyHandler,
    PlaylistEntries,
    Popen,
    PostProcessingError,
    ReExtractInfo,
    RejectedVideoReached,
    SameFileError,
    UnavailableVideoError,
    UserNotLive,
    YoutubeDLCookieProcessor,
    YoutubeDLHandler,
    YoutubeDLRedirectHandler,
    age_restricted,
    args_to_str,
    bug_reports_message,
    date_from_str,
    determine_ext,
    determine_protocol,
    encode_compat_str,
    encodeFilename,
    error_to_compat_str,
    escapeHTML,
    expand_path,
    filter_dict,
    float_or_none,
    format_bytes,
    format_decimal_suffix,
    format_field,
    formatSeconds,
    get_compatible_ext,
    get_domain,
    int_or_none,
    iri_to_uri,
    join_nonempty,
    locked_file,
    make_archive_id,
    make_dir,
    make_HTTPS_handler,
    merge_headers,
    network_exceptions,
    number_of_digits,
    orderedSet,
    parse_filesize,
    preferredencoding,
    prepend_extension,
    register_socks_protocols,
    remove_terminal_sequences,
    render_table,
    replace_extension,
    sanitize_filename,
    sanitize_path,
    sanitize_url,
    sanitized_Request,
    std_headers,
    str_or_none,
    strftime_or_none,
    subtitles_filename,
    supports_terminal_sequences,
    system_identifier,
    timetuple_from_msec,
    to_high_limit_path,
    traverse_obj,
    try_call,
    try_get,
    url_basename,
    variadic,
    version_tuple,
    windows_enable_vt_mode,
    write_json_file,
    write_string,
)
from .version import RELEASE_GIT_HEAD, __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL:
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Since, given a video URL, the downloader doesn't know how
    to extract all the needed information (a task the InfoExtractors
    do), it has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge of the InfoExtractors
    that are added to it, so this is a "mutual registration".

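    A minimal usage sketch (illustrative only; the option names used here
    are documented in the list below):

        from yt_dlp import YoutubeDL

        with YoutubeDL({'format': 'bestvideo+bestaudio/best'}) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
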
    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A dict with keys WHEN mapped to a list of templates to
                       print to stdout. The allowed keys are video or any of the
                       items in utils.POSTPROCESS_WHEN.
                       For compatibility, a single list is also accepted
                       (see the usage sketch after this list)
    print_to_file:     A dict with keys WHEN (same as forceprint) mapped to
                       a list of tuples with (template, filename)
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or list_thumbnails is used
    format:            Video format code. See "FORMAT SELECTION" for more details.
                       You can also pass a function. The function takes 'ctx' as
                       argument and returns the formats to download.
                       See "build_format_selector" for an implementation
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental)
    format_sort:       A list of fields by which to sort the video formats.
                       See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    prefer_free_formats: Whether to prefer video formats with free containers
                       over non-free ones of the same quality.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file
    check_formats:     Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none),
                       'selected' (check selected formats),
                       or None (check only if requested by extractor)
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py)
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       For compatibility with youtube-dl, a single string can also be used
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be windows compatible
    ignoreerrors:      Do not stop on download/postprocessing errors.
                       Can be 'only_download' to ignore only download errors.
                       Default is 'only_download' for CLI, but False for API
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped
    force_generic_extractor: Force downloader to use the generic extractor
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False
                       For compatibility with youtube-dl,
                       "nooverwrites" may also be used instead
    playlist_items:    Specific indices of playlist to download.
    playlistrandom:    Download playlist items in random order.
    lazy_playlist:     Process playlist entries as they are received.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Log messages to stderr instead of stdout.
    consoletitle:      Display progress in console window's titlebar.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video description to a .info.json file
    clean_infojson:    Remove private fields from the infojson
    getcomments:       Extract video comments. These will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc.
                       also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. The language can be prefixed with a "-" to
                       exclude it from the requested languages. Eg: ['all', '-live_chat']
    keepvideo:         Keep the video file after post-processing
    daterange:         A DateRange object; download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Videos unsuitable for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  File name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded
                       again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
    break_per_url:     Whether break_on_reject and break_on_existing
                       should act on each input URL as opposed to for the entire queue
    cookiefile:        File name or text stream from where cookies should be read and dumped to
    cookiesfrombrowser: A tuple containing the name of the browser, the profile
                       name/path from where cookies are loaded, and the name of the
                       keyring. Eg: ('chrome', ) or ('vivaldi', 'default', 'BASICTEXT')
    legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
                       support RFC 5746 secure renegotiation
    nocheckcertificate: Do not verify SSL certificates
    client_certificate: Path to client certificate file in PEM format. May include the private key
    client_certificate_key: Path to private key file for client certificate
    client_certificate_password: Password for client certificate private key, if encrypted.
                       If not provided and the key is encrypted, yt-dlp will ask interactively
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       (Only supported by some extractors)
    http_headers:      A dictionary of custom headers to be used for all requests
    proxy:             URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic: Print out sent and received HTTP traffic
    default_search:    Prepend this string if an input url is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Whether to resolve and process url_results further
                       * False: Always process (default)
                       * True: Never process
                       * 'in_playlist': Do not process inside playlist/multi_video
                       * 'discard': Always process, but don't return the result
                         from inside playlist/multi_video
                       * 'discard_in_playlist': Same as "discard", but only for
                         playlists (not multi_video)
    wait_for_video:    If given, wait for scheduled streams to become available.
                       The value should be a tuple containing the range
                       (min_secs, max_secs) to wait between retries
    postprocessors:    A list of dictionaries, each with an entry
                       * key: The name of the postprocessor. See
                         yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Allowed values are
                         the entries of utils.POSTPROCESS_WHEN
                         Assumed to be 'post_process' if not given
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                         Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading", or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                         None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                         unknown
                       * fragment_index: The counter of the currently
                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful
                       (see the usage sketch after this list).
    postprocessor_hooks: A list of functions that get called on postprocessing
                       progress, with a dictionary with the entries
                       * status: One of "started", "processing", or "finished".
                         Check this first and ignore unknown values.
                       * postprocessor: Name of the postprocessor
                       * info_dict: The extracted info_dict

                       Progress hooks are guaranteed to be called at least twice
                       (with status "started" and "finished") if the processing is successful.
    merge_output_format: "/" separated list of extensions to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                         about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called for every video with the signature
                       (info_dict, *, incomplete: bool) -> Optional[str]
                       For backward compatibility with youtube-dl, the signature
                       (info_dict) -> Optional[str] is also allowed.
                       - If it returns a message, the video is ignored.
                       - If it returns None, the video is downloaded.
                       - If it returns utils.NO_DEFAULT, the user is interactively
                         asked whether to download the video.
                       match_filter_func in utils.py is one example for this.
    no_color:          Do not emit color codes in output.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat, format-sort,
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
                       Refer to __init__.py for their implementation
    progress_template: Dictionary of templates for progress outputs.
                       Allowed keys are 'download', 'postprocess',
                       'download-title' (console title) and 'postprocess-title'.
                       The template is mapped onto a dictionary with keys 'progress' and 'info'
    retry_sleep_functions: Dictionary of functions that take the number of attempts
                       as argument and return the time to sleep in seconds.
                       Allowed keys are 'http', 'fragment', 'file_access'
    download_ranges:   A callback function that gets called for every video with
                       the signature (info_dict, ydl) -> Iterable[Section].
                       Only the returned sections will be downloaded.
                       Each Section is a dict with the following keys:
                       * start_time: Start time of the section in seconds
                       * end_time: End time of the section in seconds
                       * title: Section title (Optional)
                       * index: Section number (Optional)
    force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
    noprogress:        Do not print the progress bar

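    An illustrative sketch of how a few of the above options fit together
    (values are examples, not defaults; see each option's description above):

        def my_hook(d):
            if d['status'] == 'finished':  # one of "downloading", "error", "finished"
                print('Done downloading; now post-processing...')

        ydl_opts = {
            'format': 'bestaudio/best',
            'forceprint': {'video': ['%(title)s by %(uploader)s']},
            'subtitleslangs': ['all', '-live_chat'],
            'progress_hooks': [my_hook],
        }
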
    The following parameters are not used by YoutubeDL itself; they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP
                       For compatibility with youtube-dl, a single list of args
                       can also be used

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists to different formats at
                       discontinuities such as ad breaks (default: False)
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       Eg: {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched:      Mark videos watched (even with --simulate). Only for YouTube

    The following options are deprecated and may be removed in the future:

    playliststart:     - Use playlist_items
                       Playlist item to start at.
    playlistend:       - Use playlist_items
                       Playlist item to end at.
    playlistreverse:   - Use playlist_items
                       Download playlist items in reverse order.
    forceurl:          - Use forceprint
                       Force printing final URL.
    forcetitle:        - Use forceprint
                       Force printing title.
    forceid:           - Use forceprint
                       Force printing ID.
    forcethumbnail:    - Use forceprint
                       Force printing thumbnail URL.
    forcedescription:  - Use forceprint
                       Force printing description.
    forcefilename:     - Use forceprint
                       Force printing final filename.
    forceduration:     - Use forceprint
                       Force printing duration.
    allsubtitles:      - Use subtitleslangs = ['all']
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    include_ads:       - Doesn't work
                       Download ads as well
    call_home:         - Not implemented
                       Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging.
    post_hooks:        - Register a custom postprocessor
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.
    prefer_ffmpeg:     - avconv support is deprecated
                       If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: - Use extractor_args
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
    """

    _NUMERIC_FIELDS = {
        'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
    }

    _format_fields = {
        # NB: Keep in sync with the docstring of extractor/common.py
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
        'preference', 'language', 'language_preference', 'quality', 'source_preference',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
    }
    _format_selection_exts = {
        'audio': set(MEDIA_EXTENSIONS.common_audio),
        'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
        'storyboards': set(MEDIA_EXTENSIONS.storyboards),
    }

    def __init__(self, params=None, auto_init=True):
        """Create a YoutubeDL object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        if params is None:
            params = {}
        self.params = params
        self._ies = {}
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._num_videos = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)

        windows_enable_vt_mode()
        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            out=stdout,
            error=sys.stderr,
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
        )
        self._allow_colors = Namespace(**{
            type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
            for type_, stream in self._out_files.items_ if type_ != 'console'
        })

        # The code is left like this to be reused for future deprecations
        MIN_SUPPORTED, MIN_RECOMMENDED = (3, 7), (3, 7)
        current_version = sys.version_info[:2]
        if current_version < MIN_RECOMMENDED:
            msg = ('Support for Python version %d.%d has been deprecated. '
                   'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details.'
                   '\n You will no longer receive updates on this version')
            if current_version < MIN_SUPPORTED:
                msg = 'Python version %d.%d is no longer supported'
            self.deprecation_warning(
                f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                ' If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecation_warning(msg)

        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}

        if self.params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = shutil.get_terminal_size().columns
                width_args = [] if width is None else ['-w', str(width)]
                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if auto_init:
            if auto_init != 'no_verbose_header':
                self.print_debug_header()
            self.add_default_info_extractors()

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))

        # Set http_headers defaults according to std_headers
        self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))

        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                when=when)

        self._setup_opener()
        register_socks_protocols()

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            if fn is None:
                return False
            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        self.archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
                return False
            return True

        self.archive = set()
        preload_download_archive(self.params.get('download_archive'))

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def _get_info_extractor_class(self, ie_key):
        ie = self._ies.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)
            self.add_info_extractor(ie)
        return ie

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key. It will try to get one from
        the _ies list; if there's no instance, it will create a new one and add
        it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        for ie in gen_extractor_classes():
            self.add_info_extractor(ie)

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
            for pp in pps:
                pp.add_progress_hook(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)

    def to_screen(self, message, skip_eol=False, quiet=None):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            return
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            return
        self._write_string(
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)

    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
            return
        self._write_string(code, self._out_files.console)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        else:
            self._send_console_code(f'\033]0;{message}\007')

    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[22;0t')  # Save the title on stack

    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[23;0t')  # Restore the title from stack

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None, is_error=True):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        @param tb          If given, is additional traceback information
        @param is_error    Whether to raise error according to ignoreerrors
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not is_error:
            return
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    Styles = Namespace(
        HEADERS='yellow',
        EMPHASIS='light blue',
        FILENAME='green',
        ID='green',
        DELIM='blue',
        ERROR='red',
        WARNING='yellow',
        SUPPRESS='light black',
    )

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        text = str(text)
        if test_encoding:
            original_text = text
            # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
                text = fallback
        return format_text(text, f) if allow_colors else text if fallback is None else fallback

    def _format_out(self, *args, **kwargs):
        return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)

    def _format_screen(self, *args, **kwargs):
        return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)

    def _format_err(self, *args, **kwargs):
        return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'DeprecationWarning: {message}')
        else:
            self.to_stderr(f'{self._format_err("DeprecationWarning:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, *args, **kwargs):
        '''
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

    def write_debug(self, message, only_once=False):
        '''Log the debug message or print it to stderr'''
        if not self.params.get('verbose', False):
            return
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']

    def _parse_outtmpl(self):
        sanitize = IDENTITY
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict)
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$'
        # correspondingly that is not what we want since we need to keep
        # '%%' intact for template dict substitution step. Working around
        # with boundary-alike separator hack.
        sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
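        # e.g. (illustrative): a lone '%' or '%s' is doubled to '%%'/'%%s',
        # while a valid template key like '%(title)s' is left intact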
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
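        # e.g. (illustrative): validate_outtmpl('%(title)s') returns None, while an
        # unterminated template like '%(title)' returns the resulting ValueError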
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err

    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)
        return info_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed
        """

        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int or slice
        FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?x)
            (?P<negate>-)?
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<remaining>
                (?P<alternate>(?<!\\),[^|&)]+)?
                (?:&(?P<replacement>.*?))?
                (?:\|(?P<default>.*?))?
            )$''')
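        # A few illustrative examples of the field syntax this regex accepts
        # (values and availability depend on the info_dict):
        #   %(duration>%H-%M-%S)s                       strftime-style formatting
        #   %(epoch-3600>%H-%M-%S)s                     maths on a field, then strftime
        #   %(release_date>%Y,upload_date>%Y|Unknown)s  alternate fields with a default
        #   %(chapters&has chapters|no chapters)s       '&' replacement if field is non-empty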

        def _traverse_infodict(k):
            k = k.split('.')
            if k[0] == '':
                k.pop(0)
            return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)

        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
            if sanitize and value == '':
                value = None
            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']
                else NO_DEFAULT))

        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            return repr(obj)

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            initial_field = mobj.group('fields') if mobj else ''
            value, replacement, default = None, None, na
            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                replacement = mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
                else:
                    break

            fmt = outer_mobj.group('format')
            if fmt == 's' and value is not None and key in field_size_compat_map.keys():
                fmt = f'0{field_size_compat_map[key]:d}d'

            value = default if value is None else value if replacement is None else replacement

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
            if fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(value, default=_dumpjson_default, indent=4 if '#' in flags else None), str_fmt
            elif fmt[-1] == 'h':  # html
                value, fmt = escapeHTML(value), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode() % str(value).encode()
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                    value), str_fmt
            elif fmt[-1] == 'D':  # decimal suffix
                num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
                value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
                                              factor=1024 if '#' in flags else 1000)
            elif fmt[-1] == 'S':  # filename sanitization
                value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
            elif fmt[-1] == 'c':
                if value:
                    value = str(value)[0]
                else:
                    fmt = str_fmt
            elif fmt[-1] not in 'rs':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'

            if sanitize:
                if fmt[-1] == 'r':
                    # If value is an object, sanitize might convert it to a string
                    # So we convert it to repr first
                    value, fmt = repr(value), str_fmt
                if fmt[-1] in 'csr':
                    value = sanitizer(initial_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT

    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
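        # e.g. (illustrative): evaluate_outtmpl('%(title)s.%(ext)s', info_dict)
        # yields 'Some Title.mp4' for info_dict = {'title': 'Some Title', 'ext': 'mp4'}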
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict

    def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
        assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
        if outtmpl is None:
            outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
        try:
            outtmpl = self._outtmpl_expandpath(outtmpl)
            filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
            if not filename:
                return None

            if tmpl_type in ('', 'temp'):
                final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
                if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
                    filename = replace_extension(filename, ext, final_ext)
            elif tmpl_type:
                force_ext = OUTTMPL_TYPES[tmpl_type]
                if force_ext:
                    filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                no_ext, *ext = filename.rsplit('.', 2)
                filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

    def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
        """Generate the output filename"""
        if outtmpl:
            assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
            dir_type = None
        filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
        if not filename and dir_type not in ('', 'temp'):
            return ''

        if warn:
            if not self.params.get('paths'):
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
        if filename == '-' or not filename:
            return filename

        return self.get_output_path(dir_type, filename)
1316
1317 def _match_entry(self, info_dict, incomplete=False, silent=False):
1318 """ Returns None if the file should be downloaded """
1319
1320 video_title = info_dict.get('title', info_dict.get('id', 'entry'))
1321
1322 def check_filter():
1323 if 'title' in info_dict:
1324 # This can happen when we're just evaluating the playlist
1325 title = info_dict['title']
1326 matchtitle = self.params.get('matchtitle', False)
1327 if matchtitle:
1328 if not re.search(matchtitle, title, re.IGNORECASE):
1329 return '"' + title + '" title did not match pattern "' + matchtitle + '"'
1330 rejecttitle = self.params.get('rejecttitle', False)
1331 if rejecttitle:
1332 if re.search(rejecttitle, title, re.IGNORECASE):
1333 return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
1334 date = info_dict.get('upload_date')
1335 if date is not None:
1336 dateRange = self.params.get('daterange', DateRange())
1337 if date not in dateRange:
1338 return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
1339 view_count = info_dict.get('view_count')
1340 if view_count is not None:
1341 min_views = self.params.get('min_views')
1342 if min_views is not None and view_count < min_views:
1343 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
1344 max_views = self.params.get('max_views')
1345 if max_views is not None and view_count > max_views:
1346 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
1347 if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
1348 return 'Skipping "%s" because it is age restricted' % video_title
1349
1350 match_filter = self.params.get('match_filter')
1351 if match_filter is not None:
1352 try:
1353 ret = match_filter(info_dict, incomplete=incomplete)
1354 except TypeError:
1355 # For backward compatibility
1356 ret = None if incomplete else match_filter(info_dict)
1357 if ret is NO_DEFAULT:
1358 while True:
1359 filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
1360 reply = input(self._format_screen(
1361 f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
1362 if reply in {'y', ''}:
1363 return None
1364 elif reply == 'n':
1365 return f'Skipping {video_title}'
1366 elif ret is not None:
1367 return ret
1368 return None
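# A minimal sketch (assumption, not part of this file) of a match_filter
# callable passed through the API; it may return None to accept the entry,
# a string reason to skip it, or NO_DEFAULT to trigger the prompt above:
#
#   def only_short_videos(info_dict, *, incomplete=False):
#       if incomplete:
#           return None  # accept for now; re-evaluated once fully extracted
#       if (info_dict.get('duration') or 0) > 600:
#           return 'Skipping: longer than 10 minutes'
#       return None
#
#   YoutubeDL({'match_filter': only_short_videos})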
1369
1370 if self.in_download_archive(info_dict):
1371 reason = '%s has already been recorded in the archive' % video_title
1372 break_opt, break_err = 'break_on_existing', ExistingVideoReached
1373 else:
1374 reason = check_filter()
1375 break_opt, break_err = 'break_on_reject', RejectedVideoReached
1376 if reason is not None:
1377 if not silent:
1378 self.to_screen('[download] ' + reason)
1379 if self.params.get(break_opt, False):
1380 raise break_err()
1381 return reason
1382
1383 @staticmethod
1384 def add_extra_info(info_dict, extra_info):
1385 '''Set the keys from extra_info in info dict if they are missing'''
1386 for key, value in extra_info.items():
1387 info_dict.setdefault(key, value)
1388
1389 def extract_info(self, url, download=True, ie_key=None, extra_info=None,
1390 process=True, force_generic_extractor=False):
1391 """
1392 Extract information for the URL and return the resulting info dict.
1393
1394 Arguments:
1395 url -- URL to extract
1396
1397 Keyword arguments:
1398 download -- whether to download videos during extraction
1399 ie_key -- extractor key hint
1400 extra_info -- dictionary containing the extra values to add to each result
1401 process -- whether to resolve all unresolved references (URLs, playlist items),
1402 must be True for download to work.
1403 force_generic_extractor -- force using the generic extractor
1404 """
1405
1406 if extra_info is None:
1407 extra_info = {}
1408
1409 if not ie_key and force_generic_extractor:
1410 ie_key = 'Generic'
1411
1412 if ie_key:
1413 ies = {ie_key: self._get_info_extractor_class(ie_key)}
1414 else:
1415 ies = self._ies
1416
1417 for ie_key, ie in ies.items():
1418 if not ie.suitable(url):
1419 continue
1420
1421 if not ie.working():
1422 self.report_warning('The program functionality for this site has been marked as broken, '
1423 'and will probably not work.')
1424
1425 temp_id = ie.get_temp_id(url)
1426 if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
1427 self.to_screen(f'[{ie_key}] {temp_id}: has already been recorded in the archive')
1428 if self.params.get('break_on_existing', False):
1429 raise ExistingVideoReached()
1430 break
1431 return self.__extract_info(url, self.get_info_extractor(ie_key), download, extra_info, process)
1432 else:
1433 self.report_error('no suitable InfoExtractor for URL %s' % url)
1434
1435 def _handle_extraction_exceptions(func):
1436 @functools.wraps(func)
1437 def wrapper(self, *args, **kwargs):
1438 while True:
1439 try:
1440 return func(self, *args, **kwargs)
1441 except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
1442 raise
1443 except ReExtractInfo as e:
1444 if e.expected:
1445 self.to_screen(f'{e}; Re-extracting data')
1446 else:
1447 self.to_stderr('\r')
1448 self.report_warning(f'{e}; Re-extracting data')
1449 continue
1450 except GeoRestrictedError as e:
1451 msg = e.msg
1452 if e.countries:
1453 msg += '\nThis video is available in %s.' % ', '.join(
1454 map(ISO3166Utils.short2full, e.countries))
1455 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around this.'
1456 self.report_error(msg)
1457 except ExtractorError as e: # An error we somewhat expected
1458 self.report_error(str(e), e.format_traceback())
1459 except Exception as e:
1460 if self.params.get('ignoreerrors'):
1461 self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
1462 else:
1463 raise
1464 break
1465 return wrapper
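# Note: the 'while True' loop above repeats only for ReExtractInfo
# ('continue'); every other except branch falls through to 'break', so a
# given extraction is retried until it succeeds or fails non-retryably.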
1466
1467 def _wait_for_video(self, ie_result={}):
1468 if (not self.params.get('wait_for_video')
1469 or ie_result.get('_type', 'video') != 'video'
1470 or ie_result.get('formats') or ie_result.get('url')):
1471 return
1472
1473 format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
1474 last_msg = ''
1475
1476 def progress(msg):
1477 nonlocal last_msg
1478 full_msg = f'{msg}\n'
1479 if not self.params.get('noprogress'):
1480 full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
1481 elif last_msg:
1482 return
1483 self.to_screen(full_msg, skip_eol=True)
1484 last_msg = msg
1485
1486 min_wait, max_wait = self.params.get('wait_for_video')
1487 diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
1488 if diff is None and ie_result.get('live_status') == 'is_upcoming':
1489 diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
1490 self.report_warning('Release time of video is not known')
1491 elif ie_result and (diff or 0) <= 0:
1492 self.report_warning('Video should already be available according to extracted info')
1493 diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
1494 self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')
1495
1496 wait_till = time.time() + diff
1497 try:
1498 while True:
1499 diff = wait_till - time.time()
1500 if diff <= 0:
1501 progress('')
1502 raise ReExtractInfo('[wait] Wait period ended', expected=True)
1503 progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
1504 time.sleep(1)
1505 except KeyboardInterrupt:
1506 progress('')
1507 raise ReExtractInfo('[wait] Interrupted by user', expected=True)
1508 except BaseException as e:
1509 if not isinstance(e, ReExtractInfo):
1510 self.to_screen('')
1511 raise
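# Illustrative (assumption): with {'wait_for_video': (60, 600)} and an
# upcoming live whose release time is unknown, the code above picks a
# random 60-600s delay, redraws the countdown once per second, and then
# raises ReExtractInfo(expected=True) to trigger a fresh extraction attempt.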
1512
1513 @_handle_extraction_exceptions
1514 def __extract_info(self, url, ie, download, extra_info, process):
1515 try:
1516 ie_result = ie.extract(url)
1517 except UserNotLive as e:
1518 if process:
1519 if self.params.get('wait_for_video'):
1520 self.report_warning(e)
1521 self._wait_for_video()
1522 raise
1523 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
1524 self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
1525 return
1526 if isinstance(ie_result, list):
1527 # Backwards compatibility: old IE result format
1528 ie_result = {
1529 '_type': 'compat_list',
1530 'entries': ie_result,
1531 }
1532 if extra_info.get('original_url'):
1533 ie_result.setdefault('original_url', extra_info['original_url'])
1534 self.add_default_extra_info(ie_result, ie, url)
1535 if process:
1536 self._wait_for_video(ie_result)
1537 return self.process_ie_result(ie_result, download, extra_info)
1538 else:
1539 return ie_result
1540
1541 def add_default_extra_info(self, ie_result, ie, url):
1542 if url is not None:
1543 self.add_extra_info(ie_result, {
1544 'webpage_url': url,
1545 'original_url': url,
1546 })
1547 webpage_url = ie_result.get('webpage_url')
1548 if webpage_url:
1549 self.add_extra_info(ie_result, {
1550 'webpage_url_basename': url_basename(webpage_url),
1551 'webpage_url_domain': get_domain(webpage_url),
1552 })
1553 if ie is not None:
1554 self.add_extra_info(ie_result, {
1555 'extractor': ie.IE_NAME,
1556 'extractor_key': ie.ie_key(),
1557 })
1558
1559 def process_ie_result(self, ie_result, download=True, extra_info=None):
1560 """
1561 Take the result of the ie (which may be modified) and resolve all unresolved
1562 references (URLs, playlist items).
1563
1564 It will also download the videos if 'download' is True.
1565 Returns the resolved ie_result.
1566 """
1567 if extra_info is None:
1568 extra_info = {}
1569 result_type = ie_result.get('_type', 'video')
1570
1571 if result_type in ('url', 'url_transparent'):
1572 ie_result['url'] = sanitize_url(
1573 ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
1574 if ie_result.get('original_url'):
1575 extra_info.setdefault('original_url', ie_result['original_url'])
1576
1577 extract_flat = self.params.get('extract_flat', False)
1578 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1579 or extract_flat is True):
1580 info_copy = ie_result.copy()
1581 ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
1582 if ie and not ie_result.get('id'):
1583 info_copy['id'] = ie.get_temp_id(ie_result['url'])
1584 self.add_default_extra_info(info_copy, ie, ie_result['url'])
1585 self.add_extra_info(info_copy, extra_info)
1586 info_copy, _ = self.pre_process(info_copy)
1587 self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
1588 self._raise_pending_errors(info_copy)
1589 if self.params.get('force_write_download_archive', False):
1590 self.record_download_archive(info_copy)
1591 return ie_result
1592
1593 if result_type == 'video':
1594 self.add_extra_info(ie_result, extra_info)
1595 ie_result = self.process_video_result(ie_result, download=download)
1596 self._raise_pending_errors(ie_result)
1597 additional_urls = (ie_result or {}).get('additional_urls')
1598 if additional_urls:
1599 # TODO: Improve MetadataParserPP to allow setting a list
1600 if isinstance(additional_urls, str):
1601 additional_urls = [additional_urls]
1602 self.to_screen(
1603 '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
1604 self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
1605 ie_result['additional_entries'] = [
1606 self.extract_info(
1607 url, download, extra_info=extra_info,
1608 force_generic_extractor=self.params.get('force_generic_extractor'))
1609 for url in additional_urls
1610 ]
1611 return ie_result
1612 elif result_type == 'url':
1613 # We have to add extra_info to the results because it may be
1614 # contained in a playlist
1615 return self.extract_info(
1616 ie_result['url'], download,
1617 ie_key=ie_result.get('ie_key'),
1618 extra_info=extra_info)
1619 elif result_type == 'url_transparent':
1620 # Use the information from the embedding page
1621 info = self.extract_info(
1622 ie_result['url'], ie_key=ie_result.get('ie_key'),
1623 extra_info=extra_info, download=False, process=False)
1624
1625 # extract_info may return None when ignoreerrors is enabled and
1626 # extraction failed with an error, don't crash and return early
1627 # in this case
1628 if not info:
1629 return info
1630
1631 exempted_fields = {'_type', 'url', 'ie_key'}
1632 if not ie_result.get('section_end') and ie_result.get('section_start') is None:
1633 # For video clips, the id etc of the clip extractor should be used
1634 exempted_fields |= {'id', 'extractor', 'extractor_key'}
1635
1636 new_result = info.copy()
1637 new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))
1638
1639 # Extracted info may not be a video result (i.e.
1640 # info.get('_type', 'video') != 'video') but rather a url or
1641 # url_transparent result. In such cases outer metadata (from ie_result)
1642 # should be propagated to inner one (info). For this to happen
1643 # _type of info should be overridden with url_transparent. This
1644 # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
1645 if new_result.get('_type') == 'url':
1646 new_result['_type'] = 'url_transparent'
1647
1648 return self.process_ie_result(
1649 new_result, download=download, extra_info=extra_info)
1650 elif result_type in ('playlist', 'multi_video'):
1651 # Protect from infinite recursion due to recursively nested playlists
1652 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
1653 webpage_url = ie_result['webpage_url']
1654 if webpage_url in self._playlist_urls:
1655 self.to_screen(
1656 '[download] Skipping already downloaded playlist: %s'
1657 % (ie_result.get('title') or ie_result.get('id')))
1658 return
1659
1660 self._playlist_level += 1
1661 self._playlist_urls.add(webpage_url)
1662 self._fill_common_fields(ie_result, False)
1663 self._sanitize_thumbnails(ie_result)
1664 try:
1665 return self.__process_playlist(ie_result, download)
1666 finally:
1667 self._playlist_level -= 1
1668 if not self._playlist_level:
1669 self._playlist_urls.clear()
1670 elif result_type == 'compat_list':
1671 self.report_warning(
1672 'Extractor %s returned a compat_list result. '
1673 'It needs to be updated.' % ie_result.get('extractor'))
1674
1675 def _fixup(r):
1676 self.add_extra_info(r, {
1677 'extractor': ie_result['extractor'],
1678 'webpage_url': ie_result['webpage_url'],
1679 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1680 'webpage_url_domain': get_domain(ie_result['webpage_url']),
1681 'extractor_key': ie_result['extractor_key'],
1682 })
1683 return r
1684 ie_result['entries'] = [
1685 self.process_ie_result(_fixup(r), download, extra_info)
1686 for r in ie_result['entries']
1687 ]
1688 return ie_result
1689 else:
1690 raise Exception('Invalid result type: %s' % result_type)
1691
1692 def _ensure_dir_exists(self, path):
1693 return make_dir(path, self.report_error)
1694
1695 @staticmethod
1696 def _playlist_infodict(ie_result, strict=False, **kwargs):
1697 info = {
1698 'playlist_count': ie_result.get('playlist_count'),
1699 'playlist': ie_result.get('title') or ie_result.get('id'),
1700 'playlist_id': ie_result.get('id'),
1701 'playlist_title': ie_result.get('title'),
1702 'playlist_uploader': ie_result.get('uploader'),
1703 'playlist_uploader_id': ie_result.get('uploader_id'),
1704 **kwargs,
1705 }
1706 if strict:
1707 return info
1708 return {
1709 **info,
1710 'playlist_index': 0,
1711 '__last_playlist_index': max(ie_result['requested_entries'] or (0, 0)),
1712 'extractor': ie_result['extractor'],
1713 'webpage_url': ie_result['webpage_url'],
1714 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1715 'webpage_url_domain': get_domain(ie_result['webpage_url']),
1716 'extractor_key': ie_result['extractor_key'],
1717 }
1718
1719 def __process_playlist(self, ie_result, download):
1720 """Process each entry in the playlist"""
1721 assert ie_result['_type'] in ('playlist', 'multi_video')
1722
1723 common_info = self._playlist_infodict(ie_result, strict=True)
1724 title = common_info.get('playlist') or '<Untitled>'
1725 if self._match_entry(common_info, incomplete=True) is not None:
1726 return
1727 self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')
1728
1729 all_entries = PlaylistEntries(self, ie_result)
1730 entries = orderedSet(all_entries.get_requested_items(), lazy=True)
1731
1732 lazy = self.params.get('lazy_playlist')
1733 if lazy:
1734 resolved_entries, n_entries = [], 'N/A'
1735 ie_result['requested_entries'], ie_result['entries'] = None, None
1736 else:
1737 entries = resolved_entries = list(entries)
1738 n_entries = len(resolved_entries)
1739 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
1740 if not ie_result.get('playlist_count'):
1741 # Better to do this after potentially exhausting entries
1742 ie_result['playlist_count'] = all_entries.get_full_count()
1743
1744 extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
1745 ie_copy = collections.ChainMap(ie_result, extra)
1746
1747 _infojson_written = False
1748 write_playlist_files = self.params.get('allow_playlist_files', True)
1749 if write_playlist_files and self.params.get('list_thumbnails'):
1750 self.list_thumbnails(ie_result)
1751 if write_playlist_files and not self.params.get('simulate'):
1752 _infojson_written = self._write_info_json(
1753 'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
1754 if _infojson_written is None:
1755 return
1756 if self._write_description('playlist', ie_result,
1757 self.prepare_filename(ie_copy, 'pl_description')) is None:
1758 return
1759 # TODO: This should be passed to ThumbnailsConvertor if necessary
1760 self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))
1761
1762 if lazy:
1763 if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
1764 self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
1765 elif self.params.get('playlistreverse'):
1766 entries.reverse()
1767 elif self.params.get('playlistrandom'):
1768 random.shuffle(entries)
1769
1770 self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} videos'
1771 f'{format_field(ie_result, "playlist_count", " of %s")}')
1772
1773 keep_resolved_entries = self.params.get('extract_flat') != 'discard'
1774 if self.params.get('extract_flat') == 'discard_in_playlist':
1775 keep_resolved_entries = ie_result['_type'] != 'playlist'
1776 if keep_resolved_entries:
1777 self.write_debug('The information of all playlist entries will be held in memory')
1778
1779 failures = 0
1780 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
1781 for i, (playlist_index, entry) in enumerate(entries):
1782 if lazy:
1783 resolved_entries.append((playlist_index, entry))
1784 if not entry:
1785 continue
1786
1787 entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
1788 if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
1789 playlist_index = ie_result['requested_entries'][i]
1790
1791 entry_copy = collections.ChainMap(entry, {
1792 **common_info,
1793 'n_entries': int_or_none(n_entries),
1794 'playlist_index': playlist_index,
1795 'playlist_autonumber': i + 1,
1796 })
1797
1798 if self._match_entry(entry_copy, incomplete=True) is not None:
1799 continue
1800
1801 self.to_screen('[download] Downloading video %s of %s' % (
1802 self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
1803
1804 extra.update({
1805 'playlist_index': playlist_index,
1806 'playlist_autonumber': i + 1,
1807 })
1808 entry_result = self.__process_iterable_entry(entry, download, extra)
1809 if not entry_result:
1810 failures += 1
1811 if failures >= max_failures:
1812 self.report_error(
1813 f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
1814 break
1815 if keep_resolved_entries:
1816 resolved_entries[i] = (playlist_index, entry_result)
1817
1818 # Update with processed data
1819 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
1820
1821 # Write the updated info to json
1822 if _infojson_written is True and self._write_info_json(
1823 'updated playlist', ie_result,
1824 self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
1825 return
1826
1827 ie_result = self.run_all_pps('playlist', ie_result)
1828 self.to_screen(f'[download] Finished downloading playlist: {title}')
1829 return ie_result
1830
1831 @_handle_extraction_exceptions
1832 def __process_iterable_entry(self, entry, download, extra_info):
1833 return self.process_ie_result(
1834 entry, download=download, extra_info=extra_info)
1835
1836 def _build_format_filter(self, filter_spec):
1837 " Returns a function to filter the formats according to the filter_spec "
1838
1839 OPERATORS = {
1840 '<': operator.lt,
1841 '<=': operator.le,
1842 '>': operator.gt,
1843 '>=': operator.ge,
1844 '=': operator.eq,
1845 '!=': operator.ne,
1846 }
1847 operator_rex = re.compile(r'''(?x)\s*
1848 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
1849 (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1850 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
1851 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
1852 m = operator_rex.fullmatch(filter_spec)
1853 if m:
1854 try:
1855 comparison_value = int(m.group('value'))
1856 except ValueError:
1857 comparison_value = parse_filesize(m.group('value'))
1858 if comparison_value is None:
1859 comparison_value = parse_filesize(m.group('value') + 'B')
1860 if comparison_value is None:
1861 raise ValueError(
1862 'Invalid value %r in format specification %r' % (
1863 m.group('value'), filter_spec))
1864 op = OPERATORS[m.group('op')]
1865
1866 if not m:
1867 STR_OPERATORS = {
1868 '=': operator.eq,
1869 '^=': lambda attr, value: attr.startswith(value),
1870 '$=': lambda attr, value: attr.endswith(value),
1871 '*=': lambda attr, value: value in attr,
1872 '~=': lambda attr, value: value.search(attr) is not None
1873 }
1874 str_operator_rex = re.compile(r'''(?x)\s*
1875 (?P<key>[a-zA-Z0-9._-]+)\s*
1876 (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
1877 (?P<quote>["'])?
1878 (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
1879 (?(quote)(?P=quote))\s*
1880 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
1881 m = str_operator_rex.fullmatch(filter_spec)
1882 if m:
1883 if m.group('op') == '~=':
1884 comparison_value = re.compile(m.group('value'))
1885 else:
1886 comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
1887 str_op = STR_OPERATORS[m.group('op')]
1888 if m.group('negation'):
1889 op = lambda attr, value: not str_op(attr, value)
1890 else:
1891 op = str_op
1892
1893 if not m:
1894 raise SyntaxError('Invalid filter specification %r' % filter_spec)
1895
1896 def _filter(f):
1897 actual_value = f.get(m.group('key'))
1898 if actual_value is None:
1899 return m.group('none_inclusive')
1900 return op(actual_value, comparison_value)
1901 return _filter
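# Illustrative filter specs accepted by the grammar above (hedged examples):
#   'height>=720'       numeric comparison via OPERATORS
#   'filesize<100M'     value parsed with parse_filesize
#   'ext=mp4'           string equality via STR_OPERATORS
#   'format_id!*=dash'  negated substring match
#   'height>?720'       trailing '?' also passes formats where height is unset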
1902
1903 def _check_formats(self, formats):
1904 for f in formats:
1905 self.to_screen('[info] Testing format %s' % f['format_id'])
1906 path = self.get_output_path('temp')
1907 if not self._ensure_dir_exists(f'{path}/'):
1908 continue
1909 temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
1910 temp_file.close()
1911 try:
1912 success, _ = self.dl(temp_file.name, f, test=True)
1913 except (DownloadError, OSError, ValueError) + network_exceptions:
1914 success = False
1915 finally:
1916 if os.path.exists(temp_file.name):
1917 try:
1918 os.remove(temp_file.name)
1919 except OSError:
1920 self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
1921 if success:
1922 yield f
1923 else:
1924 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
1925
1926 def _default_format_spec(self, info_dict, download=True):
1927
1928 def can_merge():
1929 merger = FFmpegMergerPP(self)
1930 return merger.available and merger.can_merge()
1931
1932 prefer_best = (
1933 not self.params.get('simulate')
1934 and download
1935 and (
1936 not can_merge()
1937 or info_dict.get('is_live') and not self.params.get('live_from_start')
1938 or self.params['outtmpl']['default'] == '-'))
1939 compat = (
1940 prefer_best
1941 or self.params.get('allow_multiple_audio_streams', False)
1942 or 'format-spec' in self.params['compat_opts'])
1943
1944 return (
1945 'best/bestvideo+bestaudio' if prefer_best
1946 else 'bestvideo*+bestaudio/best' if not compat
1947 else 'bestvideo+bestaudio/best')
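# e.g. (illustrative): downloading to stdout (outtmpl '-') forces
# prefer_best -> 'best/bestvideo+bestaudio'; a normal download with a
# working ffmpeg merger and no compat options -> 'bestvideo*+bestaudio/best'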
1948
1949 def build_format_selector(self, format_spec):
1950 def syntax_error(note, start):
1951 message = (
1952 'Invalid format specification: '
1953 '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
1954 return SyntaxError(message)
1955
1956 PICKFIRST = 'PICKFIRST'
1957 MERGE = 'MERGE'
1958 SINGLE = 'SINGLE'
1959 GROUP = 'GROUP'
1960 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
1961
1962 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
1963 'video': self.params.get('allow_multiple_video_streams', False)}
1964
1965 check_formats = self.params.get('check_formats') == 'selected'
1966
1967 def _parse_filter(tokens):
1968 filter_parts = []
1969 for type, string, start, _, _ in tokens:
1970 if type == tokenize.OP and string == ']':
1971 return ''.join(filter_parts)
1972 else:
1973 filter_parts.append(string)
1974
1975 def _remove_unused_ops(tokens):
1976 # Remove operators that we don't use and join them with the surrounding strings
1977 # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
1978 ALLOWED_OPS = ('/', '+', ',', '(', ')')
1979 last_string, last_start, last_end, last_line = None, None, None, None
1980 for type, string, start, end, line in tokens:
1981 if type == tokenize.OP and string == '[':
1982 if last_string:
1983 yield tokenize.NAME, last_string, last_start, last_end, last_line
1984 last_string = None
1985 yield type, string, start, end, line
1986 # everything inside brackets will be handled by _parse_filter
1987 for type, string, start, end, line in tokens:
1988 yield type, string, start, end, line
1989 if type == tokenize.OP and string == ']':
1990 break
1991 elif type == tokenize.OP and string in ALLOWED_OPS:
1992 if last_string:
1993 yield tokenize.NAME, last_string, last_start, last_end, last_line
1994 last_string = None
1995 yield type, string, start, end, line
1996 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
1997 if not last_string:
1998 last_string = string
1999 last_start = start
2000 last_end = end
2001 else:
2002 last_string += string
2003 if last_string:
2004 yield tokenize.NAME, last_string, last_start, last_end, last_line
2005
2006 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
2007 selectors = []
2008 current_selector = None
2009 for type, string, start, _, _ in tokens:
2010 # Skip the ENCODING token, which only marks the byte-stream encoding
2011 if type == getattr(tokenize, 'ENCODING', None):
2012 continue
2013 elif type in [tokenize.NAME, tokenize.NUMBER]:
2014 current_selector = FormatSelector(SINGLE, string, [])
2015 elif type == tokenize.OP:
2016 if string == ')':
2017 if not inside_group:
2018 # ')' will be handled by the parentheses group
2019 tokens.restore_last_token()
2020 break
2021 elif inside_merge and string in ['/', ',']:
2022 tokens.restore_last_token()
2023 break
2024 elif inside_choice and string == ',':
2025 tokens.restore_last_token()
2026 break
2027 elif string == ',':
2028 if not current_selector:
2029 raise syntax_error('"," must follow a format selector', start)
2030 selectors.append(current_selector)
2031 current_selector = None
2032 elif string == '/':
2033 if not current_selector:
2034 raise syntax_error('"/" must follow a format selector', start)
2035 first_choice = current_selector
2036 second_choice = _parse_format_selection(tokens, inside_choice=True)
2037 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
2038 elif string == '[':
2039 if not current_selector:
2040 current_selector = FormatSelector(SINGLE, 'best', [])
2041 format_filter = _parse_filter(tokens)
2042 current_selector.filters.append(format_filter)
2043 elif string == '(':
2044 if current_selector:
2045 raise syntax_error('Unexpected "("', start)
2046 group = _parse_format_selection(tokens, inside_group=True)
2047 current_selector = FormatSelector(GROUP, group, [])
2048 elif string == '+':
2049 if not current_selector:
2050 raise syntax_error('Unexpected "+"', start)
2051 selector_1 = current_selector
2052 selector_2 = _parse_format_selection(tokens, inside_merge=True)
2053 if not selector_2:
2054 raise syntax_error('Expected a selector', start)
2055 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
2056 else:
2057 raise syntax_error(f'Operator not recognized: "{string}"', start)
2058 elif type == tokenize.ENDMARKER:
2059 break
2060 if current_selector:
2061 selectors.append(current_selector)
2062 return selectors
2063
2064 def _merge(formats_pair):
2065 format_1, format_2 = formats_pair
2066
2067 formats_info = []
2068 formats_info.extend(format_1.get('requested_formats', (format_1,)))
2069 formats_info.extend(format_2.get('requested_formats', (format_2,)))
2070
2071 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
2072 get_no_more = {'video': False, 'audio': False}
2073 for (i, fmt_info) in enumerate(formats_info):
2074 if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
2075 formats_info.pop(i)
2076 continue
2077 for aud_vid in ['audio', 'video']:
2078 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
2079 if get_no_more[aud_vid]:
2080 formats_info.pop(i)
2081 break
2082 get_no_more[aud_vid] = True
2083
2084 if len(formats_info) == 1:
2085 return formats_info[0]
2086
2087 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
2088 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
2089
2090 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
2091 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
2092
2093 output_ext = get_compatible_ext(
2094 vcodecs=[f.get('vcodec') for f in video_fmts],
2095 acodecs=[f.get('acodec') for f in audio_fmts],
2096 vexts=[f['ext'] for f in video_fmts],
2097 aexts=[f['ext'] for f in audio_fmts],
2098 preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
2099 or self.params.get('prefer_free_formats') and ('webm', 'mkv')))
2100
2101 filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
2102
2103 new_dict = {
2104 'requested_formats': formats_info,
2105 'format': '+'.join(filtered('format')),
2106 'format_id': '+'.join(filtered('format_id')),
2107 'ext': output_ext,
2108 'protocol': '+'.join(map(determine_protocol, formats_info)),
2109 'language': '+'.join(orderedSet(filtered('language'))) or None,
2110 'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
2111 'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
2112 'tbr': sum(filtered('tbr', 'vbr', 'abr')),
2113 }
2114
2115 if the_only_video:
2116 new_dict.update({
2117 'width': the_only_video.get('width'),
2118 'height': the_only_video.get('height'),
2119 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
2120 'fps': the_only_video.get('fps'),
2121 'dynamic_range': the_only_video.get('dynamic_range'),
2122 'vcodec': the_only_video.get('vcodec'),
2123 'vbr': the_only_video.get('vbr'),
2124 'stretched_ratio': the_only_video.get('stretched_ratio'),
2125 })
2126
2127 if the_only_audio:
2128 new_dict.update({
2129 'acodec': the_only_audio.get('acodec'),
2130 'abr': the_only_audio.get('abr'),
2131 'asr': the_only_audio.get('asr'),
2132 })
2133
2134 return new_dict
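# Illustrative (assumed IDs): merging a video-only format '137' with an
# audio-only format '140' yields format_id '137+140', protocol like
# 'https+https', an ext chosen by get_compatible_ext, and
# width/height/vcodec copied from the only video stream.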
2135
2136 def _check_formats(formats):
2137 if not check_formats:
2138 yield from formats
2139 return
2140 yield from self._check_formats(formats)
2141
2142 def _build_selector_function(selector):
2143 if isinstance(selector, list): # ,
2144 fs = [_build_selector_function(s) for s in selector]
2145
2146 def selector_function(ctx):
2147 for f in fs:
2148 yield from f(ctx)
2149 return selector_function
2150
2151 elif selector.type == GROUP: # ()
2152 selector_function = _build_selector_function(selector.selector)
2153
2154 elif selector.type == PICKFIRST: # /
2155 fs = [_build_selector_function(s) for s in selector.selector]
2156
2157 def selector_function(ctx):
2158 for f in fs:
2159 picked_formats = list(f(ctx))
2160 if picked_formats:
2161 return picked_formats
2162 return []
2163
2164 elif selector.type == MERGE: # +
2165 selector_1, selector_2 = map(_build_selector_function, selector.selector)
2166
2167 def selector_function(ctx):
2168 for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
2169 yield _merge(pair)
2170
2171 elif selector.type == SINGLE: # atom
2172 format_spec = selector.selector or 'best'
2173
2174 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
2175 if format_spec == 'all':
2176 def selector_function(ctx):
2177 yield from _check_formats(ctx['formats'][::-1])
2178 elif format_spec == 'mergeall':
2179 def selector_function(ctx):
2180 formats = list(_check_formats(
2181 f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
2182 if not formats:
2183 return
2184 merged_format = formats[-1]
2185 for f in formats[-2::-1]:
2186 merged_format = _merge((merged_format, f))
2187 yield merged_format
2188
2189 else:
2190 format_fallback, separate_fallback, format_reverse, format_idx = False, None, True, 1
2191 mobj = re.match(
2192 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
2193 format_spec)
2194 if mobj is not None:
2195 format_idx = int_or_none(mobj.group('n'), default=1)
2196 format_reverse = mobj.group('bw')[0] == 'b'
2197 format_type = (mobj.group('type') or [None])[0]
2198 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
2199 format_modified = mobj.group('mod') is not None
2200
2201 format_fallback = not format_type and not format_modified # for b, w
2202 _filter_f = (
2203 (lambda f: f.get('%scodec' % format_type) != 'none')
2204 if format_type and format_modified # bv*, ba*, wv*, wa*
2205 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
2206 if format_type # bv, ba, wv, wa
2207 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
2208 if not format_modified # b, w
2209 else lambda f: True) # b*, w*
2210 filter_f = lambda f: _filter_f(f) and (
2211 f.get('vcodec') != 'none' or f.get('acodec') != 'none')
2212 else:
2213 if format_spec in self._format_selection_exts['audio']:
2214 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
2215 elif format_spec in self._format_selection_exts['video']:
2216 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
2217 separate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
2218 elif format_spec in self._format_selection_exts['storyboards']:
2219 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
2220 else:
2221 filter_f = lambda f: f.get('format_id') == format_spec # id
2222
2223 def selector_function(ctx):
2224 formats = list(ctx['formats'])
2225 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
2226 if not matches:
2227 if format_fallback and ctx['incomplete_formats']:
2228 # for extractors with incomplete formats (audio-only (soundcloud)
2229 # or video-only (imgur)), best/worst will fall back to the
2230 # best/worst {video,audio}-only format
2231 matches = formats
2232 elif separate_fallback and not ctx['has_merged_format']:
2233 # for compatibility with youtube-dl when there is no pre-merged format
2234 matches = list(filter(separate_fallback, formats))
2235 matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
2236 try:
2237 yield matches[format_idx - 1]
2238 except LazyList.IndexError:
2239 return
2240
2241 filters = [self._build_format_filter(f) for f in selector.filters]
2242
2243 def final_selector(ctx):
2244 ctx_copy = dict(ctx)
2245 for _filter in filters:
2246 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
2247 return selector_function(ctx_copy)
2248 return final_selector
2249
2250 stream = io.BytesIO(format_spec.encode())
2251 try:
2252 tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
2253 except tokenize.TokenError:
2254 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
2255
2256 class TokenIterator:
2257 def __init__(self, tokens):
2258 self.tokens = tokens
2259 self.counter = 0
2260
2261 def __iter__(self):
2262 return self
2263
2264 def __next__(self):
2265 if self.counter >= len(self.tokens):
2266 raise StopIteration()
2267 value = self.tokens[self.counter]
2268 self.counter += 1
2269 return value
2270
2271 next = __next__
2272
2273 def restore_last_token(self):
2274 self.counter -= 1
2275
2276 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
2277 return _build_selector_function(parsed_selector)
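# Illustrative walk-through (not in the original source): the spec
# 'bv*[height<=1080]+ba/b' parses into
#   PICKFIRST(MERGE(SINGLE 'bv*' [height<=1080], SINGLE 'ba'), SINGLE 'b')
# i.e. the best video-preferring format no taller than 1080p merged with
# the best audio, falling back to the best pre-merged format.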
2278
2279 def _calc_headers(self, info_dict):
2280 res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})
2281
2282 cookies = self._calc_cookies(info_dict['url'])
2283 if cookies:
2284 res['Cookie'] = cookies
2285
2286 if 'X-Forwarded-For' not in res:
2287 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
2288 if x_forwarded_for_ip:
2289 res['X-Forwarded-For'] = x_forwarded_for_ip
2290
2291 return res
2292
2293 def _calc_cookies(self, url):
2294 pr = sanitized_Request(url)
2295 self.cookiejar.add_cookie_header(pr)
2296 return pr.get_header('Cookie')
2297
2298 def _sort_thumbnails(self, thumbnails):
2299 thumbnails.sort(key=lambda t: (
2300 t.get('preference') if t.get('preference') is not None else -1,
2301 t.get('width') if t.get('width') is not None else -1,
2302 t.get('height') if t.get('height') is not None else -1,
2303 t.get('id') if t.get('id') is not None else '',
2304 t.get('url')))
2305
2306 def _sanitize_thumbnails(self, info_dict):
2307 thumbnails = info_dict.get('thumbnails')
2308 if thumbnails is None:
2309 thumbnail = info_dict.get('thumbnail')
2310 if thumbnail:
2311 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
2312 if not thumbnails:
2313 return
2314
2315 def check_thumbnails(thumbnails):
2316 for t in thumbnails:
2317 self.to_screen(f'[info] Testing thumbnail {t["id"]}')
2318 try:
2319 self.urlopen(HEADRequest(t['url']))
2320 except network_exceptions as err:
2321 self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
2322 continue
2323 yield t
2324
2325 self._sort_thumbnails(thumbnails)
2326 for i, t in enumerate(thumbnails):
2327 if t.get('id') is None:
2328 t['id'] = '%d' % i
2329 if t.get('width') and t.get('height'):
2330 t['resolution'] = '%dx%d' % (t['width'], t['height'])
2331 t['url'] = sanitize_url(t['url'])
2332
2333 if self.params.get('check_formats') is True:
2334 info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
2335 else:
2336 info_dict['thumbnails'] = thumbnails
2337
2338 def _fill_common_fields(self, info_dict, is_video=True):
2339 # TODO: move sanitization here
2340 if is_video:
2341 # playlists are allowed to lack "title"
2342 title = info_dict.get('title', NO_DEFAULT)
2343 if title is NO_DEFAULT:
2344 raise ExtractorError('Missing "title" field in extractor result',
2345 video_id=info_dict['id'], ie=info_dict['extractor'])
2346 info_dict['fulltitle'] = title
2347 if not title:
2348 if title == '':
2349 self.write_debug('Extractor gave empty title. Creating a generic title')
2350 else:
2351 self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
2352 info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'
2353
2354 if info_dict.get('duration') is not None:
2355 info_dict['duration_string'] = formatSeconds(info_dict['duration'])
2356
2357 for ts_key, date_key in (
2358 ('timestamp', 'upload_date'),
2359 ('release_timestamp', 'release_date'),
2360 ('modified_timestamp', 'modified_date'),
2361 ):
2362 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
2363 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2364 # see http://bugs.python.org/issue1646728)
2365 with contextlib.suppress(ValueError, OverflowError, OSError):
2366 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
2367 info_dict[date_key] = upload_date.strftime('%Y%m%d')
2368
2369 live_keys = ('is_live', 'was_live')
2370 live_status = info_dict.get('live_status')
2371 if live_status is None:
2372 for key in live_keys:
2373 if info_dict.get(key) is False:
2374 continue
2375 if info_dict.get(key):
2376 live_status = key
2377 break
2378 if all(info_dict.get(key) is False for key in live_keys):
2379 live_status = 'not_live'
2380 if live_status:
2381 info_dict['live_status'] = live_status
2382 for key in live_keys:
2383 if info_dict.get(key) is None:
2384 info_dict[key] = (live_status == key)
2385
2386 # Auto generate title fields corresponding to the *_number fields when missing
2387 # in order to always have clean titles. This is very common for TV series.
2388 for field in ('chapter', 'season', 'episode'):
2389 if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
2390 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
2391
2392 def _raise_pending_errors(self, info):
2393 err = info.pop('__pending_error', None)
2394 if err:
2395 self.report_error(err, tb=False)
2396
2397 def process_video_result(self, info_dict, download=True):
2398 assert info_dict.get('_type', 'video') == 'video'
2399 self._num_videos += 1
2400
2401 if 'id' not in info_dict:
2402 raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
2403 elif not info_dict.get('id'):
2404 raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])
2405
2406 def report_force_conversion(field, field_not, conversion):
2407 self.report_warning(
2408 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2409 % (field, field_not, conversion))
2410
2411 def sanitize_string_field(info, string_field):
2412 field = info.get(string_field)
2413 if field is None or isinstance(field, str):
2414 return
2415 report_force_conversion(string_field, 'a string', 'string')
2416 info[string_field] = str(field)
2417
2418 def sanitize_numeric_fields(info):
2419 for numeric_field in self._NUMERIC_FIELDS:
2420 field = info.get(numeric_field)
2421 if field is None or isinstance(field, (int, float)):
2422 continue
2423 report_force_conversion(numeric_field, 'numeric', 'int')
2424 info[numeric_field] = int_or_none(field)
2425
2426 sanitize_string_field(info_dict, 'id')
2427 sanitize_numeric_fields(info_dict)
2428 if info_dict.get('section_end') and info_dict.get('section_start') is not None:
2429 info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
2430 if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
2431 self.report_warning('"duration" field is negative, there is an error in extractor')
2432
2433 chapters = info_dict.get('chapters') or []
2434 if chapters and chapters[0].get('start_time'):
2435 chapters.insert(0, {'start_time': 0})
2436
2437 dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
2438 for idx, (prev, current, next_) in enumerate(zip(
2439 (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
2440 if current.get('start_time') is None:
2441 current['start_time'] = prev.get('end_time')
2442 if not current.get('end_time'):
2443 current['end_time'] = next_.get('start_time')
2444 if not current.get('title'):
2445 current['title'] = f'<Untitled Chapter {idx}>'
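# Illustrative: on a 100s video, chapters=[{'start_time': 30}] become
# [{'start_time': 0, 'end_time': 30, 'title': '<Untitled Chapter 1>'},
#  {'start_time': 30, 'end_time': 100, 'title': '<Untitled Chapter 2>'}]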
2446
2447 if 'playlist' not in info_dict:
2448 # It isn't part of a playlist
2449 info_dict['playlist'] = None
2450 info_dict['playlist_index'] = None
2451
2452 self._sanitize_thumbnails(info_dict)
2453
2454 thumbnail = info_dict.get('thumbnail')
2455 thumbnails = info_dict.get('thumbnails')
2456 if thumbnail:
2457 info_dict['thumbnail'] = sanitize_url(thumbnail)
2458 elif thumbnails:
2459 info_dict['thumbnail'] = thumbnails[-1]['url']
2460
2461 if info_dict.get('display_id') is None and 'id' in info_dict:
2462 info_dict['display_id'] = info_dict['id']
2463
2464 self._fill_common_fields(info_dict)
2465
2466 for cc_kind in ('subtitles', 'automatic_captions'):
2467 cc = info_dict.get(cc_kind)
2468 if cc:
2469 for _, subtitle in cc.items():
2470 for subtitle_format in subtitle:
2471 if subtitle_format.get('url'):
2472 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
2473 if subtitle_format.get('ext') is None:
2474 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
2475
2476 automatic_captions = info_dict.get('automatic_captions')
2477 subtitles = info_dict.get('subtitles')
2478
2479 info_dict['requested_subtitles'] = self.process_subtitles(
2480 info_dict['id'], subtitles, automatic_captions)
2481
2482 if info_dict.get('formats') is None:
2483 # There's only one format available
2484 formats = [info_dict]
2485 else:
2486 formats = info_dict['formats']
2487
2488 # or None ensures --clean-infojson removes it
2489 info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
2490 if not self.params.get('allow_unplayable_formats'):
2491 formats = [f for f in formats if not f.get('has_drm')]
2492 if info_dict['_has_drm'] and formats and all(
2493 f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
2494 self.report_warning(
2495 'This video is DRM protected and only images are available for download. '
2496 'Use --list-formats to see them')
2497
2498 get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
2499 if not get_from_start:
2500 info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
2501 if info_dict.get('is_live') and formats:
2502 formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
2503 if get_from_start and not formats:
2504 self.raise_no_formats(info_dict, msg=(
2505 '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
2506 'If you want to download from the current time, use --no-live-from-start'))
2507
2508 if not formats:
2509 self.raise_no_formats(info_dict)
2510
2511 def is_wellformed(f):
2512 url = f.get('url')
2513 if not url:
2514 self.report_warning(
2515 '"url" field is missing or empty - skipping format, '
2516 'there is an error in extractor')
2517 return False
2518 if isinstance(url, bytes):
2519 sanitize_string_field(f, 'url')
2520 return True
2521
2522 # Filter out malformed formats for better extraction robustness
2523 formats = list(filter(is_wellformed, formats))
2524
2525 formats_dict = {}
2526
2527 # We check that all the formats have the format and format_id fields
2528 for i, format in enumerate(formats):
2529 sanitize_string_field(format, 'format_id')
2530 sanitize_numeric_fields(format)
2531 format['url'] = sanitize_url(format['url'])
2532 if not format.get('format_id'):
2533 format['format_id'] = str(i)
2534 else:
2535 # Sanitize format_id from characters used in format selector expression
2536 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
2537 format_id = format['format_id']
2538 if format_id not in formats_dict:
2539 formats_dict[format_id] = []
2540 formats_dict[format_id].append(format)
2541
2542 # Make sure all formats have unique format_id
2543 common_exts = set(itertools.chain(*self._format_selection_exts.values()))
2544 for format_id, ambiguous_formats in formats_dict.items():
2545 ambiguous_id = len(ambiguous_formats) > 1
2546 for i, format in enumerate(ambiguous_formats):
2547 if ambiguous_id:
2548 format['format_id'] = '%s-%d' % (format_id, i)
2549 if format.get('ext') is None:
2550 format['ext'] = determine_ext(format['url']).lower()
2551 # Ensure there is no conflict between id and ext in format selection
2552 # See https://github.com/yt-dlp/yt-dlp/issues/1282
2553 if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
2554 format['format_id'] = 'f%s' % format['format_id']
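# Illustrative: two formats that both report format_id '22' are renamed to
# '22-0' and '22-1'; a format whose id collides with a selector extension
# (e.g. id 'mp4' with ext 'm4a') is prefixed to 'fmp4'.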
2555
2556 for i, format in enumerate(formats):
2557 if format.get('format') is None:
2558 format['format'] = '{id} - {res}{note}'.format(
2559 id=format['format_id'],
2560 res=self.format_resolution(format),
2561 note=format_field(format, 'format_note', ' (%s)'),
2562 )
2563 if format.get('protocol') is None:
2564 format['protocol'] = determine_protocol(format)
2565 if format.get('resolution') is None:
2566 format['resolution'] = self.format_resolution(format, default=None)
2567 if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
2568 format['dynamic_range'] = 'SDR'
2569 if (info_dict.get('duration') and format.get('tbr')
2570 and not format.get('filesize') and not format.get('filesize_approx')):
2571 format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
2572
2573 # Add HTTP headers, so that external programs can use them from the
2574 # json output
2575 full_format_info = info_dict.copy()
2576 full_format_info.update(format)
2577 format['http_headers'] = self._calc_headers(full_format_info)
2578 # Remove private housekeeping stuff
2579 if '__x_forwarded_for_ip' in info_dict:
2580 del info_dict['__x_forwarded_for_ip']
2581
2582 if self.params.get('check_formats') is True:
2583 formats = LazyList(self._check_formats(formats[::-1]), reverse=True)
2584
2585 if not formats or formats[0] is not info_dict:
2586 # only set the 'formats' field if the original info_dict lists them;
2587 # otherwise we end up with a circular reference: the first (and only)
2588 # element in the 'formats' field in info_dict would be info_dict itself,
2589 # which can't be exported to json
2590 info_dict['formats'] = formats
2591
2592 info_dict, _ = self.pre_process(info_dict)
2593
2594 if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
2595 return info_dict
2596
2597 self.post_extract(info_dict)
2598 info_dict, _ = self.pre_process(info_dict, 'after_filter')
2599
2600 # The pre-processors may have modified the formats
2601 formats = info_dict.get('formats', [info_dict])
2602
2603 list_only = self.params.get('simulate') is None and (
2604 self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
2605 interactive_format_selection = not list_only and self.format_selector == '-'
2606 if self.params.get('list_thumbnails'):
2607 self.list_thumbnails(info_dict)
2608 if self.params.get('listsubtitles'):
2609 if 'automatic_captions' in info_dict:
2610 self.list_subtitles(
2611 info_dict['id'], automatic_captions, 'automatic captions')
2612 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
2613 if self.params.get('listformats') or interactive_format_selection:
2614 self.list_formats(info_dict)
2615 if list_only:
2616 # Without this printing, -F --print-json will not work
2617 self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
2618 return info_dict
2619
2620 format_selector = self.format_selector
2621 if format_selector is None:
2622 req_format = self._default_format_spec(info_dict, download=download)
2623 self.write_debug('Default format spec: %s' % req_format)
2624 format_selector = self.build_format_selector(req_format)
2625
2626 while True:
2627 if interactive_format_selection:
2628 req_format = input(
2629 self._format_screen('\nEnter format selector: ', self.Styles.EMPHASIS))
2630 try:
2631 format_selector = self.build_format_selector(req_format)
2632 except SyntaxError as err:
2633 self.report_error(err, tb=False, is_error=False)
2634 continue
2635
2636 formats_to_download = list(format_selector({
2637 'formats': formats,
2638 'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
2639 'incomplete_formats': (
2640 # All formats are video-only or
2641 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
2642 # all formats are audio-only
2643 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
2644 }))
2645 if interactive_format_selection and not formats_to_download:
2646 self.report_error('Requested format is not available', tb=False, is_error=False)
2647 continue
2648 break
2649
2650 if not formats_to_download:
2651 if not self.params.get('ignore_no_formats_error'):
2652 raise ExtractorError(
2653 'Requested format is not available. Use --list-formats for a list of available formats',
2654 expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
2655 self.report_warning('Requested format is not available')
2656 # Process what we can, even without any available formats.
2657 formats_to_download = [{}]
2658
2659 requested_ranges = self.params.get('download_ranges')
2660 if requested_ranges:
2661 requested_ranges = tuple(requested_ranges(info_dict, self))
2662
2663 best_format, downloaded_formats = formats_to_download[-1], []
2664 if download:
2665 if best_format:
2666 def to_screen(*msg):
2667 self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')
2668
2669 to_screen(f'Downloading {len(formats_to_download)} format(s):',
2670 (f['format_id'] for f in formats_to_download))
2671 if requested_ranges:
2672 to_screen(f'Downloading {len(requested_ranges)} time ranges:',
2673 (f'{int(c["start_time"])}-{int(c["end_time"])}' for c in requested_ranges))
2674 max_downloads_reached = False
2675
2676 for fmt, chapter in itertools.product(formats_to_download, requested_ranges or [{}]):
2677 new_info = self._copy_infodict(info_dict)
2678 new_info.update(fmt)
2679 offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
2680 if chapter or offset:
2681 new_info.update({
2682 'section_start': offset + chapter.get('start_time', 0),
2683 'section_end': offset + min(chapter.get('end_time', duration), duration),
2684 'section_title': chapter.get('title'),
2685 'section_number': chapter.get('index'),
2686 })
2687 downloaded_formats.append(new_info)
2688 try:
2689 self.process_info(new_info)
2690 except MaxDownloadsReached:
2691 max_downloads_reached = True
2692 self._raise_pending_errors(new_info)
2693 # Remove copied info
2694 for key, val in tuple(new_info.items()):
2695 if info_dict.get(key) == val:
2696 new_info.pop(key)
2697 if max_downloads_reached:
2698 break
2699
2700 write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
2701 assert write_archive.issubset({True, False, 'ignore'})
2702 if True in write_archive and False not in write_archive:
2703 self.record_download_archive(info_dict)
2704
2705 info_dict['requested_downloads'] = downloaded_formats
2706 info_dict = self.run_all_pps('after_video', info_dict)
2707 if max_downloads_reached:
2708 raise MaxDownloadsReached()
2709
2710 # We update the info dict with the selected best quality format (backwards compatibility)
2711 info_dict.update(best_format)
2712 return info_dict
2713
2714 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
2715 """Select the requested subtitles and their format"""
2716 available_subs, normal_sub_langs = {}, []
2717 if normal_subtitles and self.params.get('writesubtitles'):
2718 available_subs.update(normal_subtitles)
2719 normal_sub_langs = tuple(normal_subtitles.keys())
2720 if automatic_captions and self.params.get('writeautomaticsub'):
2721 for lang, cap_info in automatic_captions.items():
2722 if lang not in available_subs:
2723 available_subs[lang] = cap_info
2724
2725 if (not (self.params.get('writesubtitles')
2726 or self.params.get('writeautomaticsub'))
2727 or not available_subs):
2728 return None
2729
2730 all_sub_langs = tuple(available_subs.keys())
2731 if self.params.get('allsubtitles', False):
2732 requested_langs = all_sub_langs
2733 elif self.params.get('subtitleslangs', False):
2734 # A list is used so that the order of languages will be the same as
2735 # given in subtitleslangs. See https://github.com/yt-dlp/yt-dlp/issues/1041
2736 requested_langs = []
2737 for lang_re in self.params.get('subtitleslangs'):
2738 discard = lang_re[0] == '-'
2739 if discard:
2740 lang_re = lang_re[1:]
2741 if lang_re == 'all':
2742 if discard:
2743 requested_langs = []
2744 else:
2745 requested_langs.extend(all_sub_langs)
2746 continue
2747 current_langs = filter(re.compile(lang_re + '$').match, all_sub_langs)
2748 if discard:
2749 for lang in current_langs:
2750 while lang in requested_langs:
2751 requested_langs.remove(lang)
2752 else:
2753 requested_langs.extend(current_langs)
2754 requested_langs = orderedSet(requested_langs)
2755 elif normal_sub_langs:
2756 requested_langs = ['en'] if 'en' in normal_sub_langs else normal_sub_langs[:1]
2757 else:
2758 requested_langs = ['en'] if 'en' in all_sub_langs else all_sub_langs[:1]
2759 if requested_langs:
2760 self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))
2761
2762 formats_query = self.params.get('subtitlesformat', 'best')
2763 formats_preference = formats_query.split('/') if formats_query else []
2764 subs = {}
2765 for lang in requested_langs:
2766 formats = available_subs.get(lang)
2767 if formats is None:
2768 self.report_warning(f'{lang} subtitles not available for {video_id}')
2769 continue
2770 for ext in formats_preference:
2771 if ext == 'best':
2772 f = formats[-1]
2773 break
2774 matches = list(filter(lambda f: f['ext'] == ext, formats))
2775 if matches:
2776 f = matches[-1]
2777 break
2778 else:
2779 f = formats[-1]
2780 self.report_warning(
2781 'No subtitle format found matching "%s" for language %s, '
2782 'using %s' % (formats_query, lang, f['ext']))
2783 subs[lang] = f
2784 return subs
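# Illustrative: subtitleslangs=['all', '-live_chat'] first adds every
# available language, then the '-'-prefixed pattern removes 'live_chat';
# a regex like 'en.*' matches 'en', 'en-US', etc. via
# re.compile('en.*$').match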
2785
2786 def _forceprint(self, key, info_dict):
2787 if info_dict is None:
2788 return
2789 info_copy = info_dict.copy()
2790 info_copy['formats_table'] = self.render_formats_table(info_dict)
2791 info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
2792 info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
2793 info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))
2794
2795 def format_tmpl(tmpl):
2796 mobj = re.match(r'\w+(=?)$', tmpl)
2797 if mobj and mobj.group(1):
2798 return f'{tmpl[:-1]} = %({tmpl[:-1]})r'
2799 elif mobj:
2800 return f'%({tmpl})s'
2801 return tmpl
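# For example (field names assumed): format_tmpl('id') -> '%(id)s' and
# format_tmpl('title=') -> 'title = %(title)r', while a string that is
# already a full template, e.g. '%(title)s by %(uploader)s', falls
# through unchanged.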
2802
2803 for tmpl in self.params['forceprint'].get(key, []):
2804 self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))
2805
2806 for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
2807 filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
2808 tmpl = format_tmpl(tmpl)
2809 self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
2810 if self._ensure_dir_exists(filename):
2811 with open(filename, 'a', encoding='utf-8') as f:
2812 f.write(self.evaluate_outtmpl(tmpl, info_copy) + '\n')
2813
2814 def __forced_printings(self, info_dict, filename, incomplete):
2815 def print_mandatory(field, actual_field=None):
2816 if actual_field is None:
2817 actual_field = field
2818 if (self.params.get('force%s' % field, False)
2819 and (not incomplete or info_dict.get(actual_field) is not None)):
2820 self.to_stdout(info_dict[actual_field])
2821
2822 def print_optional(field):
2823 if (self.params.get('force%s' % field, False)
2824 and info_dict.get(field) is not None):
2825 self.to_stdout(info_dict[field])
2826
2827 info_dict = info_dict.copy()
2828 if filename is not None:
2829 info_dict['filename'] = filename
2830 if info_dict.get('requested_formats') is not None:
2831 # For RTMP URLs, also include the playpath
2832 info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
2833 elif info_dict.get('url'):
2834 info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')
2835
2836 if (self.params.get('forcejson')
2837 or self.params['forceprint'].get('video')
2838 or self.params['print_to_file'].get('video')):
2839 self.post_extract(info_dict)
2840 self._forceprint('video', info_dict)
2841
2842 print_mandatory('title')
2843 print_mandatory('id')
2844 print_mandatory('url', 'urls')
2845 print_optional('thumbnail')
2846 print_optional('description')
2847 print_optional('filename')
2848 if self.params.get('forceduration') and info_dict.get('duration') is not None:
2849 self.to_stdout(formatSeconds(info_dict['duration']))
2850 print_mandatory('format')
2851
2852 if self.params.get('forcejson'):
2853 self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
2854
2855 def dl(self, name, info, subtitle=False, test=False):
2856 if not info.get('url'):
2857 self.raise_no_formats(info, True)
2858
2859 if test:
2860 verbose = self.params.get('verbose')
2861 params = {
2862 'test': True,
2863 'quiet': self.params.get('quiet') or not verbose,
2864 'verbose': verbose,
2865 'noprogress': not verbose,
2866 'nopart': True,
2867 'skip_unavailable_fragments': False,
2868 'keep_fragments': False,
2869 'overwrites': True,
2870 '_no_ytdl_file': True,
2871 }
2872 else:
2873 params = self.params
2874 fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
2875 if not test:
2876 for ph in self._progress_hooks:
2877 fd.add_progress_hook(ph)
2878 urls = '", "'.join(
2879 (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
2880 for f in info.get('requested_formats', []) or [info])
2881 self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')
2882
2883 # Note: Ideally, info should be deep-copied so that hooks cannot modify it.
2884 # But it may contain objects that are not deep-copyable
2885 new_info = self._copy_infodict(info)
2886 if new_info.get('http_headers') is None:
2887 new_info['http_headers'] = self._calc_headers(new_info)
2888 return fd.download(name, new_info, subtitle)
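# Call-site contract, as used in process_info() below: for video downloads
# this evaluates to the downloader's (success, real_download) pair, e.g.
#   success, real_download = self.dl(temp_filename, info_dict)
# while subtitle downloads pass subtitle=True and ignore the return value.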
2889
2890 def existing_file(self, filepaths, *, default_overwrite=True):
2891 existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
2892 if existing_files and not self.params.get('overwrites', default_overwrite):
2893 return existing_files[0]
2894
2895 for file in existing_files:
2896 self.report_file_delete(file)
2897 os.remove(file)
2898 return None
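# Behaviour sketch (hypothetical paths): when overwriting is disabled the
# first existing candidate is reused; otherwise every existing candidate is
# deleted so the download starts fresh:
#   self.existing_file(('v.mkv', 'v.webm'), default_overwrite=False)
#   # -> 'v.mkv' if it exists and 'overwrites' is unset/False; else None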
2899
2900 def process_info(self, info_dict):
2901 """Process a single resolved IE result. (Modifies it in-place)"""
2902
2903 assert info_dict.get('_type', 'video') == 'video'
2904 original_infodict = info_dict
2905
2906 if 'format' not in info_dict and 'ext' in info_dict:
2907 info_dict['format'] = info_dict['ext']
2908
2909 # This is mostly just for backward compatibility of process_info
2910 # As a side-effect, this allows for format-specific filters
2911 if self._match_entry(info_dict) is not None:
2912 info_dict['__write_download_archive'] = 'ignore'
2913 return
2914
2915 # Does nothing under normal operation - for backward compatibility of process_info
2916 self.post_extract(info_dict)
2917 self._num_downloads += 1
2918
2919 # info_dict['_filename'] needs to be set for backward compatibility
2920 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2921 temp_filename = self.prepare_filename(info_dict, 'temp')
2922 files_to_move = {}
2923
2924 # Forced printings
2925 self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
2926
2927 def check_max_downloads():
2928 if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
2929 raise MaxDownloadsReached()
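# Note the coercion trick above: float(None or 'inf') == float('inf'), so
# when 'max_downloads' is unset the comparison can never raise
# MaxDownloadsReached.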
2930
2931 if self.params.get('simulate'):
2932 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
2933 check_max_downloads()
2934 return
2935
2936 if full_filename is None:
2937 return
2938 if not self._ensure_dir_exists(encodeFilename(full_filename)):
2939 return
2940 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
2941 return
2942
2943 if self._write_description('video', info_dict,
2944 self.prepare_filename(info_dict, 'description')) is None:
2945 return
2946
2947 sub_files = self._write_subtitles(info_dict, temp_filename)
2948 if sub_files is None:
2949 return
2950 files_to_move.update(dict(sub_files))
2951
2952 thumb_files = self._write_thumbnails(
2953 'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
2954 if thumb_files is None:
2955 return
2956 files_to_move.update(dict(thumb_files))
2957
2958 infofn = self.prepare_filename(info_dict, 'infojson')
2959 _infojson_written = self._write_info_json('video', info_dict, infofn)
2960 if _infojson_written:
2961 info_dict['infojson_filename'] = infofn
2962 # For backward compatibility, even though it was a private field
2963 info_dict['__infojson_filename'] = infofn
2964 elif _infojson_written is None:
2965 return
2966
2967 # Note: Annotations are deprecated
2968 annofn = None
2969 if self.params.get('writeannotations', False):
2970 annofn = self.prepare_filename(info_dict, 'annotation')
2971 if annofn:
2972 if not self._ensure_dir_exists(encodeFilename(annofn)):
2973 return
2974 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
2975 self.to_screen('[info] Video annotations are already present')
2976 elif not info_dict.get('annotations'):
2977 self.report_warning('There are no annotations to write.')
2978 else:
2979 try:
2980 self.to_screen('[info] Writing video annotations to: ' + annofn)
2981 with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
2982 annofile.write(info_dict['annotations'])
2983 except (KeyError, TypeError):
2984 self.report_warning('There are no annotations to write.')
2985 except OSError:
2986 self.report_error('Cannot write annotations file: ' + annofn)
2987 return
2988
2989 # Write internet shortcut files
2990 def _write_link_file(link_type):
2991 url = try_get(info_dict['webpage_url'], iri_to_uri)
2992 if not url:
2993 self.report_warning(
2994 f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
2995 return True
2996 linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
2997 if not self._ensure_dir_exists(encodeFilename(linkfn)):
2998 return False
2999 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
3000 self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
3001 return True
3002 try:
3003 self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
3004 with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
3005 newline='\r\n' if link_type == 'url' else '\n') as linkfile:
3006 template_vars = {'url': url}
3007 if link_type == 'desktop':
3008 template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
3009 linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
3010 except OSError:
3011 self.report_error(f'Cannot write internet shortcut {linkfn}')
3012 return False
3013 return True
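# Rough shape of the result for link_type == 'url', assuming the template in
# utils.LINK_TEMPLATES follows the standard Windows .url layout (shown here
# only as an illustration, not the verbatim template):
#   [InternetShortcut]
#   URL=%(url)s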
3014
3015 write_links = {
3016 'url': self.params.get('writeurllink'),
3017 'webloc': self.params.get('writewebloclink'),
3018 'desktop': self.params.get('writedesktoplink'),
3019 }
3020 if self.params.get('writelink'):
3021 link_type = ('webloc' if sys.platform == 'darwin'
3022 else 'desktop' if sys.platform.startswith('linux')
3023 else 'url')
3024 write_links[link_type] = True
3025
3026 if any(should_write and not _write_link_file(link_type)
3027 for link_type, should_write in write_links.items()):
3028 return
3029
3030 def replace_info_dict(new_info):
3031 nonlocal info_dict
3032 if new_info == info_dict:
3033 return
3034 info_dict.clear()
3035 info_dict.update(new_info)
3036
3037 new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
3038 replace_info_dict(new_info)
3039
3040 if self.params.get('skip_download'):
3041 info_dict['filepath'] = temp_filename
3042 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3043 info_dict['__files_to_move'] = files_to_move
3044 replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
3045 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
3046 else:
3047 # Download
3048 info_dict.setdefault('__postprocessors', [])
3049 try:
3050
3051 def existing_video_file(*filepaths):
3052 ext = info_dict.get('ext')
3053 converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
3054 file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
3055 default_overwrite=False)
3056 if file:
3057 info_dict['ext'] = os.path.splitext(file)[1][1:]
3058 return file
3059
3060 fd, success = None, True
3061 if info_dict.get('protocol') or info_dict.get('url'):
3062 fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
3063 if fd is not FFmpegFD and (
3064 info_dict.get('section_start') or info_dict.get('section_end')):
3065 msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
3066 else 'You have requested downloading the video partially, but ffmpeg is not installed')
3067 self.report_error(f'{msg}. Aborting')
3068 return
3069
3070 if info_dict.get('requested_formats') is not None:
3071 requested_formats = info_dict['requested_formats']
3072 old_ext = info_dict['ext']
3073 if self.params.get('merge_output_format') is None:
3074 if (info_dict['ext'] == 'webm'
3075 and info_dict.get('thumbnails')
3076 # check with type instead of pp_key, __name__, or isinstance
3077 # since we don't want any custom PPs to trigger this
3078 and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])): # noqa: E721
3079 info_dict['ext'] = 'mkv'
3080 self.report_warning(
3081 'webm doesn\'t support embedding a thumbnail, mkv will be used')
3082 new_ext = info_dict['ext']
3083
3084 def correct_ext(filename, ext=new_ext):
3085 if filename == '-':
3086 return filename
3087 filename_real_ext = os.path.splitext(filename)[1][1:]
3088 filename_wo_ext = (
3089 os.path.splitext(filename)[0]
3090 if filename_real_ext in (old_ext, new_ext)
3091 else filename)
3092 return f'{filename_wo_ext}.{ext}'
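# Worked examples (hypothetical names), with old_ext='webm', new_ext='mkv':
#   correct_ext('video.webm') -> 'video.mkv'       # known ext is swapped
#   correct_ext('video.part') -> 'video.part.mkv'  # unknown ext is kept
#   correct_ext('-')          -> '-'               # stdout stays untouched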
3093
3094 # Ensure filename always has a correct extension for successful merge
3095 full_filename = correct_ext(full_filename)
3096 temp_filename = correct_ext(temp_filename)
3097 dl_filename = existing_video_file(full_filename, temp_filename)
3098 info_dict['__real_download'] = False
3099
3100 merger = FFmpegMergerPP(self)
3101 downloaded = []
3102 if dl_filename is not None:
3103 self.report_file_already_downloaded(dl_filename)
3104 elif fd:
3105 for f in requested_formats if fd != FFmpegFD else []:
3106 f['filepath'] = fname = prepend_extension(
3107 correct_ext(temp_filename, info_dict['ext']),
3108 'f%s' % f['format_id'], info_dict['ext'])
3109 downloaded.append(fname)
3110 info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
3111 success, real_download = self.dl(temp_filename, info_dict)
3112 info_dict['__real_download'] = real_download
3113 else:
3114 if self.params.get('allow_unplayable_formats'):
3115 self.report_warning(
3116 'You have requested merging of multiple formats '
3117 'while also allowing unplayable formats to be downloaded. '
3118 'The formats won\'t be merged to prevent data corruption.')
3119 elif not merger.available:
3120 msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
3121 if not self.params.get('ignoreerrors'):
3122 self.report_error(f'{msg}. Aborting due to --abort-on-error')
3123 return
3124 self.report_warning(f'{msg}. The formats won\'t be merged')
3125
3126 if temp_filename == '-':
3127 reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
3128 else 'but the formats are incompatible for simultaneous download' if merger.available
3129 else 'but ffmpeg is not installed')
3130 self.report_warning(
3131 f'You have requested downloading multiple formats to stdout {reason}. '
3132 'The formats will be streamed one after the other')
3133 fname = temp_filename
3134 for f in requested_formats:
3135 new_info = dict(info_dict)
3136 del new_info['requested_formats']
3137 new_info.update(f)
3138 if temp_filename != '-':
3139 fname = prepend_extension(
3140 correct_ext(temp_filename, new_info['ext']),
3141 'f%s' % f['format_id'], new_info['ext'])
3142 if not self._ensure_dir_exists(fname):
3143 return
3144 f['filepath'] = fname
3145 downloaded.append(fname)
3146 partial_success, real_download = self.dl(fname, new_info)
3147 info_dict['__real_download'] = info_dict['__real_download'] or real_download
3148 success = success and partial_success
3149
3150 if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
3151 info_dict['__postprocessors'].append(merger)
3152 info_dict['__files_to_merge'] = downloaded
3153 # Even if nothing new was downloaded, the merge itself only happens now
3154 info_dict['__real_download'] = True
3155 else:
3156 for file in downloaded:
3157 files_to_move[file] = None
3158 else:
3159 # Just a single file
3160 dl_filename = existing_video_file(full_filename, temp_filename)
3161 if dl_filename is None or dl_filename == temp_filename:
3162 # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
3163 # So we should try to resume the download
3164 success, real_download = self.dl(temp_filename, info_dict)
3165 info_dict['__real_download'] = real_download
3166 else:
3167 self.report_file_already_downloaded(dl_filename)
3168
3169 dl_filename = dl_filename or temp_filename
3170 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3171
3172 except network_exceptions as err:
3173 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
3174 return
3175 except OSError as err:
3176 raise UnavailableVideoError(err)
3177 except (ContentTooShortError, ) as err:
3178 self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
3179 return
3180
3181 self._raise_pending_errors(info_dict)
3182 if success and full_filename != '-':
3183
3184 def fixup():
3185 do_fixup = True
3186 fixup_policy = self.params.get('fixup')
3187 vid = info_dict['id']
3188
3189 if fixup_policy in ('ignore', 'never'):
3190 return
3191 elif fixup_policy == 'warn':
3192 do_fixup = 'warn'
3193 elif fixup_policy != 'force':
3194 assert fixup_policy in ('detect_or_warn', None)
3195 if not info_dict.get('__real_download'):
3196 do_fixup = False
3197
3198 def ffmpeg_fixup(cndn, msg, cls):
3199 if not (do_fixup and cndn):
3200 return
3201 elif do_fixup == 'warn':
3202 self.report_warning(f'{vid}: {msg}')
3203 return
3204 pp = cls(self)
3205 if pp.available:
3206 info_dict['__postprocessors'].append(pp)
3207 else:
3208 self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
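# Summary of the policy branches above, for the ffmpeg_fixup() calls that
# follow: 'never'/'ignore' do nothing; 'warn' only reports; the default
# ('detect_or_warn'/None) attaches the fixup postprocessor, but only for
# freshly downloaded files; 'force' attaches it unconditionally.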
3209
3210 stretched_ratio = info_dict.get('stretched_ratio')
3211 ffmpeg_fixup(stretched_ratio not in (1, None),
3212 f'Non-uniform pixel ratio {stretched_ratio}',
3213 FFmpegFixupStretchedPP)
3214
3215 downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
3216 downloader = downloader.FD_NAME if downloader else None
3217
3218 ext = info_dict.get('ext')
3219 postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
3220 isinstance(pp, FFmpegVideoConvertorPP)
3221 and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
3222 ) for pp in self._pps['post_process'])
3223
3224 if not postprocessed_by_ffmpeg:
3225 ffmpeg_fixup(ext == 'm4a' and info_dict.get('container') == 'm4a_dash',
3226 'writing DASH m4a. Only some players support this container',
3227 FFmpegFixupM4aPP)
3228 ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
3229 or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
3230 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
3231 FFmpegFixupM3u8PP)
3232 ffmpeg_fixup(info_dict.get('is_live') and downloader == 'dashsegments',
3233 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)
3234
3235 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
3236 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)
3237
3238 fixup()
3239 try:
3240 replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
3241 except PostProcessingError as err:
3242 self.report_error('Postprocessing: %s' % str(err))
3243 return
3244 try:
3245 for ph in self._post_hooks:
3246 ph(info_dict['filepath'])
3247 except Exception as err:
3248 self.report_error('post hooks: %s' % str(err))
3249 return
3250 info_dict['__write_download_archive'] = True
3251
3252 assert info_dict is original_infodict # Make sure the info_dict was modified in-place
3253 if self.params.get('force_write_download_archive'):
3254 info_dict['__write_download_archive'] = True
3255 check_max_downloads()
3256
3257 def __download_wrapper(self, func):
3258 @functools.wraps(func)
3259 def wrapper(*args, **kwargs):
3260 try:
3261 res = func(*args, **kwargs)
3262 except UnavailableVideoError as e:
3263 self.report_error(e)
3264 except DownloadCancelled as e:
3265 self.to_screen(f'[info] {e}')
3266 if not self.params.get('break_per_url'):
3267 raise
3268 else:
3269 if self.params.get('dump_single_json', False):
3270 self.post_extract(res)
3271 self.to_stdout(json.dumps(self.sanitize_info(res)))
3272 return wrapper
3273
3274 def download(self, url_list):
3275 """Download a given list of URLs."""
3276 url_list = variadic(url_list) # Passing a single URL is a common mistake
3277 outtmpl = self.params['outtmpl']['default']
3278 if (len(url_list) > 1
3279 and outtmpl != '-'
3280 and '%' not in outtmpl
3281 and self.params.get('max_downloads') != 1):
3282 raise SameFileError(outtmpl)
3283
3284 for url in url_list:
3285 self.__download_wrapper(self.extract_info)(
3286 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
3287
3288 return self._download_retcode
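# Minimal usage sketch (the URL is a placeholder; note the list, although a
# bare string would be wrapped by variadic() above anyway):
#   with YoutubeDL({'outtmpl': '%(id)s.%(ext)s'}) as ydl:
#       retcode = ydl.download(['https://example.com/watch?v=xyz'])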
3289
3290 def download_with_info_file(self, info_filename):
3291 with contextlib.closing(fileinput.FileInput(
3292 [info_filename], mode='r',
3293 openhook=fileinput.hook_encoded('utf-8'))) as f:
3294 # FileInput doesn't have a read method, so we can't call json.load
3295 info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
3296 try:
3297 self.__download_wrapper(self.process_ie_result)(info, download=True)
3298 except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
3299 if not isinstance(e, EntryNotInPlaylist):
3300 self.to_stderr('\r')
3301 webpage_url = info.get('webpage_url')
3302 if webpage_url is not None:
3303 self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
3304 return self.download([webpage_url])
3305 else:
3306 raise
3307 return self._download_retcode
3308
3309 @staticmethod
3310 def sanitize_info(info_dict, remove_private_keys=False):
3311 ''' Sanitize the infodict for converting to json '''
3312 if info_dict is None:
3313 return info_dict
3314 info_dict.setdefault('epoch', int(time.time()))
3315 info_dict.setdefault('_type', 'video')
3316
3317 if remove_private_keys:
3318 reject = lambda k, v: v is None or k.startswith('__') or k in {
3319 'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
3320 'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber',
3321 }
3322 else:
3323 reject = lambda k, v: False
3324
3325 def filter_fn(obj):
3326 if isinstance(obj, dict):
3327 return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
3328 elif isinstance(obj, (list, tuple, set, LazyList)):
3329 return list(map(filter_fn, obj))
3330 elif obj is None or isinstance(obj, (str, int, float, bool)):
3331 return obj
3332 else:
3333 return repr(obj)
3334
3335 return filter_fn(info_dict)
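# Example of the fallback behaviour (illustrative values): dicts and
# sequences recurse, JSON-native scalars pass through, anything else is
# stringified with repr():
#   YoutubeDL.sanitize_info({'id': 'x', 'dur': 1.5, 'obj': object()})
#   # -> {'id': 'x', 'dur': 1.5, 'obj': '<object object at 0x...>',
#   #     'epoch': <int>, '_type': 'video'}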
3336
3337 @staticmethod
3338 def filter_requested_info(info_dict, actually_filter=True):
3339 ''' Alias of sanitize_info for backward compatibility '''
3340 return YoutubeDL.sanitize_info(info_dict, actually_filter)
3341
3342 def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
3343 for filename in set(filter(None, files_to_delete)):
3344 if msg:
3345 self.to_screen(msg % filename)
3346 try:
3347 os.remove(filename)
3348 except OSError:
3349 self.report_warning(f'Unable to delete file {filename}')
3350 if filename in info.get('__files_to_move', []): # NB: Delete even if None
3351 del info['__files_to_move'][filename]
3352
3353 @staticmethod
3354 def post_extract(info_dict):
3355 def actual_post_extract(info_dict):
3356 if info_dict.get('_type') in ('playlist', 'multi_video'):
3357 for video_dict in info_dict.get('entries', {}):
3358 actual_post_extract(video_dict or {})
3359 return
3360
3361 post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
3362 info_dict.update(post_extractor())
3363
3364 actual_post_extract(info_dict or {})
3365
3366 def run_pp(self, pp, infodict):
3367 files_to_delete = []
3368 if '__files_to_move' not in infodict:
3369 infodict['__files_to_move'] = {}
3370 try:
3371 files_to_delete, infodict = pp.run(infodict)
3372 except PostProcessingError as e:
3373 # Must be True and not 'only_download'
3374 if self.params.get('ignoreerrors') is True:
3375 self.report_error(e)
3376 return infodict
3377 raise
3378
3379 if not files_to_delete:
3380 return infodict
3381 if self.params.get('keepvideo', False):
3382 for f in files_to_delete:
3383 infodict['__files_to_move'].setdefault(f, '')
3384 else:
3385 self._delete_downloaded_files(
3386 *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
3387 return infodict
3388
3389 def run_all_pps(self, key, info, *, additional_pps=None):
3390 self._forceprint(key, info)
3391 for pp in (additional_pps or []) + self._pps[key]:
3392 info = self.run_pp(pp, info)
3393 return info
3394
3395 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
3396 info = dict(ie_info)
3397 info['__files_to_move'] = files_to_move or {}
3398 try:
3399 info = self.run_all_pps(key, info)
3400 except PostProcessingError as err:
3401 msg = f'Preprocessing: {err}'
3402 info.setdefault('__pending_error', msg)
3403 self.report_error(msg, is_error=False)
3404 return info, info.pop('__files_to_move', None)
3405
3406 def post_process(self, filename, info, files_to_move=None):
3407 """Run all the postprocessors on the given file."""
3408 info['filepath'] = filename
3409 info['__files_to_move'] = files_to_move or {}
3410 info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
3411 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
3412 del info['__files_to_move']
3413 return self.run_all_pps('after_move', info)
3414
3415 def _make_archive_id(self, info_dict):
3416 video_id = info_dict.get('id')
3417 if not video_id:
3418 return
3419 # Future-proof against any change in case
3420 # and backwards compatibility with prior versions
3421 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
3422 if extractor is None:
3423 url = str_or_none(info_dict.get('url'))
3424 if not url:
3425 return
3426 # Try to find matching extractor for the URL and take its ie_key
3427 for ie_key, ie in self._ies.items():
3428 if ie.suitable(url):
3429 extractor = ie_key
3430 break
3431 else:
3432 return
3433 return make_archive_id(extractor, video_id)
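# Illustration (assumed values): make_archive_id() lowercases the extractor
# key, so a YouTube entry produces an archive line like
# 'youtube dQw4w9WgXcQ', which is also the form matched by
# in_download_archive() below.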
3434
3435 def in_download_archive(self, info_dict):
3436 fn = self.params.get('download_archive')
3437 if fn is None:
3438 return False
3439
3440 vid_ids = [self._make_archive_id(info_dict)]
3441 vid_ids.extend(info_dict.get('_old_archive_ids', []))
3442 return any(id_ in self.archive for id_ in vid_ids)
3443
3444 def record_download_archive(self, info_dict):
3445 fn = self.params.get('download_archive')
3446 if fn is None:
3447 return
3448 vid_id = self._make_archive_id(info_dict)
3449 assert vid_id
3450 self.write_debug(f'Adding to archive: {vid_id}')
3451 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
3452 archive_file.write(vid_id + '\n')
3453 self.archive.add(vid_id)
3454
3455 @staticmethod
3456 def format_resolution(format, default='unknown'):
3457 if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
3458 return 'audio only'
3459 if format.get('resolution') is not None:
3460 return format['resolution']
3461 if format.get('width') and format.get('height'):
3462 return '%dx%d' % (format['width'], format['height'])
3463 elif format.get('height'):
3464 return '%sp' % format['height']
3465 elif format.get('width'):
3466 return '%dx?' % format['width']
3467 return default
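# Worked examples: {'width': 1920, 'height': 1080} -> '1920x1080',
# {'height': 720} -> '720p', {'width': 640} -> '640x?', and
# {'vcodec': 'none', 'acodec': 'mp4a.40.2'} -> 'audio only'.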
3468
3469 def _list_format_headers(self, *headers):
3470 if self.params.get('listformats_table', True) is not False:
3471 return [self._format_out(header, self.Styles.HEADERS) for header in headers]
3472 return headers
3473
3474 def _format_note(self, fdict):
3475 res = ''
3476 if fdict.get('ext') in ['f4f', 'f4m']:
3477 res += '(unsupported)'
3478 if fdict.get('language'):
3479 if res:
3480 res += ' '
3481 res += '[%s]' % fdict['language']
3482 if fdict.get('format_note') is not None:
3483 if res:
3484 res += ' '
3485 res += fdict['format_note']
3486 if fdict.get('tbr') is not None:
3487 if res:
3488 res += ', '
3489 res += '%4dk' % fdict['tbr']
3490 if fdict.get('container') is not None:
3491 if res:
3492 res += ', '
3493 res += '%s container' % fdict['container']
3494 if (fdict.get('vcodec') is not None
3495 and fdict.get('vcodec') != 'none'):
3496 if res:
3497 res += ', '
3498 res += fdict['vcodec']
3499 if fdict.get('vbr') is not None:
3500 res += '@'
3501 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
3502 res += 'video@'
3503 if fdict.get('vbr') is not None:
3504 res += '%4dk' % fdict['vbr']
3505 if fdict.get('fps') is not None:
3506 if res:
3507 res += ', '
3508 res += '%sfps' % fdict['fps']
3509 if fdict.get('acodec') is not None:
3510 if res:
3511 res += ', '
3512 if fdict['acodec'] == 'none':
3513 res += 'video only'
3514 else:
3515 res += '%-5s' % fdict['acodec']
3516 elif fdict.get('abr') is not None:
3517 if res:
3518 res += ', '
3519 res += 'audio'
3520 if fdict.get('abr') is not None:
3521 res += '@%3dk' % fdict['abr']
3522 if fdict.get('asr') is not None:
3523 res += ' (%5dHz)' % fdict['asr']
3524 if fdict.get('filesize') is not None:
3525 if res:
3526 res += ', '
3527 res += format_bytes(fdict['filesize'])
3528 elif fdict.get('filesize_approx') is not None:
3529 if res:
3530 res += ', '
3531 res += '~' + format_bytes(fdict['filesize_approx'])
3532 return res
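# Roughly what this yields for a hypothetical fdict such as
# {'tbr': 1000, 'vcodec': 'avc1', 'vbr': 900, 'acodec': 'mp4a', 'abr': 128}:
#   '1000k, avc1@ 900k, mp4a @128k'
# (the extra spaces come from the %4d/%-5s/%3d format specifiers above).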
3533
3534 def render_formats_table(self, info_dict):
3535 if not info_dict.get('formats') and not info_dict.get('url'):
3536 return None
3537
3538 formats = info_dict.get('formats', [info_dict])
3539 if self.params.get('listformats_table', True) is False:
3540 table = [
3541 [
3542 format_field(f, 'format_id'),
3543 format_field(f, 'ext'),
3544 self.format_resolution(f),
3545 self._format_note(f)
3546 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3547 return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
3548
3549 def simplified_codec(f, field):
3550 assert field in ('acodec', 'vcodec')
3551 codec = f.get(field, 'unknown')
3552 if not codec:
3553 return 'unknown'
3554 elif codec != 'none':
3555 return '.'.join(codec.split('.')[:4])
3556
3557 if field == 'vcodec' and f.get('acodec') == 'none':
3558 return 'images'
3559 elif field == 'acodec' and f.get('vcodec') == 'none':
3560 return ''
3561 return self._format_out('audio only' if field == 'vcodec' else 'video only',
3562 self.Styles.SUPPRESS)
3563
3564 delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
3565 table = [
3566 [
3567 self._format_out(format_field(f, 'format_id'), self.Styles.ID),
3568 format_field(f, 'ext'),
3569 format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
3570 format_field(f, 'fps', '\t%d', func=round),
3571 format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
3572 delim,
3573 format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
3574 format_field(f, 'tbr', '\t%dk', func=round),
3575 shorten_protocol_name(f.get('protocol', '')),
3576 delim,
3577 simplified_codec(f, 'vcodec'),
3578 format_field(f, 'vbr', '\t%dk', func=round),
3579 simplified_codec(f, 'acodec'),
3580 format_field(f, 'abr', '\t%dk', func=round),
3581 format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
3582 join_nonempty(
3583 self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
3584 format_field(f, 'language', '[%s]'),
3585 join_nonempty(format_field(f, 'format_note'),
3586 format_field(f, 'container', ignore=(None, f.get('ext'))),
3587 delim=', '),
3588 delim=' '),
3589 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3590 header_line = self._list_format_headers(
3591 'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', delim, '\tFILESIZE', '\tTBR', 'PROTO',
3592 delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')
3593
3594 return render_table(
3595 header_line, table, hide_empty=True,
3596 delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
3597
3598 def render_thumbnails_table(self, info_dict):
3599 thumbnails = list(info_dict.get('thumbnails') or [])
3600 if not thumbnails:
3601 return None
3602 return render_table(
3603 self._list_format_headers('ID', 'Width', 'Height', 'URL'),
3604 [[t.get('id'), t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])
3605
3606 def render_subtitles_table(self, video_id, subtitles):
3607 def _row(lang, formats):
3608 exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
3609 if len(set(names)) == 1:
3610 names = [] if names[0] == 'unknown' else names[:1]
3611 return [lang, ', '.join(names), ', '.join(exts)]
3612
3613 if not subtitles:
3614 return None
3615 return render_table(
3616 self._list_format_headers('Language', 'Name', 'Formats'),
3617 [_row(lang, formats) for lang, formats in subtitles.items()],
3618 hide_empty=True)
3619
3620 def __list_table(self, video_id, name, func, *args):
3621 table = func(*args)
3622 if not table:
3623 self.to_screen(f'{video_id} has no {name}')
3624 return
3625 self.to_screen(f'[info] Available {name} for {video_id}:')
3626 self.to_stdout(table)
3627
3628 def list_formats(self, info_dict):
3629 self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)
3630
3631 def list_thumbnails(self, info_dict):
3632 self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)
3633
3634 def list_subtitles(self, video_id, subtitles, name='subtitles'):
3635 self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
3636
3637 def urlopen(self, req):
3638 """ Start an HTTP download """
3639 if isinstance(req, str):
3640 req = sanitized_Request(req)
3641 return self._opener.open(req, timeout=self._socket_timeout)
3642
3643 def print_debug_header(self):
3644 if not self.params.get('verbose'):
3645 return
3646
3647 # These imports can be slow. So import them only as needed
3648 from .extractor.extractors import _LAZY_LOADER
3649 from .extractor.extractors import _PLUGIN_CLASSES as plugin_extractors
3650
3651 def get_encoding(stream):
3652 ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
3653 if not supports_terminal_sequences(stream):
3654 from .utils import WINDOWS_VT_MODE # Must be imported locally
3655 ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
3656 return ret
3657
3658 encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
3659 locale.getpreferredencoding(),
3660 sys.getfilesystemencoding(),
3661 self.get_encoding(),
3662 ', '.join(
3663 f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
3664 if stream is not None and key != 'console')
3665 )
3666
3667 logger = self.params.get('logger')
3668 if logger:
3669 write_debug = lambda msg: logger.debug(f'[debug] {msg}')
3670 write_debug(encoding_str)
3671 else:
3672 write_string(f'[debug] {encoding_str}\n', encoding=None)
3673 write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')
3674
3675 source = detect_variant()
3676 write_debug(join_nonempty(
3677 'yt-dlp version', __version__,
3678 f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
3679 '' if source == 'unknown' else f'({source})',
3680 delim=' '))
3681 if not _LAZY_LOADER:
3682 if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
3683 write_debug('Lazy loading extractors is forcibly disabled')
3684 else:
3685 write_debug('Lazy loading extractors is disabled')
3686 if plugin_extractors or plugin_postprocessors:
3687 write_debug('Plugins: %s' % [
3688 '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
3689 for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
3690 if self.params['compat_opts']:
3691 write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
3692
3693 if source == 'source':
3694 try:
3695 stdout, _, _ = Popen.run(
3696 ['git', 'rev-parse', '--short', 'HEAD'],
3697 text=True, cwd=os.path.dirname(os.path.abspath(__file__)),
3698 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
3699 if re.fullmatch('[0-9a-f]+', stdout.strip()):
3700 write_debug(f'Git HEAD: {stdout.strip()}')
3701 except Exception:
3702 with contextlib.suppress(Exception):
3703 sys.exc_clear()
3704
3705 write_debug(system_identifier())
3706
3707 exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
3708 ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
3709 if ffmpeg_features:
3710 exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))
3711
3712 exe_versions['rtmpdump'] = rtmpdump_version()
3713 exe_versions['phantomjs'] = PhantomJSwrapper._version()
3714 exe_str = ', '.join(
3715 f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
3716 ) or 'none'
3717 write_debug('exe versions: %s' % exe_str)
3718
3719 from .compat.compat_utils import get_package_info
3720 from .dependencies import available_dependencies
3721
3722 write_debug('Optional libraries: %s' % (', '.join(sorted({
3723 join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
3724 })) or 'none'))
3725
3726 self._setup_opener()
3727 proxy_map = {}
3728 for handler in self._opener.handlers:
3729 if hasattr(handler, 'proxies'):
3730 proxy_map.update(handler.proxies)
3731 write_debug(f'Proxy map: {proxy_map}')
3732
3733 # Not implemented
3734 if False and self.params.get('call_home'):
3735 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
3736 write_debug('Public IP address: %s' % ipaddr)
3737 latest_version = self.urlopen(
3738 'https://yt-dl.org/latest/version').read().decode()
3739 if version_tuple(latest_version) > version_tuple(__version__):
3740 self.report_warning(
3741 'You are using an outdated version (newest version: %s)! '
3742 'See https://yt-dl.org/update if you need help updating.' %
3743 latest_version)
3744
3745 def _setup_opener(self):
3746 if hasattr(self, '_opener'):
3747 return
3748 timeout_val = self.params.get('socket_timeout')
3749 self._socket_timeout = 20 if timeout_val is None else float(timeout_val)
3750
3751 opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
3752 opts_cookiefile = self.params.get('cookiefile')
3753 opts_proxy = self.params.get('proxy')
3754
3755 self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
3756
3757 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
3758 if opts_proxy is not None:
3759 if opts_proxy == '':
3760 proxies = {}
3761 else:
3762 proxies = {'http': opts_proxy, 'https': opts_proxy}
3763 else:
3764 proxies = urllib.request.getproxies()
3765 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
3766 if 'http' in proxies and 'https' not in proxies:
3767 proxies['https'] = proxies['http']
3768 proxy_handler = PerRequestProxyHandler(proxies)
3769
3770 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
3771 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3772 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
3773 redirect_handler = YoutubeDLRedirectHandler()
3774 data_handler = urllib.request.DataHandler()
3775
3776 # When passing our own FileHandler instance, build_opener won't add the
3777 # default FileHandler and allows us to disable the file protocol, which
3778 # can be used for malicious purposes (see
3779 # https://github.com/ytdl-org/youtube-dl/issues/8227)
3780 file_handler = urllib.request.FileHandler()
3781
3782 def file_open(*args, **kwargs):
3783 raise urllib.error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
3784 file_handler.file_open = file_open
3785
3786 opener = urllib.request.build_opener(
3787 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
3788
3789 # Delete the default user-agent header, which would otherwise apply in
3790 # cases where our custom HTTP handler doesn't come into play
3791 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
3792 opener.addheaders = []
3793 self._opener = opener
3794
3795 def encode(self, s):
3796 if isinstance(s, bytes):
3797 return s # Already encoded
3798
3799 try:
3800 return s.encode(self.get_encoding())
3801 except UnicodeEncodeError as err:
3802 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3803 raise
3804
3805 def get_encoding(self):
3806 encoding = self.params.get('encoding')
3807 if encoding is None:
3808 encoding = preferredencoding()
3809 return encoding
3810
3811 def _write_info_json(self, label, ie_result, infofn, overwrite=None):
3812 ''' Write infojson and return True = written, 'exists' = already exists, False = skip, None = error '''
3813 if overwrite is None:
3814 overwrite = self.params.get('overwrites', True)
3815 if not self.params.get('writeinfojson'):
3816 return False
3817 elif not infofn:
3818 self.write_debug(f'Skipping writing {label} infojson')
3819 return False
3820 elif not self._ensure_dir_exists(infofn):
3821 return None
3822 elif not overwrite and os.path.exists(infofn):
3823 self.to_screen(f'[info] {label.title()} metadata is already present')
3824 return 'exists'
3825
3826 self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
3827 try:
3828 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
3829 return True
3830 except OSError:
3831 self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
3832 return None
3833
3834 def _write_description(self, label, ie_result, descfn):
3835 ''' Write description and return True = written, False = skip, None = error '''
3836 if not self.params.get('writedescription'):
3837 return False
3838 elif not descfn:
3839 self.write_debug(f'Skipping writing {label} description')
3840 return False
3841 elif not self._ensure_dir_exists(descfn):
3842 return None
3843 elif not self.params.get('overwrites', True) and os.path.exists(descfn):
3844 self.to_screen(f'[info] {label.title()} description is already present')
3845 elif ie_result.get('description') is None:
3846 self.report_warning(f'There\'s no {label} description to write')
3847 return False
3848 else:
3849 try:
3850 self.to_screen(f'[info] Writing {label} description to: {descfn}')
3851 with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
3852 descfile.write(ie_result['description'])
3853 except OSError:
3854 self.report_error(f'Cannot write {label} description file {descfn}')
3855 return None
3856 return True
3857
3858 def _write_subtitles(self, info_dict, filename):
3859 ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
3860 ret = []
3861 subtitles = info_dict.get('requested_subtitles')
3862 if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
3863 # Subtitle download errors are already handled in the relevant IE,
3864 # so this silently continues when used with an IE that lacks subtitle support
3865 return ret
3866
3867 sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
3868 if not sub_filename_base:
3869 self.to_screen('[info] Skipping writing video subtitles')
3870 return ret
3871 for sub_lang, sub_info in subtitles.items():
3872 sub_format = sub_info['ext']
3873 sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
3874 sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
3875 existing_sub = self.existing_file((sub_filename_final, sub_filename))
3876 if existing_sub:
3877 self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
3878 sub_info['filepath'] = existing_sub
3879 ret.append((existing_sub, sub_filename_final))
3880 continue
3881
3882 self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
3883 if sub_info.get('data') is not None:
3884 try:
3885 # Use newline='' to prevent conversion of newline characters
3886 # See https://github.com/ytdl-org/youtube-dl/issues/10268
3887 with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
3888 subfile.write(sub_info['data'])
3889 sub_info['filepath'] = sub_filename
3890 ret.append((sub_filename, sub_filename_final))
3891 continue
3892 except OSError:
3893 self.report_error(f'Cannot write video subtitles file {sub_filename}')
3894 return None
3895
3896 try:
3897 sub_copy = sub_info.copy()
3898 sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
3899 self.dl(sub_filename, sub_copy, subtitle=True)
3900 sub_info['filepath'] = sub_filename
3901 ret.append((sub_filename, sub_filename_final))
3902 except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
3903 msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
3904 if self.params.get('ignoreerrors') is not True: # False or 'only_download'
3905 if not self.params.get('ignoreerrors'):
3906 self.report_error(msg)
3907 raise DownloadError(msg)
3908 self.report_warning(msg)
3909 return ret
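# The returned (sub_filename, sub_filename_final) pairs are consumed in
# process_info(), where dict(ret) extends files_to_move so each written
# subtitle ends up at its final location after post-processing.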
3910
3911 def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
3912 ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
3913 write_all = self.params.get('write_all_thumbnails', False)
3914 thumbnails, ret = [], []
3915 if write_all or self.params.get('writethumbnail', False):
3916 thumbnails = info_dict.get('thumbnails') or []
3917 multiple = write_all and len(thumbnails) > 1
3918
3919 if thumb_filename_base is None:
3920 thumb_filename_base = filename
3921 if thumbnails and not thumb_filename_base:
3922 self.write_debug(f'Skipping writing {label} thumbnail')
3923 return ret
3924
3925 for idx, t in list(enumerate(thumbnails))[::-1]:
3926 thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
3927 thumb_display_id = f'{label} thumbnail {t["id"]}'
3928 thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
3929 thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
3930
3931 existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
3932 if existing_thumb:
3933 self.to_screen('[info] %s is already present' % (
3934 thumb_display_id if multiple else f'{label} thumbnail').capitalize())
3935 t['filepath'] = existing_thumb
3936 ret.append((existing_thumb, thumb_filename_final))
3937 else:
3938 self.to_screen(f'[info] Downloading {thumb_display_id} ...')
3939 try:
3940 uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
3941 self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
3942 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
3943 shutil.copyfileobj(uf, thumbf)
3944 ret.append((thumb_filename, thumb_filename_final))
3945 t['filepath'] = thumb_filename
3946 except network_exceptions as err:
3947 thumbnails.pop(idx)
3948 self.report_warning(f'Unable to download {thumb_display_id}: {err}')
3949 if ret and not write_all:
3950 break
3951 return ret