import collections
import contextlib
import datetime
import errno
import fileinput
import functools
import io
import itertools
import json
import locale
import operator
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import time
import tokenize
import traceback
import unicodedata
import urllib.request
from string import ascii_letters

from .cache import Cache
from .compat import compat_os_name, compat_shlex_quote
from .cookies import load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .postprocessor import _PLUGIN_CLASSES as plugin_postprocessors
from .postprocessor import (
    EmbedThumbnailPP,
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    FFmpegVideoConvertorPP,
    MoveFilesAfterDownloadPP,
    get_postprocessor,
)
from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
from .update import detect_variant
from .utils import (
    DEFAULT_OUTTMPL,
    IDENTITY,
    LINK_TEMPLATES,
    NO_DEFAULT,
    NUMBER_RE,
    OUTTMPL_TYPES,
    POSTPROCESS_WHEN,
    STR_FORMAT_RE_TMPL,
    STR_FORMAT_TYPES,
    ContentTooShortError,
    DateRange,
    DownloadCancelled,
    DownloadError,
    EntryNotInPlaylist,
    ExistingVideoReached,
    ExtractorError,
    GeoRestrictedError,
    HEADRequest,
    ISO3166Utils,
    LazyList,
    MaxDownloadsReached,
    Namespace,
    PagedList,
    PerRequestProxyHandler,
    PlaylistEntries,
    Popen,
    PostProcessingError,
    ReExtractInfo,
    RejectedVideoReached,
    SameFileError,
    UnavailableVideoError,
    YoutubeDLCookieProcessor,
    YoutubeDLHandler,
    YoutubeDLRedirectHandler,
    age_restricted,
    args_to_str,
    bug_reports_message,
    date_from_str,
    determine_ext,
    determine_protocol,
    encode_compat_str,
    encodeFilename,
    error_to_compat_str,
    escapeHTML,
    expand_path,
    filter_dict,
    float_or_none,
    format_bytes,
    format_decimal_suffix,
    format_field,
    formatSeconds,
    get_domain,
    int_or_none,
    iri_to_uri,
    join_nonempty,
    locked_file,
    make_dir,
    make_HTTPS_handler,
    merge_headers,
    network_exceptions,
    number_of_digits,
    orderedSet,
    parse_filesize,
    preferredencoding,
    prepend_extension,
    register_socks_protocols,
    remove_terminal_sequences,
    render_table,
    replace_extension,
    sanitize_filename,
    sanitize_path,
    sanitize_url,
    sanitized_Request,
    std_headers,
    str_or_none,
    strftime_or_none,
    subtitles_filename,
    supports_terminal_sequences,
    system_identifier,
    timetuple_from_msec,
    to_high_limit_path,
    traverse_obj,
    try_get,
    url_basename,
    variadic,
    version_tuple,
    windows_enable_vt_mode,
    write_json_file,
    write_string,
)
from .version import RELEASE_GIT_HEAD, __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL:
149 """YoutubeDL class.
150
151 YoutubeDL objects are the ones responsible of downloading the
152 actual video file and writing it to disk if the user has requested
153 it, among some other tasks. In most cases there should be one per
154 program. As, given a video URL, the downloader doesn't know how to
155 extract all the needed information, task that InfoExtractors do, it
156 has to pass the URL to one of them.
157
158 For this, YoutubeDL objects have a method that allows
159 InfoExtractors to be registered in a given order. When it is passed
160 a URL, the YoutubeDL object handles it to the first InfoExtractor it
161 finds that reports being able to handle it. The InfoExtractor extracts
162 all the information about the video or videos the URL refers to, and
163 YoutubeDL process the extracted information, possibly using a File
164 Downloader to download the video.
165
166 YoutubeDL objects accept a lot of parameters. In order not to saturate
167 the object constructor with arguments, it receives a dictionary of
168 options instead. These options are available through the params
169 attribute for the InfoExtractors to use. The YoutubeDL also
170 registers itself as the downloader in charge for the InfoExtractors
171 that are added to it, so this is a "mutual registration".
172
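    A minimal usage sketch (the options shown are illustrative, not required):

        ydl_opts = {'format': 'best', 'outtmpl': '%(title)s.%(ext)s'}
        with YoutubeDL(ydl_opts) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
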
    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A dict with keys WHEN mapped to a list of templates to
                       print to stdout. The allowed keys are video or any of the
                       items in utils.POSTPROCESS_WHEN.
                       For compatibility, a single list is also accepted.
    print_to_file:     A dict with keys WHEN (same as forceprint) mapped to
                       a list of tuples with (template, filename).
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or
                       list_thumbnails is used.
    format:            Video format code. See "FORMAT SELECTION" for more details.
                       You can also pass a function. The function takes 'ctx' as
                       argument and returns the formats to download.
                       See "build_format_selector" for an implementation.
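                       A minimal sketch of such a function; it receives a
                       'ctx' dict whose 'formats' key lists the available
                       formats (sorted worst to best):

                           def select_best_mp4(ctx):
                               # Yield the last-listed (best) mp4 format
                               yield next(
                                   f for f in reversed(ctx['formats'])
                                   if f.get('ext') == 'mp4')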
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental).
    format_sort:       A list of fields by which to sort the video formats.
                       See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    prefer_free_formats: Whether to prefer video formats with free containers
                       over non-free ones of the same quality.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file.
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file.
    check_formats:     Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none),
                       'selected' (check selected formats),
                       or None (check only if requested by extractor).
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py).
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       For compatibility with youtube-dl, a single string can
                       also be used.
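                       For example (keys and templates are illustrative):

                           'outtmpl': {
                               'default': '%(title)s [%(id)s].%(ext)s',
                               'infojson': '%(title)s.%(ext)s',
                           }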
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names.
    trim_file_name:    Limit length of filename (extension excluded).
    windowsfilenames:  Force the filenames to be windows compatible.
    ignoreerrors:      Do not stop on download/postprocessing errors.
                       Can be 'only_download' to ignore only download errors.
                       Default is 'only_download' for CLI, but False for API.
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped.
    force_generic_extractor: Force downloader to use the generic extractor.
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False.
                       For compatibility with youtube-dl,
                       "nooverwrites" may also be used instead.
    playlist_items:    Specific indices of playlist to download.
    playlistrandom:    Download playlist items in random order.
    lazy_playlist:     Process playlist entries as they are received.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Log messages to stderr instead of stdout.
    consoletitle:      Display progress in console window's titlebar.
    writedescription:  Write the video description to a .description file.
    writeinfojson:     Write the video metadata to a .info.json file.
    clean_infojson:    Remove private fields from the infojson.
    getcomments:       Extract video comments. These will not be written to disk
                       unless writeinfojson is also given.
    writeannotations:  Write the video annotations to a .annotations.xml file.
    writethumbnail:    Write the thumbnail image to a file.
    allow_playlist_files: Whether to write playlists' description, infojson etc.
                       also to disk when using the 'write*' options.
    write_all_thumbnails: Write all thumbnail formats to files.
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop).
    writeurllink:      Write a Windows internet shortcut file (.url).
    writewebloclink:   Write a macOS internet shortcut file (.webloc).
    writedesktoplink:  Write a Linux internet shortcut file (.desktop).
    writesubtitles:    Write the video subtitles to a file.
    writeautomaticsub: Write the automatically generated subtitles to a file.
    listsubtitles:     Lists all available subtitles for the video.
    subtitlesformat:   The format code for subtitles.
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. The language can be prefixed with a "-" to
                       exclude it from the requested languages.
                       Eg: ['all', '-live_chat']
    keepvideo:         Keep the video file after post-processing.
    daterange:         A DateRange object; download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file.
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Unsuitable videos for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  File name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded
                       again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
    break_per_url:     Whether break_on_reject and break_on_existing
                       should act on each input URL as opposed to for the entire queue.
    cookiefile:        File name or text stream from where cookies should be read and dumped to.
    cookiesfrombrowser: A tuple containing the name of the browser, the profile
                       name/path from where cookies are loaded, and the name of the
                       keyring. Eg: ('chrome', ) or ('vivaldi', 'default', 'BASICTEXT')
    legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
                       support RFC 5746 secure renegotiation.
    nocheckcertificate: Do not verify SSL certificates.
    client_certificate: Path to client certificate file in PEM format. May include the private key.
    client_certificate_key: Path to private key file for client certificate.
    client_certificate_password: Password for client certificate private key, if encrypted.
                       If not provided and the key is encrypted, yt-dlp will ask interactively.
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       (Only supported by some extractors.)
    http_headers:      A dictionary of custom headers to be used for all requests.
    proxy:             URL of the proxy server to use.
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds.
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi.
    debug_printtraffic: Print out sent and received HTTP traffic.
    default_search:    Prepend this string if an input url is not valid.
                       'auto' for elaborate guessing.
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Whether to resolve and process url_results further
                       * False: Always process (default)
                       * True: Never process
                       * 'in_playlist': Do not process inside playlist/multi_video
                       * 'discard': Always process, but don't return the result
                         from inside playlist/multi_video
                       * 'discard_in_playlist': Same as "discard", but only for
                         playlists (not multi_video)
    wait_for_video:    If given, wait for scheduled streams to become available.
                       The value should be a tuple containing the range
                       (min_secs, max_secs) to wait between retries.
    postprocessors:    A list of dictionaries, each with an entry
                       * key: The name of the postprocessor. See
                         yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Allowed values are
                         the entries of utils.POSTPROCESS_WHEN.
                         Assumed to be 'post_process' if not given.
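                       For example, to extract audio with ffmpeg after
                       download (a minimal sketch):

                           'postprocessors': [{
                               'key': 'FFmpegExtractAudio',
                               'preferredcodec': 'mp3',
                           }]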
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                         Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading" or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                         None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                         unknown
                       * fragment_index: The counter of the currently
                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
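                       A minimal hook sketch:

                           def my_hook(d):
                               if d['status'] == 'finished':
                                   print('Done downloading', d['filename'])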
    postprocessor_hooks: A list of functions that get called on postprocessing
                       progress, with a dictionary with the entries
                       * status: One of "started", "processing", or "finished".
                         Check this first and ignore unknown values.
                       * postprocessor: Name of the postprocessor
                       * info_dict: The extracted info_dict

                       Progress hooks are guaranteed to be called at least twice
                       (with status "started" and "finished") if the processing is successful.
    merge_output_format: Extension to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted.
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                         about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction.
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone, or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download.
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called for every video with the signature
                       (info_dict, *, incomplete: bool) -> Optional[str]
                       For backward compatibility with youtube-dl, the signature
                       (info_dict) -> Optional[str] is also allowed.
                       - If it returns a message, the video is ignored.
                       - If it returns None, the video is downloaded.
                       - If it returns utils.NO_DEFAULT, the user is interactively
                         asked whether to download the video.
                       match_filter_func in utils.py is an example of this.
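                       A minimal sketch that skips videos shorter than 60
                       seconds (the threshold is arbitrary):

                           def longer_than_a_minute(info_dict, *, incomplete):
                               duration = info_dict.get('duration')
                               if duration is not None and duration < 60:
                                   return 'The video is too short'
                               return None  # None means "download"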
    no_color:          Do not emit color codes in output.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header.
    geo_bypass_country:
                       Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header.
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country.
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader.
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat, format-sort,
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
                       Refer to __init__.py for their implementation.
    progress_template: Dictionary of templates for progress outputs.
                       Allowed keys are 'download', 'postprocess',
                       'download-title' (console title) and 'postprocess-title'.
                       The template is mapped on a dictionary with keys 'progress' and 'info'.
    retry_sleep_functions: Dictionary of functions that take the number of attempts
                       as argument and return the time to sleep in seconds.
                       Allowed keys are 'http', 'fragment', 'file_access'.
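                       E.g. exponential backoff for HTTP retries (the
                       backoff curve is an arbitrary choice):

                           'retry_sleep_functions': {'http': lambda n: 2 ** n}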
    download_ranges:   A callback function that gets called for every video with
                       the signature (info_dict, ydl) -> Iterable[Section].
                       Only the returned sections will be downloaded.
                       Each Section is a dict with the following keys:
                       * start_time: Start time of the section in seconds
                       * end_time: End time of the section in seconds
                       * title: Section title (Optional)
                       * index: Section number (Optional)
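                       E.g. a callback that downloads only the first 30
                       seconds of every video (a sketch):

                           def first_30_seconds(info_dict, ydl):
                               yield {'start_time': 0, 'end_time': 30}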
    force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts.
    noprogress:        Do not print the progress bar.

    The following parameters are not used by YoutubeDL itself; they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP.
                       For compatibility with youtube-dl, a single list of args
                       can also be used.

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors.
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True).
    hls_split_discontinuity: Split HLS playlists into different formats at
                       discontinuities such as ad breaks (default: False).
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       Eg: {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched:      Mark videos watched (even with --simulate). Only for YouTube.

    The following options are deprecated and may be removed in the future:

    playliststart:     - Use playlist_items
                       Playlist item to start at.
    playlistend:       - Use playlist_items
                       Playlist item to end at.
    playlistreverse:   - Use playlist_items
                       Download playlist items in reverse order.
    forceurl:          - Use forceprint
                       Force printing final URL.
    forcetitle:        - Use forceprint
                       Force printing title.
    forceid:           - Use forceprint
                       Force printing ID.
    forcethumbnail:    - Use forceprint
                       Force printing thumbnail URL.
    forcedescription:  - Use forceprint
                       Force printing description.
    forcefilename:     - Use forceprint
                       Force printing final filename.
    forceduration:     - Use forceprint
                       Force printing duration.
    allsubtitles:      - Use subtitleslangs = ['all']
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub).
    include_ads:       - Doesn't work
                       Download ads as well.
    call_home:         - Not implemented
                       Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging.
    post_hooks:        - Register a custom postprocessor
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use the downloader suggested by the extractor if None.
    prefer_ffmpeg:     - avconv support is deprecated
                       If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by the extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (Only for YouTube.)
    youtube_include_hls_manifest: - Use extractor_args
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by the extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (Only for YouTube.)
    """

    _NUMERIC_FIELDS = {
        'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
    }

    _format_fields = {
        # NB: Keep in sync with the docstring of extractor/common.py
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
        'preference', 'language', 'language_preference', 'quality', 'source_preference',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
    }
    _format_selection_exts = {
        'audio': {'m4a', 'mp3', 'ogg', 'aac'},
        'video': {'mp4', 'flv', 'webm', '3gp'},
        'storyboards': {'mhtml'},
    }

    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        if params is None:
            params = {}
        self.params = params
        self._ies = {}
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._num_videos = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)

        windows_enable_vt_mode()
        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            out=stdout,
            error=sys.stderr,
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
        )
        self._allow_colors = Namespace(**{
            type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
            for type_, stream in self._out_files.items_ if type_ != 'console'
        })

        # The code is left like this to be reused for future deprecations
        MIN_SUPPORTED, MIN_RECOMMENDED = (3, 7), (3, 7)
        current_version = sys.version_info[:2]
        if current_version < MIN_RECOMMENDED:
            msg = ('Support for Python version %d.%d has been deprecated. '
                   'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details.'
                   '\n You will no longer receive updates on this version')
            if current_version < MIN_SUPPORTED:
                msg = 'Python version %d.%d is no longer supported'
            self.deprecation_warning(
                f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                ' If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecation_warning(msg)

        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}

        if self.params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = shutil.get_terminal_size().columns
                width_args = [] if width is None else ['-w', str(width)]
                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if auto_init:
            if auto_init != 'no_verbose_header':
                self.print_debug_header()
            self.add_default_info_extractors()

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))

        # Set http_headers defaults according to std_headers
        self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))

        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                when=when)

        self._setup_opener()
        register_socks_protocols()

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            if fn is None:
                return False
            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        self.archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
                return False
            return True

        self.archive = set()
        preload_download_archive(self.params.get('download_archive'))

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def _get_info_extractor_class(self, ie_key):
        ie = self._ies.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)
            self.add_info_extractor(ie)
        return ie

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key. It will try to get one from
        the _ies list; if there is no instance, it will create a new one and add
        it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        for ie in gen_extractor_classes():
            self.add_info_extractor(ie)

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
            for pp in pps:
                pp.add_progress_hook(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)

    def to_screen(self, message, skip_eol=False, quiet=None):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            return
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            return
        self._write_string(
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)

    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
            return
        self._write_string(code, self._out_files.console)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        else:
            self._send_console_code(f'\033]0;{message}\007')

    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[22;0t')  # Save the title on stack

    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[23;0t')  # Restore the title from stack

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None, is_error=True):
        """Determine the action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may throw an exception or not
        when errors are found, after printing the message.

        @param tb          If given, is additional traceback information
        @param is_error    Whether to raise error according to ignoreerrors
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not is_error:
            return
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    Styles = Namespace(
        HEADERS='yellow',
        EMPHASIS='light blue',
        FILENAME='green',
        ID='green',
        DELIM='blue',
        ERROR='red',
        WARNING='yellow',
        SUPPRESS='light black',
    )

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        text = str(text)
        if test_encoding:
            original_text = text
            # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
                text = fallback
        return format_text(text, f) if allow_colors else text if fallback is None else fallback

    def _format_out(self, *args, **kwargs):
        return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)

    def _format_screen(self, *args, **kwargs):
        return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)

    def _format_err(self, *args, **kwargs):
        return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'DeprecationWarning: {message}')
        else:
            self.to_stderr(f'{self._format_err("DeprecationWarning:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, *args, **kwargs):
        '''
        Do the same as trouble, but prefix the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

    def write_debug(self, message, only_once=False):
        '''Log debug message or print message to stderr'''
        if not self.params.get('verbose', False):
            return
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']

    def _parse_outtmpl(self):
        sanitize = IDENTITY
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict)
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$';
        # that is not what we want, since we need to keep '%%' intact for
        # the template dict substitution step. Work around this with a
        # boundary-like separator hack.
        sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err
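
    # For example (a sketch): YoutubeDL.validate_outtmpl('%(title)s.%(ext)s')
    # returns None, while an incomplete field such as '%(title' returns the
    # ValueError raised during the test substitution above.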

    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)
        return info_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed
        """

        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int or slice
        FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?x)
            (?P<negate>-)?
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<remaining>
                (?P<alternate>(?<!\\),[^|&)]+)?
                (?:&(?P<replacement>.*?))?
                (?:\|(?P<default>.*?))?
            )$''')

        def _traverse_infodict(k):
            k = k.split('.')
            if k[0] == '':
                k.pop(0)
            return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)

        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']
                else NO_DEFAULT))

        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            return repr(obj)

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            initial_field = mobj.group('fields') if mobj else ''
            value, replacement, default = None, None, na
            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                replacement = mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
                else:
                    break

            fmt = outer_mobj.group('format')
            if fmt == 's' and value is not None and key in field_size_compat_map.keys():
                fmt = f'0{field_size_compat_map[key]:d}d'

            value = default if value is None else value if replacement is None else replacement

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
            if fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(value, default=_dumpjson_default, indent=4 if '#' in flags else None), str_fmt
            elif fmt[-1] == 'h':  # html
                value, fmt = escapeHTML(value), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode() % str(value).encode()
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                    value), str_fmt
            elif fmt[-1] == 'D':  # decimal suffix
                num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
                value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
                                              factor=1024 if '#' in flags else 1000)
            elif fmt[-1] == 'S':  # filename sanitization
                value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
            elif fmt[-1] == 'c':
                if value:
                    value = str(value)[0]
                else:
                    fmt = str_fmt
            elif fmt[-1] not in 'rs':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'

            if sanitize:
                if fmt[-1] == 'r':
                    # If value is an object, sanitize might convert it to a string
                    # So we convert it to repr first
                    value, fmt = repr(value), str_fmt
                if fmt[-1] in 'csr':
                    value = sanitizer(initial_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT

    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict
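
    # A usage sketch (made-up info_dict values):
    #   ydl.evaluate_outtmpl('%(title)s-%(id)s.%(ext)s',
    #                        {'title': 'spam', 'id': '42', 'ext': 'mp4'})
    #   -> 'spam-42.mp4'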

    def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
        assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
        if outtmpl is None:
            outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
        try:
            outtmpl = self._outtmpl_expandpath(outtmpl)
            filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
            if not filename:
                return None

            if tmpl_type in ('', 'temp'):
                final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
                if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
                    filename = replace_extension(filename, ext, final_ext)
            elif tmpl_type:
                force_ext = OUTTMPL_TYPES[tmpl_type]
                if force_ext:
                    filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                no_ext, *ext = filename.rsplit('.', 2)
                filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

    def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
        """Generate the output filename"""
        if outtmpl:
            assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
            dir_type = None
        filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
        if not filename and dir_type not in ('', 'temp'):
            return ''

        if warn:
            if not self.params.get('paths'):
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
        if filename == '-' or not filename:
            return filename

        return self.get_output_path(dir_type, filename)

    def _match_entry(self, info_dict, incomplete=False, silent=False):
        """ Returns None if the file should be downloaded """

        video_title = info_dict.get('title', info_dict.get('id', 'entry'))

        def check_filter():
            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title

            match_filter = self.params.get('match_filter')
            if match_filter is not None:
                try:
                    ret = match_filter(info_dict, incomplete=incomplete)
                except TypeError:
                    # For backward compatibility
                    ret = None if incomplete else match_filter(info_dict)
                if ret is NO_DEFAULT:
                    while True:
                        filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
                        reply = input(self._format_screen(
                            f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
                        if reply in {'y', ''}:
                            return None
                        elif reply == 'n':
                            return f'Skipping {video_title}'
                elif ret is not None:
                    return ret
            return None

        if self.in_download_archive(info_dict):
            reason = '%s has already been recorded in the archive' % video_title
            break_opt, break_err = 'break_on_existing', ExistingVideoReached
        else:
            reason = check_filter()
            break_opt, break_err = 'break_on_reject', RejectedVideoReached
        if reason is not None:
            if not silent:
                self.to_screen('[download] ' + reason)
            if self.params.get(break_opt, False):
                raise break_err()
        return reason

1375 @staticmethod
1376 def add_extra_info(info_dict, extra_info):
1377 '''Set the keys from extra_info in info_dict if they are missing'''
1378 for key, value in extra_info.items():
1379 info_dict.setdefault(key, value)
1380
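# Example: because add_extra_info uses setdefault, existing keys win
# (a sketch with hypothetical values):
#
#   info = {'id': 'abc123', 'title': 'kept'}
#   YoutubeDL.add_extra_info(info, {'title': 'ignored', 'playlist': 'pl1'})
#   # info == {'id': 'abc123', 'title': 'kept', 'playlist': 'pl1'}
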
1381 def extract_info(self, url, download=True, ie_key=None, extra_info=None,
1382 process=True, force_generic_extractor=False):
1383 """
1384 Return a list with a dictionary for each video extracted.
1385
1386 Arguments:
1387 url -- URL to extract
1388
1389 Keyword arguments:
1390 download -- whether to download videos during extraction
1391 ie_key -- extractor key hint
1392 extra_info -- dictionary containing the extra values to add to each result
1393 process -- whether to resolve all unresolved references (URLs, playlist items),
1394 must be True for download to work.
1395 force_generic_extractor -- force using the generic extractor
1396 """
1397
1398 if extra_info is None:
1399 extra_info = {}
1400
1401 if not ie_key and force_generic_extractor:
1402 ie_key = 'Generic'
1403
1404 if ie_key:
1405 ies = {ie_key: self._get_info_extractor_class(ie_key)}
1406 else:
1407 ies = self._ies
1408
1409 for ie_key, ie in ies.items():
1410 if not ie.suitable(url):
1411 continue
1412
1413 if not ie.working():
1414 self.report_warning('The program functionality for this site has been marked as broken, '
1415 'and will probably not work.')
1416
1417 temp_id = ie.get_temp_id(url)
1418 if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
1419 self.to_screen(f'[{ie_key}] {temp_id}: has already been recorded in the archive')
1420 if self.params.get('break_on_existing', False):
1421 raise ExistingVideoReached()
1422 break
1423 return self.__extract_info(url, self.get_info_extractor(ie_key), download, extra_info, process)
1424 else:
1425 self.report_error('no suitable InfoExtractor for URL %s' % url)
1426
1427 def _handle_extraction_exceptions(func):
1428 @functools.wraps(func)
1429 def wrapper(self, *args, **kwargs):
1430 while True:
1431 try:
1432 return func(self, *args, **kwargs)
1433 except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
1434 raise
1435 except ReExtractInfo as e:
1436 if e.expected:
1437 self.to_screen(f'{e}; Re-extracting data')
1438 else:
1439 self.to_stderr('\r')
1440 self.report_warning(f'{e}; Re-extracting data')
1441 continue
1442 except GeoRestrictedError as e:
1443 msg = e.msg
1444 if e.countries:
1445 msg += '\nThis video is available in %s.' % ', '.join(
1446 map(ISO3166Utils.short2full, e.countries))
1447 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around this.'
1448 self.report_error(msg)
1449 except ExtractorError as e: # An error we somewhat expected
1450 self.report_error(str(e), e.format_traceback())
1451 except Exception as e:
1452 if self.params.get('ignoreerrors'):
1453 self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
1454 else:
1455 raise
1456 break
1457 return wrapper
1458
1459 def _wait_for_video(self, ie_result):
1460 if (not self.params.get('wait_for_video')
1461 or ie_result.get('_type', 'video') != 'video'
1462 or ie_result.get('formats') or ie_result.get('url')):
1463 return
1464
1465 format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
1466 last_msg = ''
1467
1468 def progress(msg):
1469 nonlocal last_msg
1470 full_msg = f'{msg}\n'
1471 if not self.params.get('noprogress'):
1472 full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
1473 elif last_msg:
1474 return
1475 self.to_screen(full_msg, skip_eol=True)
1476 last_msg = msg
1477
1478 min_wait, max_wait = self.params.get('wait_for_video')
1479 diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
1480 if diff is None and ie_result.get('live_status') == 'is_upcoming':
1481 diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
1482 self.report_warning('Release time of video is not known')
1483 elif (diff or 0) <= 0:
1484 self.report_warning('Video should already be available according to extracted info')
1485 diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
1486 self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')
1487
1488 wait_till = time.time() + diff
1489 try:
1490 while True:
1491 diff = wait_till - time.time()
1492 if diff <= 0:
1493 progress('')
1494 raise ReExtractInfo('[wait] Wait period ended', expected=True)
1495 progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
1496 time.sleep(1)
1497 except KeyboardInterrupt:
1498 progress('')
1499 raise ReExtractInfo('[wait] Interrupted by user', expected=True)
1500 except BaseException as e:
1501 if not isinstance(e, ReExtractInfo):
1502 self.to_screen('')
1503 raise
1504
1505 @_handle_extraction_exceptions
1506 def __extract_info(self, url, ie, download, extra_info, process):
1507 ie_result = ie.extract(url)
1508 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
1509 self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
1510 return
1511 if isinstance(ie_result, list):
1512 # Backwards compatibility: old IE result format
1513 ie_result = {
1514 '_type': 'compat_list',
1515 'entries': ie_result,
1516 }
1517 if extra_info.get('original_url'):
1518 ie_result.setdefault('original_url', extra_info['original_url'])
1519 self.add_default_extra_info(ie_result, ie, url)
1520 if process:
1521 self._wait_for_video(ie_result)
1522 return self.process_ie_result(ie_result, download, extra_info)
1523 else:
1524 return ie_result
1525
1526 def add_default_extra_info(self, ie_result, ie, url):
1527 if url is not None:
1528 self.add_extra_info(ie_result, {
1529 'webpage_url': url,
1530 'original_url': url,
1531 })
1532 webpage_url = ie_result.get('webpage_url')
1533 if webpage_url:
1534 self.add_extra_info(ie_result, {
1535 'webpage_url_basename': url_basename(webpage_url),
1536 'webpage_url_domain': get_domain(webpage_url),
1537 })
1538 if ie is not None:
1539 self.add_extra_info(ie_result, {
1540 'extractor': ie.IE_NAME,
1541 'extractor_key': ie.ie_key(),
1542 })
1543
1544 def process_ie_result(self, ie_result, download=True, extra_info=None):
1545 """
1546 Take the result of the ie (may be modified) and resolve all unresolved
1547 references (URLs, playlist items).
1548
1549 It will also download the videos if 'download' is true.
1550 Returns the resolved ie_result.
1551 """
1552 if extra_info is None:
1553 extra_info = {}
1554 result_type = ie_result.get('_type', 'video')
1555
1556 if result_type in ('url', 'url_transparent'):
1557 ie_result['url'] = sanitize_url(ie_result['url'])
1558 if ie_result.get('original_url'):
1559 extra_info.setdefault('original_url', ie_result['original_url'])
1560
1561 extract_flat = self.params.get('extract_flat', False)
1562 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1563 or extract_flat is True):
1564 info_copy = ie_result.copy()
1565 ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
1566 if ie and not ie_result.get('id'):
1567 info_copy['id'] = ie.get_temp_id(ie_result['url'])
1568 self.add_default_extra_info(info_copy, ie, ie_result['url'])
1569 self.add_extra_info(info_copy, extra_info)
1570 info_copy, _ = self.pre_process(info_copy)
1571 self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
1572 self._raise_pending_errors(info_copy)
1573 if self.params.get('force_write_download_archive', False):
1574 self.record_download_archive(info_copy)
1575 return ie_result
1576
1577 if result_type == 'video':
1578 self.add_extra_info(ie_result, extra_info)
1579 ie_result = self.process_video_result(ie_result, download=download)
1580 self._raise_pending_errors(ie_result)
1581 additional_urls = (ie_result or {}).get('additional_urls')
1582 if additional_urls:
1583 # TODO: Improve MetadataParserPP to allow setting a list
1584 if isinstance(additional_urls, str):
1585 additional_urls = [additional_urls]
1586 self.to_screen(
1587 '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
1588 self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
1589 ie_result['additional_entries'] = [
1590 self.extract_info(
1591 url, download, extra_info=extra_info,
1592 force_generic_extractor=self.params.get('force_generic_extractor'))
1593 for url in additional_urls
1594 ]
1595 return ie_result
1596 elif result_type == 'url':
1597 # We have to add extra_info to the results because it may be
1598 # contained in a playlist
1599 return self.extract_info(
1600 ie_result['url'], download,
1601 ie_key=ie_result.get('ie_key'),
1602 extra_info=extra_info)
1603 elif result_type == 'url_transparent':
1604 # Use the information from the embedding page
1605 info = self.extract_info(
1606 ie_result['url'], ie_key=ie_result.get('ie_key'),
1607 extra_info=extra_info, download=False, process=False)
1608
1609 # extract_info may return None when ignoreerrors is enabled and
1610 # extraction failed with an error, don't crash and return early
1611 # in this case
1612 if not info:
1613 return info
1614
1615 exempted_fields = {'_type', 'url', 'ie_key'}
1616 if not ie_result.get('section_end') and ie_result.get('section_start') is None:
1617 # For video clips, the id etc of the clip extractor should be used
1618 exempted_fields |= {'id', 'extractor', 'extractor_key'}
1619
1620 new_result = info.copy()
1621 new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))
1622
1623 # Extracted info may not be a video result (i.e.
1624 # info.get('_type', 'video') != 'video') but rather a url or
1625 # url_transparent. In such cases, outer metadata (from ie_result)
1626 # should be propagated to inner one (info). For this to happen
1627 # _type of info should be overridden with url_transparent. This
1628 # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
1629 if new_result.get('_type') == 'url':
1630 new_result['_type'] = 'url_transparent'
1631
1632 return self.process_ie_result(
1633 new_result, download=download, extra_info=extra_info)
1634 elif result_type in ('playlist', 'multi_video'):
1635 # Protect from infinite recursion due to recursively nested playlists
1636 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
1637 webpage_url = ie_result['webpage_url']
1638 if webpage_url in self._playlist_urls:
1639 self.to_screen(
1640 '[download] Skipping already downloaded playlist: %s'
1641 % (ie_result.get('title') or ie_result.get('id')))
1642 return
1643
1644 self._playlist_level += 1
1645 self._playlist_urls.add(webpage_url)
1646 self._fill_common_fields(ie_result, False)
1647 self._sanitize_thumbnails(ie_result)
1648 try:
1649 return self.__process_playlist(ie_result, download)
1650 finally:
1651 self._playlist_level -= 1
1652 if not self._playlist_level:
1653 self._playlist_urls.clear()
1654 elif result_type == 'compat_list':
1655 self.report_warning(
1656 'Extractor %s returned a compat_list result. '
1657 'It needs to be updated.' % ie_result.get('extractor'))
1658
1659 def _fixup(r):
1660 self.add_extra_info(r, {
1661 'extractor': ie_result['extractor'],
1662 'webpage_url': ie_result['webpage_url'],
1663 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1664 'webpage_url_domain': get_domain(ie_result['webpage_url']),
1665 'extractor_key': ie_result['extractor_key'],
1666 })
1667 return r
1668 ie_result['entries'] = [
1669 self.process_ie_result(_fixup(r), download, extra_info)
1670 for r in ie_result['entries']
1671 ]
1672 return ie_result
1673 else:
1674 raise Exception('Invalid result type: %s' % result_type)
1675
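# Sketch of how the '_type' of an ie_result selects a branch above
# (field values are illustrative):
#
#   {'_type': 'url', 'url': ..., 'ie_key': 'Generic'}
#       -> re-extracted via extract_info and fully processed
#   {'_type': 'url_transparent', 'url': ..., 'title': 'outer title'}
#       -> re-extracted, with the outer metadata overriding the inner result
#   {'_type': 'playlist', 'entries': [...]}
#       -> each entry processed recursively via __process_playlist
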
1676 def _ensure_dir_exists(self, path):
1677 return make_dir(path, self.report_error)
1678
1679 @staticmethod
1680 def _playlist_infodict(ie_result, strict=False, **kwargs):
1681 info = {
1682 'playlist_count': ie_result.get('playlist_count'),
1683 'playlist': ie_result.get('title') or ie_result.get('id'),
1684 'playlist_id': ie_result.get('id'),
1685 'playlist_title': ie_result.get('title'),
1686 'playlist_uploader': ie_result.get('uploader'),
1687 'playlist_uploader_id': ie_result.get('uploader_id'),
1688 **kwargs,
1689 }
1690 if strict:
1691 return info
1692 return {
1693 **info,
1694 'playlist_index': 0,
1695 '__last_playlist_index': max(ie_result['requested_entries'] or (0, 0)),
1696 'extractor': ie_result['extractor'],
1697 'webpage_url': ie_result['webpage_url'],
1698 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1699 'webpage_url_domain': get_domain(ie_result['webpage_url']),
1700 'extractor_key': ie_result['extractor_key'],
1701 }
1702
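# Example of the strict playlist infodict (a sketch; values hypothetical):
#
#   YoutubeDL._playlist_infodict(
#       {'id': 'PL1', 'title': 'My list', 'playlist_count': 2}, strict=True)
#   # -> {'playlist_count': 2, 'playlist': 'My list', 'playlist_id': 'PL1',
#   #     'playlist_title': 'My list', 'playlist_uploader': None,
#   #     'playlist_uploader_id': None}
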
1703 def __process_playlist(self, ie_result, download):
1704 """Process each entry in the playlist"""
1705 assert ie_result['_type'] in ('playlist', 'multi_video')
1706
1707 common_info = self._playlist_infodict(ie_result, strict=True)
1708 title = common_info.get('title') or '<Untitled>'
1709 if self._match_entry(common_info, incomplete=True) is not None:
1710 return
1711 self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')
1712
1713 all_entries = PlaylistEntries(self, ie_result)
1714 entries = orderedSet(all_entries.get_requested_items(), lazy=True)
1715
1716 lazy = self.params.get('lazy_playlist')
1717 if lazy:
1718 resolved_entries, n_entries = [], 'N/A'
1719 ie_result['requested_entries'], ie_result['entries'] = None, None
1720 else:
1721 entries = resolved_entries = list(entries)
1722 n_entries = len(resolved_entries)
1723 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
1724 if not ie_result.get('playlist_count'):
1725 # Better to do this after potentially exhausting entries
1726 ie_result['playlist_count'] = all_entries.get_full_count()
1727
1728 common_info = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
1729 ie_copy = collections.ChainMap(ie_result, common_info)
1730
1731 _infojson_written = False
1732 write_playlist_files = self.params.get('allow_playlist_files', True)
1733 if write_playlist_files and self.params.get('list_thumbnails'):
1734 self.list_thumbnails(ie_result)
1735 if write_playlist_files and not self.params.get('simulate'):
1736 _infojson_written = self._write_info_json(
1737 'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
1738 if _infojson_written is None:
1739 return
1740 if self._write_description('playlist', ie_result,
1741 self.prepare_filename(ie_copy, 'pl_description')) is None:
1742 return
1743 # TODO: This should be passed to ThumbnailsConvertor if necessary
1744 self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))
1745
1746 if lazy:
1747 if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
1748 self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
1749 elif self.params.get('playlistreverse'):
1750 entries.reverse()
1751 elif self.params.get('playlistrandom'):
1752 random.shuffle(entries)
1753
1754 self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} videos'
1755 f'{format_field(ie_result, "playlist_count", " of %s")}')
1756
1757 keep_resolved_entries = self.params.get('extract_flat') != 'discard'
1758 if self.params.get('extract_flat') == 'discard_in_playlist':
1759 keep_resolved_entries = ie_result['_type'] != 'playlist'
1760 if keep_resolved_entries:
1761 self.write_debug('The information of all playlist entries will be held in memory')
1762
1763 failures = 0
1764 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
1765 for i, (playlist_index, entry) in enumerate(entries):
1766 if lazy:
1767 resolved_entries.append((playlist_index, entry))
1768 if not entry:
1769 continue
1770
1771 entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
1772 if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
1773 playlist_index = ie_result['requested_entries'][i]
1774
1775 extra = {
1776 **common_info,
1777 'playlist_index': playlist_index,
1778 'playlist_autonumber': i + 1,
1779 }
1780
1781 if self._match_entry(collections.ChainMap(entry, extra), incomplete=True) is not None:
1782 continue
1783
1784 self.to_screen('[download] Downloading video %s of %s' % (
1785 self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
1786
1787 entry_result = self.__process_iterable_entry(entry, download, extra)
1788 if not entry_result:
1789 failures += 1
1790 if failures >= max_failures:
1791 self.report_error(
1792 f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
1793 break
1794 if keep_resolved_entries:
1795 resolved_entries[i] = (playlist_index, entry_result)
1796
1797 # Update with processed data
1798 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
1799
1800 # Write the updated info to json
1801 if _infojson_written is True and self._write_info_json(
1802 'updated playlist', ie_result,
1803 self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
1804 return
1805
1806 ie_result = self.run_all_pps('playlist', ie_result)
1807 self.to_screen(f'[download] Finished downloading playlist: {title}')
1808 return ie_result
1809
1810 @_handle_extraction_exceptions
1811 def __process_iterable_entry(self, entry, download, extra_info):
1812 return self.process_ie_result(
1813 entry, download=download, extra_info=extra_info)
1814
1815 def _build_format_filter(self, filter_spec):
1816 " Returns a function to filter the formats according to the filter_spec "
1817
1818 OPERATORS = {
1819 '<': operator.lt,
1820 '<=': operator.le,
1821 '>': operator.gt,
1822 '>=': operator.ge,
1823 '=': operator.eq,
1824 '!=': operator.ne,
1825 }
1826 operator_rex = re.compile(r'''(?x)\s*
1827 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
1828 (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1829 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
1830 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
1831 m = operator_rex.fullmatch(filter_spec)
1832 if m:
1833 try:
1834 comparison_value = int(m.group('value'))
1835 except ValueError:
1836 comparison_value = parse_filesize(m.group('value'))
1837 if comparison_value is None:
1838 comparison_value = parse_filesize(m.group('value') + 'B')
1839 if comparison_value is None:
1840 raise ValueError(
1841 'Invalid value %r in format specification %r' % (
1842 m.group('value'), filter_spec))
1843 op = OPERATORS[m.group('op')]
1844
1845 if not m:
1846 STR_OPERATORS = {
1847 '=': operator.eq,
1848 '^=': lambda attr, value: attr.startswith(value),
1849 '$=': lambda attr, value: attr.endswith(value),
1850 '*=': lambda attr, value: value in attr,
1851 '~=': lambda attr, value: value.search(attr) is not None
1852 }
1853 str_operator_rex = re.compile(r'''(?x)\s*
1854 (?P<key>[a-zA-Z0-9._-]+)\s*
1855 (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
1856 (?P<quote>["'])?
1857 (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
1858 (?(quote)(?P=quote))\s*
1859 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
1860 m = str_operator_rex.fullmatch(filter_spec)
1861 if m:
1862 if m.group('op') == '~=':
1863 comparison_value = re.compile(m.group('value'))
1864 else:
1865 comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
1866 str_op = STR_OPERATORS[m.group('op')]
1867 if m.group('negation'):
1868 op = lambda attr, value: not str_op(attr, value)
1869 else:
1870 op = str_op
1871
1872 if not m:
1873 raise SyntaxError('Invalid filter specification %r' % filter_spec)
1874
1875 def _filter(f):
1876 actual_value = f.get(m.group('key'))
1877 if actual_value is None:
1878 return m.group('none_inclusive')
1879 return op(actual_value, comparison_value)
1880 return _filter
1881
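# Example: applying a comparison filter (a sketch; `ydl` and the format
# dicts are illustrative). A '?' after the operator also keeps formats
# where the field is missing (none_inclusive):
#
#   keep = ydl._build_format_filter('height>=?720')
#   formats = [{'height': 1080}, {'height': 360}, {'vcodec': 'none'}]
#   [f for f in formats if keep(f)]  # -> keeps the 1st and 3rd entries
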
1882 def _check_formats(self, formats):
1883 for f in formats:
1884 self.to_screen('[info] Testing format %s' % f['format_id'])
1885 path = self.get_output_path('temp')
1886 if not self._ensure_dir_exists(f'{path}/'):
1887 continue
1888 temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
1889 temp_file.close()
1890 try:
1891 success, _ = self.dl(temp_file.name, f, test=True)
1892 except (DownloadError, OSError, ValueError) + network_exceptions:
1893 success = False
1894 finally:
1895 if os.path.exists(temp_file.name):
1896 try:
1897 os.remove(temp_file.name)
1898 except OSError:
1899 self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
1900 if success:
1901 yield f
1902 else:
1903 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
1904
1905 def _default_format_spec(self, info_dict, download=True):
1906
1907 def can_merge():
1908 merger = FFmpegMergerPP(self)
1909 return merger.available and merger.can_merge()
1910
1911 prefer_best = (
1912 not self.params.get('simulate')
1913 and download
1914 and (
1915 not can_merge()
1916 or info_dict.get('is_live') and not self.params.get('live_from_start')
1917 or self.params['outtmpl']['default'] == '-'))
1918 compat = (
1919 prefer_best
1920 or self.params.get('allow_multiple_audio_streams', False)
1921 or 'format-spec' in self.params['compat_opts'])
1922
1923 return (
1924 'best/bestvideo+bestaudio' if prefer_best
1925 else 'bestvideo*+bestaudio/best' if not compat
1926 else 'bestvideo+bestaudio/best')
1927
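# Sketch of the resulting default specs (mirrors the logic above):
#
#   prefer_best (no merging possible, live without
#   --live-from-start, or output to '-')   -> 'best/bestvideo+bestaudio'
#   compat ('format-spec' compat opt or
#   --audio-multistreams)                  -> 'bestvideo+bestaudio/best'
#   otherwise                              -> 'bestvideo*+bestaudio/best'
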
1928 def build_format_selector(self, format_spec):
1929 def syntax_error(note, start):
1930 message = (
1931 'Invalid format specification: '
1932 '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
1933 return SyntaxError(message)
1934
1935 PICKFIRST = 'PICKFIRST'
1936 MERGE = 'MERGE'
1937 SINGLE = 'SINGLE'
1938 GROUP = 'GROUP'
1939 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
1940
1941 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
1942 'video': self.params.get('allow_multiple_video_streams', False)}
1943
1944 check_formats = self.params.get('check_formats') == 'selected'
1945
1946 def _parse_filter(tokens):
1947 filter_parts = []
1948 for type, string, start, _, _ in tokens:
1949 if type == tokenize.OP and string == ']':
1950 return ''.join(filter_parts)
1951 else:
1952 filter_parts.append(string)
1953
1954 def _remove_unused_ops(tokens):
1955 # Remove operators that we don't use and join them with the surrounding strings
1956 # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
1957 ALLOWED_OPS = ('/', '+', ',', '(', ')')
1958 last_string, last_start, last_end, last_line = None, None, None, None
1959 for type, string, start, end, line in tokens:
1960 if type == tokenize.OP and string == '[':
1961 if last_string:
1962 yield tokenize.NAME, last_string, last_start, last_end, last_line
1963 last_string = None
1964 yield type, string, start, end, line
1965 # everything inside brackets will be handled by _parse_filter
1966 for type, string, start, end, line in tokens:
1967 yield type, string, start, end, line
1968 if type == tokenize.OP and string == ']':
1969 break
1970 elif type == tokenize.OP and string in ALLOWED_OPS:
1971 if last_string:
1972 yield tokenize.NAME, last_string, last_start, last_end, last_line
1973 last_string = None
1974 yield type, string, start, end, line
1975 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
1976 if not last_string:
1977 last_string = string
1978 last_start = start
1979 last_end = end
1980 else:
1981 last_string += string
1982 if last_string:
1983 yield tokenize.NAME, last_string, last_start, last_end, last_line
1984
1985 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
1986 selectors = []
1987 current_selector = None
1988 for type, string, start, _, _ in tokens:
1989 # ENCODING is only defined in python 3.x
1990 if type == getattr(tokenize, 'ENCODING', None):
1991 continue
1992 elif type in [tokenize.NAME, tokenize.NUMBER]:
1993 current_selector = FormatSelector(SINGLE, string, [])
1994 elif type == tokenize.OP:
1995 if string == ')':
1996 if not inside_group:
1997 # ')' will be handled by the parentheses group
1998 tokens.restore_last_token()
1999 break
2000 elif inside_merge and string in ['/', ',']:
2001 tokens.restore_last_token()
2002 break
2003 elif inside_choice and string == ',':
2004 tokens.restore_last_token()
2005 break
2006 elif string == ',':
2007 if not current_selector:
2008 raise syntax_error('"," must follow a format selector', start)
2009 selectors.append(current_selector)
2010 current_selector = None
2011 elif string == '/':
2012 if not current_selector:
2013 raise syntax_error('"/" must follow a format selector', start)
2014 first_choice = current_selector
2015 second_choice = _parse_format_selection(tokens, inside_choice=True)
2016 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
2017 elif string == '[':
2018 if not current_selector:
2019 current_selector = FormatSelector(SINGLE, 'best', [])
2020 format_filter = _parse_filter(tokens)
2021 current_selector.filters.append(format_filter)
2022 elif string == '(':
2023 if current_selector:
2024 raise syntax_error('Unexpected "("', start)
2025 group = _parse_format_selection(tokens, inside_group=True)
2026 current_selector = FormatSelector(GROUP, group, [])
2027 elif string == '+':
2028 if not current_selector:
2029 raise syntax_error('Unexpected "+"', start)
2030 selector_1 = current_selector
2031 selector_2 = _parse_format_selection(tokens, inside_merge=True)
2032 if not selector_2:
2033 raise syntax_error('Expected a selector', start)
2034 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
2035 else:
2036 raise syntax_error(f'Operator not recognized: "{string}"', start)
2037 elif type == tokenize.ENDMARKER:
2038 break
2039 if current_selector:
2040 selectors.append(current_selector)
2041 return selectors
2042
2043 def _merge(formats_pair):
2044 format_1, format_2 = formats_pair
2045
2046 formats_info = []
2047 formats_info.extend(format_1.get('requested_formats', (format_1,)))
2048 formats_info.extend(format_2.get('requested_formats', (format_2,)))
2049
2050 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
2051 get_no_more = {'video': False, 'audio': False}
2052 for (i, fmt_info) in enumerate(formats_info):
2053 if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
2054 formats_info.pop(i)
2055 continue
2056 for aud_vid in ['audio', 'video']:
2057 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
2058 if get_no_more[aud_vid]:
2059 formats_info.pop(i)
2060 break
2061 get_no_more[aud_vid] = True
2062
2063 if len(formats_info) == 1:
2064 return formats_info[0]
2065
2066 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
2067 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
2068
2069 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
2070 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
2071
2072 output_ext = self.params.get('merge_output_format')
2073 if not output_ext:
2074 if the_only_video:
2075 output_ext = the_only_video['ext']
2076 elif the_only_audio and not video_fmts:
2077 output_ext = the_only_audio['ext']
2078 else:
2079 output_ext = 'mkv'
2080
2081 filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
2082
2083 new_dict = {
2084 'requested_formats': formats_info,
2085 'format': '+'.join(filtered('format')),
2086 'format_id': '+'.join(filtered('format_id')),
2087 'ext': output_ext,
2088 'protocol': '+'.join(map(determine_protocol, formats_info)),
2089 'language': '+'.join(orderedSet(filtered('language'))) or None,
2090 'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
2091 'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
2092 'tbr': sum(filtered('tbr', 'vbr', 'abr')),
2093 }
2094
2095 if the_only_video:
2096 new_dict.update({
2097 'width': the_only_video.get('width'),
2098 'height': the_only_video.get('height'),
2099 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
2100 'fps': the_only_video.get('fps'),
2101 'dynamic_range': the_only_video.get('dynamic_range'),
2102 'vcodec': the_only_video.get('vcodec'),
2103 'vbr': the_only_video.get('vbr'),
2104 'stretched_ratio': the_only_video.get('stretched_ratio'),
2105 })
2106
2107 if the_only_audio:
2108 new_dict.update({
2109 'acodec': the_only_audio.get('acodec'),
2110 'abr': the_only_audio.get('abr'),
2111 'asr': the_only_audio.get('asr'),
2112 })
2113
2114 return new_dict
2115
2116 def _check_formats(formats):
2117 if not check_formats:
2118 yield from formats
2119 return
2120 yield from self._check_formats(formats)
2121
2122 def _build_selector_function(selector):
2123 if isinstance(selector, list): # ,
2124 fs = [_build_selector_function(s) for s in selector]
2125
2126 def selector_function(ctx):
2127 for f in fs:
2128 yield from f(ctx)
2129 return selector_function
2130
2131 elif selector.type == GROUP: # ()
2132 selector_function = _build_selector_function(selector.selector)
2133
2134 elif selector.type == PICKFIRST: # /
2135 fs = [_build_selector_function(s) for s in selector.selector]
2136
2137 def selector_function(ctx):
2138 for f in fs:
2139 picked_formats = list(f(ctx))
2140 if picked_formats:
2141 return picked_formats
2142 return []
2143
2144 elif selector.type == MERGE: # +
2145 selector_1, selector_2 = map(_build_selector_function, selector.selector)
2146
2147 def selector_function(ctx):
2148 for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
2149 yield _merge(pair)
2150
2151 elif selector.type == SINGLE: # atom
2152 format_spec = selector.selector or 'best'
2153
2154 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
2155 if format_spec == 'all':
2156 def selector_function(ctx):
2157 yield from _check_formats(ctx['formats'][::-1])
2158 elif format_spec == 'mergeall':
2159 def selector_function(ctx):
2160 formats = list(_check_formats(
2161 f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
2162 if not formats:
2163 return
2164 merged_format = formats[-1]
2165 for f in formats[-2::-1]:
2166 merged_format = _merge((merged_format, f))
2167 yield merged_format
2168
2169 else:
2170 format_fallback, separate_fallback, format_reverse, format_idx = False, None, True, 1
2171 mobj = re.match(
2172 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
2173 format_spec)
2174 if mobj is not None:
2175 format_idx = int_or_none(mobj.group('n'), default=1)
2176 format_reverse = mobj.group('bw')[0] == 'b'
2177 format_type = (mobj.group('type') or [None])[0]
2178 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
2179 format_modified = mobj.group('mod') is not None
2180
2181 format_fallback = not format_type and not format_modified # for b, w
2182 _filter_f = (
2183 (lambda f: f.get('%scodec' % format_type) != 'none')
2184 if format_type and format_modified # bv*, ba*, wv*, wa*
2185 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
2186 if format_type # bv, ba, wv, wa
2187 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
2188 if not format_modified # b, w
2189 else lambda f: True) # b*, w*
2190 filter_f = lambda f: _filter_f(f) and (
2191 f.get('vcodec') != 'none' or f.get('acodec') != 'none')
2192 else:
2193 if format_spec in self._format_selection_exts['audio']:
2194 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
2195 elif format_spec in self._format_selection_exts['video']:
2196 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
2197 separate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
2198 elif format_spec in self._format_selection_exts['storyboards']:
2199 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
2200 else:
2201 filter_f = lambda f: f.get('format_id') == format_spec # id
2202
2203 def selector_function(ctx):
2204 formats = list(ctx['formats'])
2205 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
2206 if not matches:
2207 if format_fallback and ctx['incomplete_formats']:
2208 # for extractors with incomplete formats (audio only (soundcloud)
2209 # or video only (imgur)), best/worst will fall back to
2210 # best/worst {video,audio}-only format
2211 matches = formats
2212 elif separate_fallback and not ctx['has_merged_format']:
2213 # for compatibility with youtube-dl when there is no pre-merged format
2214 matches = list(filter(separate_fallback, formats))
2215 matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
2216 try:
2217 yield matches[format_idx - 1]
2218 except LazyList.IndexError:
2219 return
2220
2221 filters = [self._build_format_filter(f) for f in selector.filters]
2222
2223 def final_selector(ctx):
2224 ctx_copy = dict(ctx)
2225 for _filter in filters:
2226 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
2227 return selector_function(ctx_copy)
2228 return final_selector
2229
2230 stream = io.BytesIO(format_spec.encode())
2231 try:
2232 tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
2233 except tokenize.TokenError:
2234 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
2235
2236 class TokenIterator:
2237 def __init__(self, tokens):
2238 self.tokens = tokens
2239 self.counter = 0
2240
2241 def __iter__(self):
2242 return self
2243
2244 def __next__(self):
2245 if self.counter >= len(self.tokens):
2246 raise StopIteration()
2247 value = self.tokens[self.counter]
2248 self.counter += 1
2249 return value
2250
2251 next = __next__
2252
2253 def restore_last_token(self):
2254 self.counter -= 1
2255
2256 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
2257 return _build_selector_function(parsed_selector)
2258
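# Example: building and applying a selector (a sketch; `ydl` and
# `formats` are illustrative). The ctx keys mirror the ones passed in
# process_video_result below; the last yielded format is the preferred one:
#
#   selector = ydl.build_format_selector('bestvideo*+bestaudio/best')
#   ctx = {'formats': formats, 'has_merged_format': True,
#          'incomplete_formats': False}
#   chosen = list(selector(ctx))
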
2259 def _calc_headers(self, info_dict):
2260 res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})
2261
2262 cookies = self._calc_cookies(info_dict['url'])
2263 if cookies:
2264 res['Cookie'] = cookies
2265
2266 if 'X-Forwarded-For' not in res:
2267 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
2268 if x_forwarded_for_ip:
2269 res['X-Forwarded-For'] = x_forwarded_for_ip
2270
2271 return res
2272
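# Example of header precedence (a sketch; values are hypothetical).
# info_dict-level headers override the global params, and cookies for
# info_dict['url'] are folded in as a 'Cookie' header:
#
#   ydl.params['http_headers'] = {'User-Agent': 'global-UA'}
#   info = {'url': 'https://example.com/v.mp4',
#           'http_headers': {'User-Agent': 'format-UA'}}
#   ydl._calc_headers(info)['User-Agent']  # -> 'format-UA'
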
2273 def _calc_cookies(self, url):
2274 pr = sanitized_Request(url)
2275 self.cookiejar.add_cookie_header(pr)
2276 return pr.get_header('Cookie')
2277
2278 def _sort_thumbnails(self, thumbnails):
2279 thumbnails.sort(key=lambda t: (
2280 t.get('preference') if t.get('preference') is not None else -1,
2281 t.get('width') if t.get('width') is not None else -1,
2282 t.get('height') if t.get('height') is not None else -1,
2283 t.get('id') if t.get('id') is not None else '',
2284 t.get('url')))
2285
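# Example: thumbnails sort worst-to-best, so the last entry is the
# preferred one (a sketch; only keys relevant to the sort are shown):
#
#   thumbs = [{'url': 'a', 'width': 1280}, {'url': 'b', 'width': 320}]
#   ydl._sort_thumbnails(thumbs)
#   thumbs[-1]['url']  # -> 'a' (largest width sorts last)
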
2286 def _sanitize_thumbnails(self, info_dict):
2287 thumbnails = info_dict.get('thumbnails')
2288 if thumbnails is None:
2289 thumbnail = info_dict.get('thumbnail')
2290 if thumbnail:
2291 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
2292 if not thumbnails:
2293 return
2294
2295 def check_thumbnails(thumbnails):
2296 for t in thumbnails:
2297 self.to_screen(f'[info] Testing thumbnail {t["id"]}')
2298 try:
2299 self.urlopen(HEADRequest(t['url']))
2300 except network_exceptions as err:
2301 self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
2302 continue
2303 yield t
2304
2305 self._sort_thumbnails(thumbnails)
2306 for i, t in enumerate(thumbnails):
2307 if t.get('id') is None:
2308 t['id'] = '%d' % i
2309 if t.get('width') and t.get('height'):
2310 t['resolution'] = '%dx%d' % (t['width'], t['height'])
2311 t['url'] = sanitize_url(t['url'])
2312
2313 if self.params.get('check_formats') is True:
2314 info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
2315 else:
2316 info_dict['thumbnails'] = thumbnails
2317
2318 def _fill_common_fields(self, info_dict, is_video=True):
2319 # TODO: move sanitization here
2320 if is_video:
2321 # playlists are allowed to lack "title"
2322 title = info_dict.get('title', NO_DEFAULT)
2323 if title is NO_DEFAULT:
2324 raise ExtractorError('Missing "title" field in extractor result',
2325 video_id=info_dict['id'], ie=info_dict['extractor'])
2326 info_dict['fulltitle'] = title
2327 if not title:
2328 if title == '':
2329 self.write_debug('Extractor gave empty title. Creating a generic title')
2330 else:
2331 self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
2332 info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'
2333
2334 if info_dict.get('duration') is not None:
2335 info_dict['duration_string'] = formatSeconds(info_dict['duration'])
2336
2337 for ts_key, date_key in (
2338 ('timestamp', 'upload_date'),
2339 ('release_timestamp', 'release_date'),
2340 ('modified_timestamp', 'modified_date'),
2341 ):
2342 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
2343 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2344 # see http://bugs.python.org/issue1646728)
2345 with contextlib.suppress(ValueError, OverflowError, OSError):
2346 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
2347 info_dict[date_key] = upload_date.strftime('%Y%m%d')
2348
2349 live_keys = ('is_live', 'was_live')
2350 live_status = info_dict.get('live_status')
2351 if live_status is None:
2352 for key in live_keys:
2353 if info_dict.get(key) is False:
2354 continue
2355 if info_dict.get(key):
2356 live_status = key
2357 break
2358 if all(info_dict.get(key) is False for key in live_keys):
2359 live_status = 'not_live'
2360 if live_status:
2361 info_dict['live_status'] = live_status
2362 for key in live_keys:
2363 if info_dict.get(key) is None:
2364 info_dict[key] = (live_status == key)
2365
2366 # Auto generate title fields corresponding to the *_number fields when missing
2367 # in order to always have clean titles. This is very common for TV series.
2368 for field in ('chapter', 'season', 'episode'):
2369 if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
2370 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
2371
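# Example of the auto-generated *_number titles (a sketch):
#
#   info = {'episode_number': 3}
#   # after ydl._fill_common_fields(info, is_video=False):
#   # info['episode'] == 'Episode 3'
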
2372 def _raise_pending_errors(self, info):
2373 err = info.pop('__pending_error', None)
2374 if err:
2375 self.report_error(err, tb=False)
2376
2377 def process_video_result(self, info_dict, download=True):
2378 assert info_dict.get('_type', 'video') == 'video'
2379 self._num_videos += 1
2380
2381 if 'id' not in info_dict:
2382 raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
2383 elif not info_dict.get('id'):
2384 raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])
2385
2386 def report_force_conversion(field, field_not, conversion):
2387 self.report_warning(
2388 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2389 % (field, field_not, conversion))
2390
2391 def sanitize_string_field(info, string_field):
2392 field = info.get(string_field)
2393 if field is None or isinstance(field, str):
2394 return
2395 report_force_conversion(string_field, 'a string', 'string')
2396 info[string_field] = str(field)
2397
2398 def sanitize_numeric_fields(info):
2399 for numeric_field in self._NUMERIC_FIELDS:
2400 field = info.get(numeric_field)
2401 if field is None or isinstance(field, (int, float)):
2402 continue
2403 report_force_conversion(numeric_field, 'numeric', 'int')
2404 info[numeric_field] = int_or_none(field)
2405
2406 sanitize_string_field(info_dict, 'id')
2407 sanitize_numeric_fields(info_dict)
2408 if info_dict.get('section_end') and info_dict.get('section_start') is not None:
2409 info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
2410 if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
2411 self.report_warning('"duration" field is negative, there is an error in extractor')
2412
2413 chapters = info_dict.get('chapters') or []
2414 if chapters and chapters[0].get('start_time'):
2415 chapters.insert(0, {'start_time': 0})
2416
2417 dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
2418 for idx, (prev, current, next_) in enumerate(zip(
2419 (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
2420 if current.get('start_time') is None:
2421 current['start_time'] = prev.get('end_time')
2422 if not current.get('end_time'):
2423 current['end_time'] = next_.get('start_time')
2424 if not current.get('title'):
2425 current['title'] = f'<Untitled Chapter {idx}>'
2426
2427 if 'playlist' not in info_dict:
2428 # It isn't part of a playlist
2429 info_dict['playlist'] = None
2430 info_dict['playlist_index'] = None
2431
2432 self._sanitize_thumbnails(info_dict)
2433
2434 thumbnail = info_dict.get('thumbnail')
2435 thumbnails = info_dict.get('thumbnails')
2436 if thumbnail:
2437 info_dict['thumbnail'] = sanitize_url(thumbnail)
2438 elif thumbnails:
2439 info_dict['thumbnail'] = thumbnails[-1]['url']
2440
2441 if info_dict.get('display_id') is None and 'id' in info_dict:
2442 info_dict['display_id'] = info_dict['id']
2443
2444 self._fill_common_fields(info_dict)
2445
2446 for cc_kind in ('subtitles', 'automatic_captions'):
2447 cc = info_dict.get(cc_kind)
2448 if cc:
2449 for _, subtitle in cc.items():
2450 for subtitle_format in subtitle:
2451 if subtitle_format.get('url'):
2452 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
2453 if subtitle_format.get('ext') is None:
2454 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
2455
2456 automatic_captions = info_dict.get('automatic_captions')
2457 subtitles = info_dict.get('subtitles')
2458
2459 info_dict['requested_subtitles'] = self.process_subtitles(
2460 info_dict['id'], subtitles, automatic_captions)
2461
2462 if info_dict.get('formats') is None:
2463 # There's only one format available
2464 formats = [info_dict]
2465 else:
2466 formats = info_dict['formats']
2467
2468 # or None ensures --clean-infojson removes it
2469 info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
2470 if not self.params.get('allow_unplayable_formats'):
2471 formats = [f for f in formats if not f.get('has_drm')]
2472 if info_dict['_has_drm'] and all(
2473 f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
2474 self.report_warning(
2475 'This video is DRM protected and only images are available for download. '
2476 'Use --list-formats to see them')
2477
2478 get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
2479 if not get_from_start:
2480 info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
2481 if info_dict.get('is_live') and formats:
2482 formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
2483 if get_from_start and not formats:
2484 self.raise_no_formats(info_dict, msg=(
2485 '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
2486 'If you want to download from the current time, use --no-live-from-start'))
2487
2488 if not formats:
2489 self.raise_no_formats(info_dict)
2490
2491 def is_wellformed(f):
2492 url = f.get('url')
2493 if not url:
2494 self.report_warning(
2495 '"url" field is missing or empty - skipping format, '
2496 'there is an error in extractor')
2497 return False
2498 if isinstance(url, bytes):
2499 sanitize_string_field(f, 'url')
2500 return True
2501
2502 # Filter out malformed formats for better extraction robustness
2503 formats = list(filter(is_wellformed, formats))
2504
2505 formats_dict = {}
2506
2507 # We check that all the formats have the format and format_id fields
2508 for i, format in enumerate(formats):
2509 sanitize_string_field(format, 'format_id')
2510 sanitize_numeric_fields(format)
2511 format['url'] = sanitize_url(format['url'])
2512 if not format.get('format_id'):
2513 format['format_id'] = str(i)
2514 else:
2515 # Sanitize format_id from characters used in format selector expression
2516 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
2517 format_id = format['format_id']
2518 if format_id not in formats_dict:
2519 formats_dict[format_id] = []
2520 formats_dict[format_id].append(format)
2521
2522 # Make sure all formats have unique format_id
2523 common_exts = set(itertools.chain(*self._format_selection_exts.values()))
2524 for format_id, ambiguous_formats in formats_dict.items():
2525 ambiguous_id = len(ambiguous_formats) > 1
2526 for i, format in enumerate(ambiguous_formats):
2527 if ambiguous_id:
2528 format['format_id'] = '%s-%d' % (format_id, i)
2529 if format.get('ext') is None:
2530 format['ext'] = determine_ext(format['url']).lower()
2531 # Ensure there is no conflict between id and ext in format selection
2532 # See https://github.com/yt-dlp/yt-dlp/issues/1282
2533 if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
2534 format['format_id'] = 'f%s' % format['format_id']
2535
2536 for i, format in enumerate(formats):
2537 if format.get('format') is None:
2538 format['format'] = '{id} - {res}{note}'.format(
2539 id=format['format_id'],
2540 res=self.format_resolution(format),
2541 note=format_field(format, 'format_note', ' (%s)'),
2542 )
2543 if format.get('protocol') is None:
2544 format['protocol'] = determine_protocol(format)
2545 if format.get('resolution') is None:
2546 format['resolution'] = self.format_resolution(format, default=None)
2547 if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
2548 format['dynamic_range'] = 'SDR'
2549 if (info_dict.get('duration') and format.get('tbr')
2550 and not format.get('filesize') and not format.get('filesize_approx')):
2551 format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
2552
2553 # Add HTTP headers, so that external programs can use them from the
2554 # json output
2555 full_format_info = info_dict.copy()
2556 full_format_info.update(format)
2557 format['http_headers'] = self._calc_headers(full_format_info)
2558 # Remove private housekeeping stuff
2559 if '__x_forwarded_for_ip' in info_dict:
2560 del info_dict['__x_forwarded_for_ip']
2561
2562 if self.params.get('check_formats') is True:
2563 formats = LazyList(self._check_formats(formats[::-1]), reverse=True)
2564
2565 if not formats or formats[0] is not info_dict:
2566 # only set the 'formats' field if the original info_dict listed them;
2567 # otherwise we end up with a circular reference: the first (and only)
2568 # element in the 'formats' field in info_dict is info_dict itself,
2569 # which can't be exported to json
2570 info_dict['formats'] = formats
2571
2572 info_dict, _ = self.pre_process(info_dict)
2573
2574 if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
2575 return info_dict
2576
2577 self.post_extract(info_dict)
2578 info_dict, _ = self.pre_process(info_dict, 'after_filter')
2579
2580 # The pre-processors may have modified the formats
2581 formats = info_dict.get('formats', [info_dict])
2582
2583 list_only = self.params.get('simulate') is None and (
2584 self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
2585 interactive_format_selection = not list_only and self.format_selector == '-'
2586 if self.params.get('list_thumbnails'):
2587 self.list_thumbnails(info_dict)
2588 if self.params.get('listsubtitles'):
2589 if 'automatic_captions' in info_dict:
2590 self.list_subtitles(
2591 info_dict['id'], automatic_captions, 'automatic captions')
2592 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
2593 if self.params.get('listformats') or interactive_format_selection:
2594 self.list_formats(info_dict)
2595 if list_only:
2596 # Without this printing, -F --print-json will not work
2597 self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
2598 return info_dict
2599
2600 format_selector = self.format_selector
2601 if format_selector is None:
2602 req_format = self._default_format_spec(info_dict, download=download)
2603 self.write_debug('Default format spec: %s' % req_format)
2604 format_selector = self.build_format_selector(req_format)
2605
2606 while True:
2607 if interactive_format_selection:
2608 req_format = input(
2609 self._format_screen('\nEnter format selector: ', self.Styles.EMPHASIS))
2610 try:
2611 format_selector = self.build_format_selector(req_format)
2612 except SyntaxError as err:
2613 self.report_error(err, tb=False, is_error=False)
2614 continue
2615
2616 formats_to_download = list(format_selector({
2617 'formats': formats,
2618 'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
2619 'incomplete_formats': (
2620 # All formats are video-only or
2621 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
2622 # all formats are audio-only
2623 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
2624 }))
2625 if interactive_format_selection and not formats_to_download:
2626 self.report_error('Requested format is not available', tb=False, is_error=False)
2627 continue
2628 break
2629
2630 if not formats_to_download:
2631 if not self.params.get('ignore_no_formats_error'):
2632 raise ExtractorError(
2633 'Requested format is not available. Use --list-formats for a list of available formats',
2634 expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
2635 self.report_warning('Requested format is not available')
2636 # Process what we can, even without any available formats.
2637 formats_to_download = [{}]
2638
2639 requested_ranges = self.params.get('download_ranges')
2640 if requested_ranges:
2641 requested_ranges = tuple(requested_ranges(info_dict, self))
2642
2643 best_format, downloaded_formats = formats_to_download[-1], []
2644 if download:
2645 if best_format:
2646 def to_screen(*msg):
2647 self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')
2648
2649 to_screen(f'Downloading {len(formats_to_download)} format(s):',
2650 (f['format_id'] for f in formats_to_download))
2651 if requested_ranges:
2652 to_screen(f'Downloading {len(requested_ranges)} time ranges:',
2653 (f'{int(c["start_time"])}-{int(c["end_time"])}' for c in requested_ranges))
2654 max_downloads_reached = False
2655
2656 for fmt, chapter in itertools.product(formats_to_download, requested_ranges or [{}]):
2657 new_info = self._copy_infodict(info_dict)
2658 new_info.update(fmt)
2659 offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
2660 if chapter or offset:
2661 new_info.update({
2662 'section_start': offset + chapter.get('start_time', 0),
2663 'section_end': offset + min(chapter.get('end_time', duration), duration),
2664 'section_title': chapter.get('title'),
2665 'section_number': chapter.get('index'),
2666 })
2667 downloaded_formats.append(new_info)
2668 try:
2669 self.process_info(new_info)
2670 except MaxDownloadsReached:
2671 max_downloads_reached = True
2672 self._raise_pending_errors(new_info)
2673 # Remove copied info
2674 for key, val in tuple(new_info.items()):
2675 if info_dict.get(key) == val:
2676 new_info.pop(key)
2677 if max_downloads_reached:
2678 break
2679
2680 write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
2681 assert write_archive.issubset({True, False, 'ignore'})
2682 if True in write_archive and False not in write_archive:
2683 self.record_download_archive(info_dict)
2684
2685 info_dict['requested_downloads'] = downloaded_formats
2686 info_dict = self.run_all_pps('after_video', info_dict)
2687 if max_downloads_reached:
2688 raise MaxDownloadsReached()
2689
2690 # We update the info dict with the selected best quality format (backwards compatibility)
2691 info_dict.update(best_format)
2692 return info_dict
2693
2694 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
2695 """Select the requested subtitles and their format"""
2696 available_subs, normal_sub_langs = {}, []
2697 if normal_subtitles and self.params.get('writesubtitles'):
2698 available_subs.update(normal_subtitles)
2699 normal_sub_langs = tuple(normal_subtitles.keys())
2700 if automatic_captions and self.params.get('writeautomaticsub'):
2701 for lang, cap_info in automatic_captions.items():
2702 if lang not in available_subs:
2703 available_subs[lang] = cap_info
2704
2705 if (not available_subs or not (
2706 self.params.get('writesubtitles')
2707 or self.params.get('writeautomaticsub'))):
2708 return None
2709
2710 all_sub_langs = tuple(available_subs.keys())
2711 if self.params.get('allsubtitles', False):
2712 requested_langs = all_sub_langs
2713 elif self.params.get('subtitleslangs', False):
2714 # A list is used so that the order of languages will be the same as
2715 # given in subtitleslangs. See https://github.com/yt-dlp/yt-dlp/issues/1041
2716 requested_langs = []
2717 for lang_re in self.params.get('subtitleslangs'):
2718 discard = lang_re[0] == '-'
2719 if discard:
2720 lang_re = lang_re[1:]
2721 if lang_re == 'all':
2722 if discard:
2723 requested_langs = []
2724 else:
2725 requested_langs.extend(all_sub_langs)
2726 continue
2727 current_langs = filter(re.compile(lang_re + '$').match, all_sub_langs)
2728 if discard:
2729 for lang in current_langs:
2730 while lang in requested_langs:
2731 requested_langs.remove(lang)
2732 else:
2733 requested_langs.extend(current_langs)
2734 requested_langs = orderedSet(requested_langs)
2735 elif normal_sub_langs:
2736 requested_langs = ['en'] if 'en' in normal_sub_langs else normal_sub_langs[:1]
2737 else:
2738 requested_langs = ['en'] if 'en' in all_sub_langs else all_sub_langs[:1]
2739 if requested_langs:
2740 self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))
2741
2742 formats_query = self.params.get('subtitlesformat', 'best')
2743 formats_preference = formats_query.split('/') if formats_query else []
2744 subs = {}
2745 for lang in requested_langs:
2746 formats = available_subs.get(lang)
2747 if formats is None:
2748 self.report_warning(f'{lang} subtitles not available for {video_id}')
2749 continue
2750 for ext in formats_preference:
2751 if ext == 'best':
2752 f = formats[-1]
2753 break
2754 matches = list(filter(lambda f: f['ext'] == ext, formats))
2755 if matches:
2756 f = matches[-1]
2757 break
2758 else:
2759 f = formats[-1]
2760 self.report_warning(
2761 'No subtitle format found matching "%s" for language %s, '
2762 'using %s' % (formats_query, lang, f['ext']))
2763 subs[lang] = f
2764 return subs
2765
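# Example of subtitleslangs matching (a sketch; assumes --write-subs and
# that 'en', 'en-GB' and 'fr' are available). Each entry is a regex
# matched against the full language code; a leading '-' discards matches:
#
#   params['subtitleslangs'] = ['en.*', '-en-GB']
#   # -> requested languages: ['en'] ('en-GB' is matched, then removed)
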
2766 def _forceprint(self, key, info_dict):
2767 if info_dict is None:
2768 return
2769 info_copy = info_dict.copy()
2770 info_copy['formats_table'] = self.render_formats_table(info_dict)
2771 info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
2772 info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
2773 info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))
2774
2775 def format_tmpl(tmpl):
2776 mobj = re.match(r'\w+(=?)$', tmpl)
2777 if mobj and mobj.group(1):
2778 return f'{tmpl[:-1]} = %({tmpl[:-1]})r'
2779 elif mobj:
2780 return f'%({tmpl})s'
2781 return tmpl
2782
2783 for tmpl in self.params['forceprint'].get(key, []):
2784 self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))
2785
2786 for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
2787 filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
2788 tmpl = format_tmpl(tmpl)
2789 self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
2790 if self._ensure_dir_exists(filename):
2791 with open(filename, 'a', encoding='utf-8') as f:
2792 f.write(self.evaluate_outtmpl(tmpl, info_copy) + '\n')
2793
2794 def __forced_printings(self, info_dict, filename, incomplete):
2795 def print_mandatory(field, actual_field=None):
2796 if actual_field is None:
2797 actual_field = field
2798 if (self.params.get('force%s' % field, False)
2799 and (not incomplete or info_dict.get(actual_field) is not None)):
2800 self.to_stdout(info_dict[actual_field])
2801
2802 def print_optional(field):
2803 if (self.params.get('force%s' % field, False)
2804 and info_dict.get(field) is not None):
2805 self.to_stdout(info_dict[field])
2806
2807 info_dict = info_dict.copy()
2808 if filename is not None:
2809 info_dict['filename'] = filename
2810 if info_dict.get('requested_formats') is not None:
2811 # For RTMP URLs, also include the playpath
2812 info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
2813 elif info_dict.get('url'):
2814 info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')
2815
2816 if (self.params.get('forcejson')
2817 or self.params['forceprint'].get('video')
2818 or self.params['print_to_file'].get('video')):
2819 self.post_extract(info_dict)
2820 self._forceprint('video', info_dict)
2821
2822 print_mandatory('title')
2823 print_mandatory('id')
2824 print_mandatory('url', 'urls')
2825 print_optional('thumbnail')
2826 print_optional('description')
2827 print_optional('filename')
2828 if self.params.get('forceduration') and info_dict.get('duration') is not None:
2829 self.to_stdout(formatSeconds(info_dict['duration']))
2830 print_mandatory('format')
2831
2832 if self.params.get('forcejson'):
2833 self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
2834
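# Internal helper: choose a suitable FD for `info` and download it to filename
# `name` ('-' means stdout); with test=True, only probe using throwaway options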
2835 def dl(self, name, info, subtitle=False, test=False):
2836 if not info.get('url'):
2837 self.raise_no_formats(info, True)
2838
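# Test mode: quiet, download only a small portion, write no .part/.ytdl
# files and always overwrite, so nothing of the probe is left behind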
2839 if test:
2840 verbose = self.params.get('verbose')
2841 params = {
2842 'test': True,
2843 'quiet': self.params.get('quiet') or not verbose,
2844 'verbose': verbose,
2845 'noprogress': not verbose,
2846 'nopart': True,
2847 'skip_unavailable_fragments': False,
2848 'keep_fragments': False,
2849 'overwrites': True,
2850 '_no_ytdl_file': True,
2851 }
2852 else:
2853 params = self.params
2854 fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
2855 if not test:
2856 for ph in self._progress_hooks:
2857 fd.add_progress_hook(ph)
2858 urls = '", "'.join(
2859 (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
2860 for f in info.get('requested_formats', []) or [info])
2861 self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')
2862
2863 # Note: Ideally, info should be deep-copied so that hooks cannot modify it.
2864 # But it may contain objects that are not deep-copyable
2865 new_info = self._copy_infodict(info)
2866 if new_info.get('http_headers') is None:
2867 new_info['http_headers'] = self._calc_headers(new_info)
2868 return fd.download(name, new_info, subtitle)
2869
2870 def existing_file(self, filepaths, *, default_overwrite=True):
2871 existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
2872 if existing_files and not self.params.get('overwrites', default_overwrite):
2873 return existing_files[0]
2874
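# Overwriting is allowed - remove every existing candidate so it is downloaded afresh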
2875 for file in existing_files:
2876 self.report_file_delete(file)
2877 os.remove(file)
2878 return None
2879
2880 def process_info(self, info_dict):
2881 """Process a single resolved IE result. (Modifies it in-place)"""
2882
2883 assert info_dict.get('_type', 'video') == 'video'
2884 original_infodict = info_dict
2885
2886 if 'format' not in info_dict and 'ext' in info_dict:
2887 info_dict['format'] = info_dict['ext']
2888
2889 # This is mostly just for backward compatibility of process_info
2890 # As a side-effect, this allows for format-specific filters
2891 if self._match_entry(info_dict) is not None:
2892 info_dict['__write_download_archive'] = 'ignore'
2893 return
2894
2895 # Does nothing under normal operation - for backward compatibility of process_info
2896 self.post_extract(info_dict)
2897 self._num_downloads += 1
2898
2899 # info_dict['_filename'] needs to be set for backward compatibility
2900 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2901 temp_filename = self.prepare_filename(info_dict, 'temp')
2902 files_to_move = {}
2903
2904 # Forced printings
2905 self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
2906
2907 def check_max_downloads():
2908 if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
2909 raise MaxDownloadsReached()
2910
2911 if self.params.get('simulate'):
2912 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
2913 check_max_downloads()
2914 return
2915
2916 if full_filename is None:
2917 return
2918 if not self._ensure_dir_exists(encodeFilename(full_filename)):
2919 return
2920 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
2921 return
2922
2923 if self._write_description('video', info_dict,
2924 self.prepare_filename(info_dict, 'description')) is None:
2925 return
2926
2927 sub_files = self._write_subtitles(info_dict, temp_filename)
2928 if sub_files is None:
2929 return
2930 files_to_move.update(dict(sub_files))
2931
2932 thumb_files = self._write_thumbnails(
2933 'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
2934 if thumb_files is None:
2935 return
2936 files_to_move.update(dict(thumb_files))
2937
2938 infofn = self.prepare_filename(info_dict, 'infojson')
2939 _infojson_written = self._write_info_json('video', info_dict, infofn)
2940 if _infojson_written:
2941 info_dict['infojson_filename'] = infofn
2942 # For backward compatibility, even though it was a private field
2943 info_dict['__infojson_filename'] = infofn
2944 elif _infojson_written is None:
2945 return
2946
2947 # Note: Annotations are deprecated
2948 annofn = None
2949 if self.params.get('writeannotations', False):
2950 annofn = self.prepare_filename(info_dict, 'annotation')
2951 if annofn:
2952 if not self._ensure_dir_exists(encodeFilename(annofn)):
2953 return
2954 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
2955 self.to_screen('[info] Video annotations are already present')
2956 elif not info_dict.get('annotations'):
2957 self.report_warning('There are no annotations to write.')
2958 else:
2959 try:
2960 self.to_screen('[info] Writing video annotations to: ' + annofn)
2961 with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
2962 annofile.write(info_dict['annotations'])
2963 except (KeyError, TypeError):
2964 self.report_warning('There are no annotations to write.')
2965 except OSError:
2966 self.report_error('Cannot write annotations file: ' + annofn)
2967 return
2968
2969 # Write internet shortcut files
2970 def _write_link_file(link_type):
2971 url = try_get(info_dict['webpage_url'], iri_to_uri)
2972 if not url:
2973 self.report_warning(
2974 f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
2975 return True
2976 linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
2977 if not self._ensure_dir_exists(encodeFilename(linkfn)):
2978 return False
2979 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
2980 self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
2981 return True
2982 try:
2983 self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
2984 with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
2985 newline='\r\n' if link_type == 'url' else '\n') as linkfile:
2986 template_vars = {'url': url}
2987 if link_type == 'desktop':
2988 template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
2989 linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
2990 except OSError:
2991 self.report_error(f'Cannot write internet shortcut {linkfn}')
2992 return False
2993 return True
2994
2995 write_links = {
2996 'url': self.params.get('writeurllink'),
2997 'webloc': self.params.get('writewebloclink'),
2998 'desktop': self.params.get('writedesktoplink'),
2999 }
3000 if self.params.get('writelink'):
3001 link_type = ('webloc' if sys.platform == 'darwin'
3002 else 'desktop' if sys.platform.startswith('linux')
3003 else 'url')
3004 write_links[link_type] = True
3005
3006 if any(should_write and not _write_link_file(link_type)
3007 for link_type, should_write in write_links.items()):
3008 return
3009
3010 def replace_info_dict(new_info):
3011 nonlocal info_dict
3012 if new_info == info_dict:
3013 return
3014 info_dict.clear()
3015 info_dict.update(new_info)
3016
3017 new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
3018 replace_info_dict(new_info)
3019
3020 if self.params.get('skip_download'):
3021 info_dict['filepath'] = temp_filename
3022 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3023 info_dict['__files_to_move'] = files_to_move
3024 replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
3025 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
3026 else:
3027 # Download
3028 info_dict.setdefault('__postprocessors', [])
3029 try:
3030
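# Check for an already-downloaded file under both the current extension and
# the post-conversion extension (final_ext), so finished conversions are reused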
3031 def existing_video_file(*filepaths):
3032 ext = info_dict.get('ext')
3033 converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
3034 file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
3035 default_overwrite=False)
3036 if file:
3037 info_dict['ext'] = os.path.splitext(file)[1][1:]
3038 return file
3039
3040 fd, success = None, True
3041 if info_dict.get('protocol') or info_dict.get('url'):
3042 fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
3043 if fd is not FFmpegFD and (
3044 info_dict.get('section_start') or info_dict.get('section_end')):
3045 msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
3046 else 'You have requested downloading the video partially, but ffmpeg is not installed')
3047 self.report_error(f'{msg}. Aborting')
3048 return
3049
3050 if info_dict.get('requested_formats') is not None:
3051
3052 def compatible_formats(formats):
3053 # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
3054 video_formats = [format for format in formats if format.get('vcodec') != 'none']
3055 audio_formats = [format for format in formats if format.get('acodec') != 'none']
3056 if len(video_formats) > 2 or len(audio_formats) > 2:
3057 return False
3058
3059 # Check extension
3060 exts = {format.get('ext') for format in formats}
3061 COMPATIBLE_EXTS = (
3062 {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'},
3063 {'webm'},
3064 )
3065 for ext_sets in COMPATIBLE_EXTS:
3066 if ext_sets.issuperset(exts):
3067 return True
3068 # TODO: Check acodec/vcodec
3069 return False
3070
3071 requested_formats = info_dict['requested_formats']
3072 old_ext = info_dict['ext']
3073 if self.params.get('merge_output_format') is None:
3074 if not compatible_formats(requested_formats):
3075 info_dict['ext'] = 'mkv'
3076 self.report_warning(
3077 'Requested formats are incompatible for merge and will be merged into mkv')
3078 if (info_dict['ext'] == 'webm'
3079 and info_dict.get('thumbnails')
3080 # check with type instead of pp_key, __name__, or isinstance
3081 # since we don't want any custom PPs to trigger this
3082 and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])): # noqa: E721
3083 info_dict['ext'] = 'mkv'
3084 self.report_warning(
3085 'webm doesn\'t support embedding a thumbnail, mkv will be used')
3086 new_ext = info_dict['ext']
3087
3088 def correct_ext(filename, ext=new_ext):
3089 if filename == '-':
3090 return filename
3091 filename_real_ext = os.path.splitext(filename)[1][1:]
3092 filename_wo_ext = (
3093 os.path.splitext(filename)[0]
3094 if filename_real_ext in (old_ext, new_ext)
3095 else filename)
3096 return f'{filename_wo_ext}.{ext}'
3097
3098 # Ensure filename always has a correct extension for successful merge
3099 full_filename = correct_ext(full_filename)
3100 temp_filename = correct_ext(temp_filename)
3101 dl_filename = existing_video_file(full_filename, temp_filename)
3102 info_dict['__real_download'] = False
3103
3104 merger = FFmpegMergerPP(self)
3105 downloaded = []
3106 if dl_filename is not None:
3107 self.report_file_already_downloaded(dl_filename)
3108 elif fd:
3109 for f in requested_formats if fd != FFmpegFD else []:
3110 f['filepath'] = fname = prepend_extension(
3111 correct_ext(temp_filename, info_dict['ext']),
3112 'f%s' % f['format_id'], info_dict['ext'])
3113 downloaded.append(fname)
3114 info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
3115 success, real_download = self.dl(temp_filename, info_dict)
3116 info_dict['__real_download'] = real_download
3117 else:
3118 if self.params.get('allow_unplayable_formats'):
3119 self.report_warning(
3120 'You have requested merging of multiple formats '
3121 'while also allowing unplayable formats to be downloaded. '
3122 'The formats won\'t be merged to prevent data corruption.')
3123 elif not merger.available:
3124 msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
3125 if not self.params.get('ignoreerrors'):
3126 self.report_error(f'{msg}. Aborting due to --abort-on-error')
3127 return
3128 self.report_warning(f'{msg}. The formats won\'t be merged')
3129
3130 if temp_filename == '-':
3131 reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
3132 else 'but the formats are incompatible for simultaneous download' if merger.available
3133 else 'but ffmpeg is not installed')
3134 self.report_warning(
3135 f'You have requested downloading multiple formats to stdout {reason}. '
3136 'The formats will be streamed one after the other')
3137 fname = temp_filename
3138 for f in requested_formats:
3139 new_info = dict(info_dict)
3140 del new_info['requested_formats']
3141 new_info.update(f)
3142 if temp_filename != '-':
3143 fname = prepend_extension(
3144 correct_ext(temp_filename, new_info['ext']),
3145 'f%s' % f['format_id'], new_info['ext'])
3146 if not self._ensure_dir_exists(fname):
3147 return
3148 f['filepath'] = fname
3149 downloaded.append(fname)
3150 partial_success, real_download = self.dl(fname, new_info)
3151 info_dict['__real_download'] = info_dict['__real_download'] or real_download
3152 success = success and partial_success
3153
3154 if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
3155 info_dict['__postprocessors'].append(merger)
3156 info_dict['__files_to_merge'] = downloaded
3157 # Even if nothing was newly downloaded, the merge itself only happens now
3158 info_dict['__real_download'] = True
3159 else:
3160 for file in downloaded:
3161 files_to_move[file] = None
3162 else:
3163 # Just a single file
3164 dl_filename = existing_video_file(full_filename, temp_filename)
3165 if dl_filename is None or dl_filename == temp_filename:
3166 # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
3167 # So we should try to resume the download
3168 success, real_download = self.dl(temp_filename, info_dict)
3169 info_dict['__real_download'] = real_download
3170 else:
3171 self.report_file_already_downloaded(dl_filename)
3172
3173 dl_filename = dl_filename or temp_filename
3174 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3175
3176 except network_exceptions as err:
3177 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
3178 return
3179 except OSError as err:
3180 raise UnavailableVideoError(err)
3181 except (ContentTooShortError, ) as err:
3182 self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
3183 return
3184
3185 self._raise_pending_errors(info_dict)
3186 if success and full_filename != '-':
3187
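# Fixup policy, roughly: 'never'/'ignore' skip all fixups, 'warn' only reports
# issues, 'force' always applies them, and 'detect_or_warn' (the default)
# only fixes files that were actually downloaded in this run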
3188 def fixup():
3189 do_fixup = True
3190 fixup_policy = self.params.get('fixup')
3191 vid = info_dict['id']
3192
3193 if fixup_policy in ('ignore', 'never'):
3194 return
3195 elif fixup_policy == 'warn':
3196 do_fixup = 'warn'
3197 elif fixup_policy != 'force':
3198 assert fixup_policy in ('detect_or_warn', None)
3199 if not info_dict.get('__real_download'):
3200 do_fixup = False
3201
3202 def ffmpeg_fixup(cndn, msg, cls):
3203 if not (do_fixup and cndn):
3204 return
3205 elif do_fixup == 'warn':
3206 self.report_warning(f'{vid}: {msg}')
3207 return
3208 pp = cls(self)
3209 if pp.available:
3210 info_dict['__postprocessors'].append(pp)
3211 else:
3212 self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
3213
3214 stretched_ratio = info_dict.get('stretched_ratio')
3215 ffmpeg_fixup(stretched_ratio not in (1, None),
3216 f'Non-uniform pixel ratio {stretched_ratio}',
3217 FFmpegFixupStretchedPP)
3218
3219 downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
3220 downloader = downloader.FD_NAME if downloader else None
3221
3222 ext = info_dict.get('ext')
3223 postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
3224 isinstance(pp, FFmpegVideoConvertorPP)
3225 and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
3226 ) for pp in self._pps['post_process'])
3227
3228 if not postprocessed_by_ffmpeg:
3229 ffmpeg_fixup(ext == 'm4a' and info_dict.get('container') == 'm4a_dash',
3230 'writing DASH m4a. Only some players support this container',
3231 FFmpegFixupM4aPP)
3232 ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
3233 or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
3234 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
3235 FFmpegFixupM3u8PP)
3236 ffmpeg_fixup(info_dict.get('is_live') and downloader == 'DashSegmentsFD',
3237 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)
3238
3239 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
3240 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)
3241
3242 fixup()
3243 try:
3244 replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
3245 except PostProcessingError as err:
3246 self.report_error('Postprocessing: %s' % str(err))
3247 return
3248 try:
3249 for ph in self._post_hooks:
3250 ph(info_dict['filepath'])
3251 except Exception as err:
3252 self.report_error('post hooks: %s' % str(err))
3253 return
3254 info_dict['__write_download_archive'] = True
3255
3256 assert info_dict is original_infodict # Make sure the info_dict was modified in-place
3257 if self.params.get('force_write_download_archive'):
3258 info_dict['__write_download_archive'] = True
3259 check_max_downloads()
3260
3261 def __download_wrapper(self, func):
3262 @functools.wraps(func)
3263 def wrapper(*args, **kwargs):
3264 try:
3265 res = func(*args, **kwargs)
3266 except UnavailableVideoError as e:
3267 self.report_error(e)
3268 except DownloadCancelled as e:
3269 self.to_screen(f'[info] {e}')
3270 if not self.params.get('break_per_url'):
3271 raise
3272 else:
3273 if self.params.get('dump_single_json', False):
3274 self.post_extract(res)
3275 self.to_stdout(json.dumps(self.sanitize_info(res)))
3276 return wrapper
3277
3278 def download(self, url_list):
3279 """Download a given list of URLs."""
3280 url_list = variadic(url_list) # Passing a single URL is a common mistake
3281 outtmpl = self.params['outtmpl']['default']
3282 if (len(url_list) > 1
3283 and outtmpl != '-'
3284 and '%' not in outtmpl
3285 and self.params.get('max_downloads') != 1):
3286 raise SameFileError(outtmpl)
3287
3288 for url in url_list:
3289 self.__download_wrapper(self.extract_info)(
3290 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
3291
3292 return self._download_retcode
3293
3294 def download_with_info_file(self, info_filename):
3295 with contextlib.closing(fileinput.FileInput(
3296 [info_filename], mode='r',
3297 openhook=fileinput.hook_encoded('utf-8'))) as f:
3298 # FileInput doesn't have a read method, so we can't call json.load
3299 info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
3300 try:
3301 self.__download_wrapper(self.process_ie_result)(info, download=True)
3302 except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
3303 if not isinstance(e, EntryNotInPlaylist):
3304 self.to_stderr('\r')
3305 webpage_url = info.get('webpage_url')
3306 if webpage_url is not None:
3307 self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
3308 return self.download([webpage_url])
3309 else:
3310 raise
3311 return self._download_retcode
3312
3313 @staticmethod
3314 def sanitize_info(info_dict, remove_private_keys=False):
3315 ''' Sanitize the infodict for converting to JSON '''
3316 if info_dict is None:
3317 return info_dict
3318 info_dict.setdefault('epoch', int(time.time()))
3319 info_dict.setdefault('_type', 'video')
3320
3321 if remove_private_keys:
3322 reject = lambda k, v: v is None or k.startswith('__') or k in {
3323 'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
3324 'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber',
3325 }
3326 else:
3327 reject = lambda k, v: False
3328
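# Recursively coerce all values to JSON-serializable types,
# falling back to repr() for anything unrecognized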
3329 def filter_fn(obj):
3330 if isinstance(obj, dict):
3331 return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
3332 elif isinstance(obj, (list, tuple, set, LazyList)):
3333 return list(map(filter_fn, obj))
3334 elif obj is None or isinstance(obj, (str, int, float, bool)):
3335 return obj
3336 else:
3337 return repr(obj)
3338
3339 return filter_fn(info_dict)
3340
3341 @staticmethod
3342 def filter_requested_info(info_dict, actually_filter=True):
3343 ''' Alias of sanitize_info for backward compatibility '''
3344 return YoutubeDL.sanitize_info(info_dict, actually_filter)
3345
3346 def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
3347 for filename in set(filter(None, files_to_delete)):
3348 if msg:
3349 self.to_screen(msg % filename)
3350 try:
3351 os.remove(filename)
3352 except OSError:
3353 self.report_warning(f'Unable to delete file {filename}')
3354 if filename in info.get('__files_to_move', []): # NB: Delete even if None
3355 del info['__files_to_move'][filename]
3356
3357 @staticmethod
3358 def post_extract(info_dict):
3359 def actual_post_extract(info_dict):
3360 if info_dict.get('_type') in ('playlist', 'multi_video'):
3361 for video_dict in info_dict.get('entries') or []:
3362 actual_post_extract(video_dict or {})
3363 return
3364
3365 post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
3366 info_dict.update(post_extractor())
3367
3368 actual_post_extract(info_dict or {})
3369
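# Run a single postprocessor; files it marks for deletion are removed
# (pass -k/--keep-video to keep them instead)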
3370 def run_pp(self, pp, infodict):
3371 files_to_delete = []
3372 if '__files_to_move' not in infodict:
3373 infodict['__files_to_move'] = {}
3374 try:
3375 files_to_delete, infodict = pp.run(infodict)
3376 except PostProcessingError as e:
3377 # Must be True and not 'only_download'
3378 if self.params.get('ignoreerrors') is True:
3379 self.report_error(e)
3380 return infodict
3381 raise
3382
3383 if not files_to_delete:
3384 return infodict
3385 if self.params.get('keepvideo', False):
3386 for f in files_to_delete:
3387 infodict['__files_to_move'].setdefault(f, '')
3388 else:
3389 self._delete_downloaded_files(
3390 *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
3391 return infodict
3392
3393 def run_all_pps(self, key, info, *, additional_pps=None):
3394 self._forceprint(key, info)
3395 for pp in (additional_pps or []) + self._pps[key]:
3396 info = self.run_pp(pp, info)
3397 return info
3398
3399 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
3400 info = dict(ie_info)
3401 info['__files_to_move'] = files_to_move or {}
3402 try:
3403 info = self.run_all_pps(key, info)
3404 except PostProcessingError as err:
3405 msg = f'Preprocessing: {err}'
3406 info.setdefault('__pending_error', msg)
3407 self.report_error(msg, is_error=False)
3408 return info, info.pop('__files_to_move', None)
3409
3410 def post_process(self, filename, info, files_to_move=None):
3411 """Run all the postprocessors on the given file."""
3412 info['filepath'] = filename
3413 info['__files_to_move'] = files_to_move or {}
3414 info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
3415 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
3416 del info['__files_to_move']
3417 return self.run_all_pps('after_move', info)
3418
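# Archive IDs have the form '<extractor_key in lowercase> <video_id>',
# e.g. 'youtube dQw4w9WgXcQ' (illustrative ID)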
3419 def _make_archive_id(self, info_dict):
3420 video_id = info_dict.get('id')
3421 if not video_id:
3422 return
3423 # Future-proof against any change in case,
3424 # and for backwards compatibility with prior versions
3425 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
3426 if extractor is None:
3427 url = str_or_none(info_dict.get('url'))
3428 if not url:
3429 return
3430 # Try to find matching extractor for the URL and take its ie_key
3431 for ie_key, ie in self._ies.items():
3432 if ie.suitable(url):
3433 extractor = ie_key
3434 break
3435 else:
3436 return
3437 return f'{extractor.lower()} {video_id}'
3438
3439 def in_download_archive(self, info_dict):
3440 fn = self.params.get('download_archive')
3441 if fn is None:
3442 return False
3443
3444 vid_id = self._make_archive_id(info_dict)
3445 if not vid_id:
3446 return False # Incomplete video information
3447
3448 return vid_id in self.archive
3449
3450 def record_download_archive(self, info_dict):
3451 fn = self.params.get('download_archive')
3452 if fn is None:
3453 return
3454 vid_id = self._make_archive_id(info_dict)
3455 assert vid_id
3456 self.write_debug(f'Adding to archive: {vid_id}')
3457 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
3458 archive_file.write(vid_id + '\n')
3459 self.archive.add(vid_id)
3460
3461 @staticmethod
3462 def format_resolution(format, default='unknown'):
3463 if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
3464 return 'audio only'
3465 if format.get('resolution') is not None:
3466 return format['resolution']
3467 if format.get('width') and format.get('height'):
3468 return '%dx%d' % (format['width'], format['height'])
3469 elif format.get('height'):
3470 return '%sp' % format['height']
3471 elif format.get('width'):
3472 return '%dx?' % format['width']
3473 return default
3474
3475 def _list_format_headers(self, *headers):
3476 if self.params.get('listformats_table', True) is not False:
3477 return [self._format_out(header, self.Styles.HEADERS) for header in headers]
3478 return headers
3479
3480 def _format_note(self, fdict):
3481 res = ''
3482 if fdict.get('ext') in ['f4f', 'f4m']:
3483 res += '(unsupported)'
3484 if fdict.get('language'):
3485 if res:
3486 res += ' '
3487 res += '[%s]' % fdict['language']
3488 if fdict.get('format_note') is not None:
3489 if res:
3490 res += ' '
3491 res += fdict['format_note']
3492 if fdict.get('tbr') is not None:
3493 if res:
3494 res += ', '
3495 res += '%4dk' % fdict['tbr']
3496 if fdict.get('container') is not None:
3497 if res:
3498 res += ', '
3499 res += '%s container' % fdict['container']
3500 if (fdict.get('vcodec') is not None
3501 and fdict.get('vcodec') != 'none'):
3502 if res:
3503 res += ', '
3504 res += fdict['vcodec']
3505 if fdict.get('vbr') is not None:
3506 res += '@'
3507 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
3508 res += 'video@'
3509 if fdict.get('vbr') is not None:
3510 res += '%4dk' % fdict['vbr']
3511 if fdict.get('fps') is not None:
3512 if res:
3513 res += ', '
3514 res += '%sfps' % fdict['fps']
3515 if fdict.get('acodec') is not None:
3516 if res:
3517 res += ', '
3518 if fdict['acodec'] == 'none':
3519 res += 'video only'
3520 else:
3521 res += '%-5s' % fdict['acodec']
3522 elif fdict.get('abr') is not None:
3523 if res:
3524 res += ', '
3525 res += 'audio'
3526 if fdict.get('abr') is not None:
3527 res += '@%3dk' % fdict['abr']
3528 if fdict.get('asr') is not None:
3529 res += ' (%5dHz)' % fdict['asr']
3530 if fdict.get('filesize') is not None:
3531 if res:
3532 res += ', '
3533 res += format_bytes(fdict['filesize'])
3534 elif fdict.get('filesize_approx') is not None:
3535 if res:
3536 res += ', '
3537 res += '~' + format_bytes(fdict['filesize_approx'])
3538 return res
3539
3540 def render_formats_table(self, info_dict):
3541 if not info_dict.get('formats') and not info_dict.get('url'):
3542 return None
3543
3544 formats = info_dict.get('formats', [info_dict])
3545 if self.params.get('listformats_table', True) is False:
3546 table = [
3547 [
3548 format_field(f, 'format_id'),
3549 format_field(f, 'ext'),
3550 self.format_resolution(f),
3551 self._format_note(f)
3552 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3553 return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
3554
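# For display, keep at most the first four dot-separated fields of the
# codec string; longer strings (e.g. some av01 codecs) are truncated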
3555 def simplified_codec(f, field):
3556 assert field in ('acodec', 'vcodec')
3557 codec = f.get(field, 'unknown')
3558 if not codec:
3559 return 'unknown'
3560 elif codec != 'none':
3561 return '.'.join(codec.split('.')[:4])
3562
3563 if field == 'vcodec' and f.get('acodec') == 'none':
3564 return 'images'
3565 elif field == 'acodec' and f.get('vcodec') == 'none':
3566 return ''
3567 return self._format_out('audio only' if field == 'vcodec' else 'video only',
3568 self.Styles.SUPPRESS)
3569
3570 delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
3571 table = [
3572 [
3573 self._format_out(format_field(f, 'format_id'), self.Styles.ID),
3574 format_field(f, 'ext'),
3575 format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
3576 format_field(f, 'fps', '\t%d', func=round),
3577 format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
3578 delim,
3579 format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
3580 format_field(f, 'tbr', '\t%dk', func=round),
3581 shorten_protocol_name(f.get('protocol', '')),
3582 delim,
3583 simplified_codec(f, 'vcodec'),
3584 format_field(f, 'vbr', '\t%dk', func=round),
3585 simplified_codec(f, 'acodec'),
3586 format_field(f, 'abr', '\t%dk', func=round),
3587 format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
3588 join_nonempty(
3589 self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
3590 format_field(f, 'language', '[%s]'),
3591 join_nonempty(format_field(f, 'format_note'),
3592 format_field(f, 'container', ignore=(None, f.get('ext'))),
3593 delim=', '),
3594 delim=' '),
3595 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3596 header_line = self._list_format_headers(
3597 'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', delim, '\tFILESIZE', '\tTBR', 'PROTO',
3598 delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')
3599
3600 return render_table(
3601 header_line, table, hide_empty=True,
3602 delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
3603
3604 def render_thumbnails_table(self, info_dict):
3605 thumbnails = list(info_dict.get('thumbnails') or [])
3606 if not thumbnails:
3607 return None
3608 return render_table(
3609 self._list_format_headers('ID', 'Width', 'Height', 'URL'),
3610 [[t.get('id'), t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])
3611
3612 def render_subtitles_table(self, video_id, subtitles):
3613 def _row(lang, formats):
3614 exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
3615 if len(set(names)) == 1:
3616 names = [] if names[0] == 'unknown' else names[:1]
3617 return [lang, ', '.join(names), ', '.join(exts)]
3618
3619 if not subtitles:
3620 return None
3621 return render_table(
3622 self._list_format_headers('Language', 'Name', 'Formats'),
3623 [_row(lang, formats) for lang, formats in subtitles.items()],
3624 hide_empty=True)
3625
3626 def __list_table(self, video_id, name, func, *args):
3627 table = func(*args)
3628 if not table:
3629 self.to_screen(f'{video_id} has no {name}')
3630 return
3631 self.to_screen(f'[info] Available {name} for {video_id}:')
3632 self.to_stdout(table)
3633
3634 def list_formats(self, info_dict):
3635 self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)
3636
3637 def list_thumbnails(self, info_dict):
3638 self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)
3639
3640 def list_subtitles(self, video_id, subtitles, name='subtitles'):
3641 self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
3642
3643 def urlopen(self, req):
3644 """ Start an HTTP download """
3645 if isinstance(req, str):
3646 req = sanitized_Request(req)
3647 return self._opener.open(req, timeout=self._socket_timeout)
3648
3649 def print_debug_header(self):
3650 if not self.params.get('verbose'):
3651 return
3652
3653 # These imports can be slow, so import them only as needed
3654 from .extractor.extractors import _LAZY_LOADER
3655 from .extractor.extractors import _PLUGIN_CLASSES as plugin_extractors
3656
3657 def get_encoding(stream):
3658 ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
3659 if not supports_terminal_sequences(stream):
3660 from .utils import WINDOWS_VT_MODE # Must be imported locally
3661 ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
3662 return ret
3663
3664 encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
3665 locale.getpreferredencoding(),
3666 sys.getfilesystemencoding(),
3667 self.get_encoding(),
3668 ', '.join(
3669 f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
3670 if stream is not None and key != 'console')
3671 )
3672
3673 logger = self.params.get('logger')
3674 if logger:
3675 write_debug = lambda msg: logger.debug(f'[debug] {msg}')
3676 write_debug(encoding_str)
3677 else:
3678 write_string(f'[debug] {encoding_str}\n', encoding=None)
3679 write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')
3680
3681 source = detect_variant()
3682 write_debug(join_nonempty(
3683 'yt-dlp version', __version__,
3684 f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
3685 '' if source == 'unknown' else f'({source})',
3686 delim=' '))
3687 if not _LAZY_LOADER:
3688 if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
3689 write_debug('Lazy loading extractors is forcibly disabled')
3690 else:
3691 write_debug('Lazy loading extractors is disabled')
3692 if plugin_extractors or plugin_postprocessors:
3693 write_debug('Plugins: %s' % [
3694 '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
3695 for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
3696 if self.params['compat_opts']:
3697 write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
3698
3699 if source == 'source':
3700 try:
3701 stdout, _, _ = Popen.run(
3702 ['git', 'rev-parse', '--short', 'HEAD'],
3703 text=True, cwd=os.path.dirname(os.path.abspath(__file__)),
3704 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
3705 if re.fullmatch('[0-9a-f]+', stdout.strip()):
3706 write_debug(f'Git HEAD: {stdout.strip()}')
3707 except Exception:
3708 with contextlib.suppress(Exception):
3709 sys.exc_clear()
3710
3711 write_debug(system_identifier())
3712
3713 exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
3714 ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
3715 if ffmpeg_features:
3716 exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))
3717
3718 exe_versions['rtmpdump'] = rtmpdump_version()
3719 exe_versions['phantomjs'] = PhantomJSwrapper._version()
3720 exe_str = ', '.join(
3721 f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
3722 ) or 'none'
3723 write_debug('exe versions: %s' % exe_str)
3724
3725 from .compat.compat_utils import get_package_info
3726 from .dependencies import available_dependencies
3727
3728 write_debug('Optional libraries: %s' % (', '.join(sorted({
3729 join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
3730 })) or 'none'))
3731
3732 self._setup_opener()
3733 proxy_map = {}
3734 for handler in self._opener.handlers:
3735 if hasattr(handler, 'proxies'):
3736 proxy_map.update(handler.proxies)
3737 write_debug(f'Proxy map: {proxy_map}')
3738
3739 # Not implemented
3740 if False and self.params.get('call_home'):
3741 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
3742 write_debug('Public IP address: %s' % ipaddr)
3743 latest_version = self.urlopen(
3744 'https://yt-dl.org/latest/version').read().decode()
3745 if version_tuple(latest_version) > version_tuple(__version__):
3746 self.report_warning(
3747 'You are using an outdated version (newest version: %s)! '
3748 'See https://yt-dl.org/update if you need help updating.' %
3749 latest_version)
3750
3751 def _setup_opener(self):
3752 if hasattr(self, '_opener'):
3753 return
3754 timeout_val = self.params.get('socket_timeout')
3755 self._socket_timeout = 20 if timeout_val is None else float(timeout_val)
3756
3757 opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
3758 opts_cookiefile = self.params.get('cookiefile')
3759 opts_proxy = self.params.get('proxy')
3760
3761 self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
3762
3763 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
3764 if opts_proxy is not None:
3765 if opts_proxy == '':
3766 proxies = {}
3767 else:
3768 proxies = {'http': opts_proxy, 'https': opts_proxy}
3769 else:
3770 proxies = urllib.request.getproxies()
3771 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
3772 if 'http' in proxies and 'https' not in proxies:
3773 proxies['https'] = proxies['http']
3774 proxy_handler = PerRequestProxyHandler(proxies)
3775
3776 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
3777 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3778 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
3779 redirect_handler = YoutubeDLRedirectHandler()
3780 data_handler = urllib.request.DataHandler()
3781
3782 # When passing our own FileHandler instance, build_opener won't add the
3783 # default FileHandler and allows us to disable the file protocol, which
3784 # can be used for malicious purposes (see
3785 # https://github.com/ytdl-org/youtube-dl/issues/8227)
3786 file_handler = urllib.request.FileHandler()
3787
3788 def file_open(*args, **kwargs):
3789 raise urllib.error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
3790 file_handler.file_open = file_open
3791
3792 opener = urllib.request.build_opener(
3793 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
3794
3795 # Delete the default user-agent header, which would otherwise apply in
3796 # cases where our custom HTTP handler doesn't come into play
3797 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
3798 opener.addheaders = []
3799 self._opener = opener
3800
3801 def encode(self, s):
3802 if isinstance(s, bytes):
3803 return s # Already encoded
3804
3805 try:
3806 return s.encode(self.get_encoding())
3807 except UnicodeEncodeError as err:
3808 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3809 raise
3810
3811 def get_encoding(self):
3812 encoding = self.params.get('encoding')
3813 if encoding is None:
3814 encoding = preferredencoding()
3815 return encoding
3816
3817 def _write_info_json(self, label, ie_result, infofn, overwrite=None):
3818 ''' Write infojson and return True = written, 'exists' = already exists, False = skip, None = error '''
3819 if overwrite is None:
3820 overwrite = self.params.get('overwrites', True)
3821 if not self.params.get('writeinfojson'):
3822 return False
3823 elif not infofn:
3824 self.write_debug(f'Skipping writing {label} infojson')
3825 return False
3826 elif not self._ensure_dir_exists(infofn):
3827 return None
3828 elif not overwrite and os.path.exists(infofn):
3829 self.to_screen(f'[info] {label.title()} metadata is already present')
3830 return 'exists'
3831
3832 self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
3833 try:
3834 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
3835 return True
3836 except OSError:
3837 self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
3838 return None
3839
3840 def _write_description(self, label, ie_result, descfn):
3841 ''' Write description and return True = written, False = skip, None = error '''
3842 if not self.params.get('writedescription'):
3843 return False
3844 elif not descfn:
3845 self.write_debug(f'Skipping writing {label} description')
3846 return False
3847 elif not self._ensure_dir_exists(descfn):
3848 return None
3849 elif not self.params.get('overwrites', True) and os.path.exists(descfn):
3850 self.to_screen(f'[info] {label.title()} description is already present')
3851 elif ie_result.get('description') is None:
3852 self.report_warning(f'There\'s no {label} description to write')
3853 return False
3854 else:
3855 try:
3856 self.to_screen(f'[info] Writing {label} description to: {descfn}')
3857 with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
3858 descfile.write(ie_result['description'])
3859 except OSError:
3860 self.report_error(f'Cannot write {label} description file {descfn}')
3861 return None
3862 return True
3863
3864 def _write_subtitles(self, info_dict, filename):
3865 ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
3866 ret = []
3867 subtitles = info_dict.get('requested_subtitles')
3868 if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
3869 # Subtitle download errors are already reported by the relevant IE,
3870 # so this silently carries on when used with an IE that doesn't support subtitles
3871 return ret
3872
3873 sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
3874 if not sub_filename_base:
3875 self.to_screen('[info] Skipping writing video subtitles')
3876 return ret
3877 for sub_lang, sub_info in subtitles.items():
3878 sub_format = sub_info['ext']
3879 sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
3880 sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
3881 existing_sub = self.existing_file((sub_filename_final, sub_filename))
3882 if existing_sub:
3883 self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
3884 sub_info['filepath'] = existing_sub
3885 ret.append((existing_sub, sub_filename_final))
3886 continue
3887
3888 self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
3889 if sub_info.get('data') is not None:
3890 try:
3891 # Use newline='' to prevent conversion of newline characters
3892 # See https://github.com/ytdl-org/youtube-dl/issues/10268
3893 with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
3894 subfile.write(sub_info['data'])
3895 sub_info['filepath'] = sub_filename
3896 ret.append((sub_filename, sub_filename_final))
3897 continue
3898 except OSError:
3899 self.report_error(f'Cannot write video subtitles file {sub_filename}')
3900 return None
3901
3902 try:
3903 sub_copy = sub_info.copy()
3904 sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
3905 self.dl(sub_filename, sub_copy, subtitle=True)
3906 sub_info['filepath'] = sub_filename
3907 ret.append((sub_filename, sub_filename_final))
3908 except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
3909 msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
3910 if self.params.get('ignoreerrors') is not True: # False or 'only_download'
3911 if not self.params.get('ignoreerrors'):
3912 self.report_error(msg)
3913 raise DownloadError(msg)
3914 self.report_warning(msg)
3915 return ret
3916
3917 def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
3918 ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
3919 write_all = self.params.get('write_all_thumbnails', False)
3920 thumbnails, ret = [], []
3921 if write_all or self.params.get('writethumbnail', False):
3922 thumbnails = info_dict.get('thumbnails') or []
3923 multiple = write_all and len(thumbnails) > 1
3924
3925 if thumb_filename_base is None:
3926 thumb_filename_base = filename
3927 if thumbnails and not thumb_filename_base:
3928 self.write_debug(f'Skipping writing {label} thumbnail')
3929 return ret
3930
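# Thumbnails are assumed to be ordered worst-to-best, so iterate from the end;
# unless write_all is set, stop after the first successful download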
3931 for idx, t in list(enumerate(thumbnails))[::-1]:
3932 thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
3933 thumb_display_id = f'{label} thumbnail {t["id"]}'
3934 thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
3935 thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
3936
3937 existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
3938 if existing_thumb:
3939 self.to_screen('[info] %s is already present' % (
3940 thumb_display_id if multiple else f'{label} thumbnail').capitalize())
3941 t['filepath'] = existing_thumb
3942 ret.append((existing_thumb, thumb_filename_final))
3943 else:
3944 self.to_screen(f'[info] Downloading {thumb_display_id} ...')
3945 try:
3946 uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
3947 self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
3948 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
3949 shutil.copyfileobj(uf, thumbf)
3950 ret.append((thumb_filename, thumb_filename_final))
3951 t['filepath'] = thumb_filename
3952 except network_exceptions as err:
3953 thumbnails.pop(idx)
3954 self.report_warning(f'Unable to download {thumb_display_id}: {err}')
3955 if ret and not write_all:
3956 break
3957 return ret