yt_dlp/YoutubeDL.py (git blame, yt-dlp.git, at "[extractor/youtube:music:search_url] Extract title (#7102)")
26e63931 1import collections
31bd3925 2import contextlib
9d2ecdbc 3import datetime
c1c9a79c 4import errno
31bd3925 5import fileinput
b5ae35ee 6import functools
8222d8de 7import io
b82f815f 8import itertools
8694c600 9import json
62fec3b2 10import locale
083c9df9 11import operator
8222d8de 12import os
f8271158 13import random
8222d8de JMF 14import re
15import shutil
6f2287cb 16import string
dca08720 17import subprocess
8222d8de 18import sys
21cd8fae 19import tempfile
8222d8de 20import time
67134eab 21import tokenize
8222d8de 22import traceback
524e2e4f 23import unicodedata
f9934b96 24import urllib.request
961ea474 25
f8271158 26from .cache import Cache
14f25df2 27from .compat import compat_os_name, compat_shlex_quote
982ee69a 28from .cookies import load_cookies
f8271158 29from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
30from .downloader.rtmp import rtmpdump_version
f8271158 31from .extractor import gen_extractor_classes, get_info_extractor
fe7866d0 32from .extractor.common import UnsupportedURLIE
f8271158 33from .extractor.openload import PhantomJSwrapper
34from .minicurses import format_text
8e40b9d1 35from .plugins import directories as plugin_directories
e756f45b 36from .postprocessor import _PLUGIN_CLASSES as plugin_pps
f8271158 37from .postprocessor import (
38 EmbedThumbnailPP,
39 FFmpegFixupDuplicateMoovPP,
40 FFmpegFixupDurationPP,
41 FFmpegFixupM3u8PP,
42 FFmpegFixupM4aPP,
43 FFmpegFixupStretchedPP,
44 FFmpegFixupTimestampPP,
45 FFmpegMergerPP,
46 FFmpegPostProcessor,
ca9def71 47 FFmpegVideoConvertorPP,
f8271158 48 MoveFilesAfterDownloadPP,
49 get_postprocessor,
50)
ca9def71 51from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
b5e7a2e6 52from .update import REPOSITORY, current_git_head, detect_variant
8c25f81b 53from .utils import (
f8271158 54 DEFAULT_OUTTMPL,
7b2c3f47 55 IDENTITY,
f8271158 56 LINK_TEMPLATES,
8dc59305 57 MEDIA_EXTENSIONS,
f8271158 58 NO_DEFAULT,
1d485a1a 59 NUMBER_RE,
f8271158 60 OUTTMPL_TYPES,
61 POSTPROCESS_WHEN,
62 STR_FORMAT_RE_TMPL,
63 STR_FORMAT_TYPES,
64 ContentTooShortError,
65 DateRange,
66 DownloadCancelled,
67 DownloadError,
68 EntryNotInPlaylist,
69 ExistingVideoReached,
70 ExtractorError,
784320c9 71 FormatSorter,
f8271158 72 GeoRestrictedError,
73 HEADRequest,
f8271158 74 ISO3166Utils,
75 LazyList,
76 MaxDownloadsReached,
19a03940 77 Namespace,
f8271158 78 PagedList,
79 PerRequestProxyHandler,
7e88d7d7 80 PlaylistEntries,
f8271158 81 Popen,
82 PostProcessingError,
83 ReExtractInfo,
84 RejectedVideoReached,
85 SameFileError,
86 UnavailableVideoError,
693f0600 87 UserNotLive,
f8271158 88 YoutubeDLCookieProcessor,
89 YoutubeDLHandler,
90 YoutubeDLRedirectHandler,
eedb7ba5 S 91 age_restricted,
92 args_to_str,
cb794ee0 93 bug_reports_message,
ce02ed60 94 date_from_str,
da4db748 95 deprecation_warning,
ce02ed60 96 determine_ext,
b5559424 97 determine_protocol,
c0384f22 98 encode_compat_str,
ce02ed60 99 encodeFilename,
a06916d9 100 error_to_compat_str,
47cdc68e 101 escapeHTML,
590bc6f6 102 expand_path,
90137ca4 103 filter_dict,
e29663c6 104 float_or_none,
02dbf93f 105 format_bytes,
e0fd9573 106 format_decimal_suffix,
f8271158 107 format_field,
525ef922 108 formatSeconds,
fc61aff4 109 get_compatible_ext,
0bb322b9 110 get_domain,
c9969434 111 int_or_none,
732044af 112 iri_to_uri,
941e881e 113 is_path_like,
34921b43 114 join_nonempty,
ce02ed60 115 locked_file,
0647d925 116 make_archive_id,
0202b52a 117 make_dir,
dca08720 118 make_HTTPS_handler,
8b7539d2 119 merge_headers,
3158150c 120 network_exceptions,
ec11a9f4 121 number_of_digits,
cd6fc19e 122 orderedSet,
5314b521 123 orderedSet_from_options,
083c9df9 124 parse_filesize,
ce02ed60 125 preferredencoding,
eedb7ba5 126 prepend_extension,
3efb96a6 127 remove_terminal_sequences,
cfb56d1a 128 render_table,
eedb7ba5 129 replace_extension,
ce02ed60 130 sanitize_filename,
1bb5c511 131 sanitize_path,
dcf77cf1 132 sanitize_url,
67dda517 133 sanitized_Request,
e5660ee6 134 std_headers,
1211bb6d 135 str_or_none,
e29663c6 136 strftime_or_none,
ce02ed60 137 subtitles_filename,
819e0531 138 supports_terminal_sequences,
b1f94422 139 system_identifier,
f2ebc5c7 140 timetuple_from_msec,
732044af 141 to_high_limit_path,
324ad820 142 traverse_obj,
fc61aff4 143 try_call,
6033d980 144 try_get,
29eb5174 145 url_basename,
7d1eb38a 146 variadic,
58b1f00d 147 version_tuple,
53973b4d 148 windows_enable_vt_mode,
ce02ed60 PH 149 write_json_file,
150 write_string,
4f026faf 151)
29cb20bd 152from .version import CHANNEL, RELEASE_GIT_HEAD, VARIANT, __version__
8222d8de 153
e9c0cdd3 YCH 154if compat_os_name == 'nt':
155 import ctypes
156
2459b6e1 157
86e5f3ed 158class YoutubeDL:
8222d8de JMF 159 """YoutubeDL class.
160
161 YoutubeDL objects are the ones responsible for downloading the
162 actual video file and writing it to disk if the user has requested
163 it, among some other tasks. In most cases there should be one per
164 program. Since, given a video URL, the downloader doesn't know how to
165 extract all the needed information (a task that InfoExtractors do), it
166 has to pass the URL to one of them.
167
168 For this, YoutubeDL objects have a method that allows
169 InfoExtractors to be registered in a given order. When it is passed
170 a URL, the YoutubeDL object hands it to the first InfoExtractor it
171 finds that reports being able to handle it. The InfoExtractor extracts
172 all the information about the video or videos the URL refers to, and
173 YoutubeDL processes the extracted information, possibly using a File
174 Downloader to download the video.
175
176 YoutubeDL objects accept a lot of parameters. In order not to saturate
177 the object constructor with arguments, it receives a dictionary of
178 options instead. These options are available through the params
179 attribute for the InfoExtractors to use. The YoutubeDL also
180 registers itself as the downloader in charge of the InfoExtractors
181 that are added to it, so this is a "mutual registration".
182
183 Available options:
184
185 username: Username for authentication purposes.
186 password: Password for authentication purposes.
180940e0 187 videopassword: Password for accessing a video.
1da50aa3 S 188 ap_mso: Adobe Pass multiple-system operator identifier.
189 ap_username: Multiple-system operator account username.
190 ap_password: Multiple-system operator account password.
8222d8de 191 usenetrc: Use netrc for authentication instead.
c8bc203f 192 netrc_location: Location of the netrc file. Defaults to ~/.netrc.
8222d8de JMF 193 verbose: Print additional info to stdout.
194 quiet: Do not print messages to stdout.
ad8915b7 195 no_warnings: Do not print out anything for warnings.
bb66c247 196 forceprint: A dict with keys WHEN mapped to a list of templates to
197 print to stdout. The allowed keys are video or any of the
198 items in utils.POSTPROCESS_WHEN.
ca30f449 199 For compatibility, a single list is also accepted
bb66c247 200 print_to_file: A dict with keys WHEN (same as forceprint) mapped to
201 a list of tuples with (template, filename)
8694c600 202 forcejson: Force printing info_dict as JSON.
63e0be34 PH 203 dump_single_json: Force printing the info_dict of the whole playlist
204 (or video) as a single JSON line.
c25228e5 205 force_write_download_archive: Force writing download archive regardless
206 of 'skip_download' or 'simulate'.
b7b04c78 207 simulate: Do not download the video files. If unset (or None),
208 simulate only if listsubtitles, listformats or list_thumbnails is used
eb8a4433 209 format: Video format code. see "FORMAT SELECTION" for more details.
093a1710 210 You can also pass a function. The function takes 'ctx' as
211 argument and returns the formats to download.
212 See "build_format_selector" for an implementation
63ad4d43 213 allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
b7da73eb 214 ignore_no_formats_error: Ignore "No video formats" error. Useful for
215 extracting metadata even if the video is not actually
216 available for download (experimental)
0930b11f 217 format_sort: A list of fields by which to sort the video formats.
218 See "Sorting Formats" for more details.
c25228e5 219 format_sort_force: Force the given format_sort. see "Sorting Formats"
220 for more details.
08d30158 221 prefer_free_formats: Whether to prefer video formats with free containers
222 over non-free ones of same quality.
c25228e5 223 allow_multiple_video_streams: Allow multiple video streams to be merged
224 into a single file
225 allow_multiple_audio_streams: Allow multiple audio streams to be merged
226 into a single file
0ba692ac 227 check_formats: Whether to test if the formats are downloadable.
9f1a1c36 228 Can be True (check all), False (check none),
229 'selected' (check selected formats),
0ba692ac 230 or None (check only if requested by extractor)
4524baf0 231 paths: Dictionary of output paths. The allowed keys are 'home'
232 'temp' and the keys of OUTTMPL_TYPES (in utils.py)
de6000d9 233 outtmpl: Dictionary of templates for output names. Allowed keys
4524baf0 234 are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
34488702 235 For compatibility with youtube-dl, a single string can also be used
a820dc72 RA 236 outtmpl_na_placeholder: Placeholder for unavailable meta fields.
237 restrictfilenames: Do not allow "&" and spaces in file names
238 trim_file_name: Limit length of filename (extension excluded)
4524baf0 239 windowsfilenames: Force the filenames to be windows compatible
b1940459 240 ignoreerrors: Do not stop on download/postprocessing errors.
241 Can be 'only_download' to ignore only download errors.
242 Default is 'only_download' for CLI, but False for API
26e2805c 243 skip_playlist_after_errors: Number of allowed failures until the rest of
244 the playlist is skipped
fe7866d0 245 allowed_extractors: List of regexes to match against extractor names that are allowed
0c3d0f51 246 overwrites: Overwrite all video and metadata files if True,
247 overwrite only non-video files if None
248 and don't overwrite any file if False
34488702 249 For compatibility with youtube-dl,
250 "nooverwrites" may also be used instead
c14e88f0 251 playlist_items: Specific indices of playlist to download.
75822ca7 252 playlistrandom: Download playlist items in random order.
7e9a6125 253 lazy_playlist: Process playlist entries as they are received.
8222d8de JMF 254 matchtitle: Download only matching titles.
255 rejecttitle: Reject downloads for matching titles.
8bf9319e 256 logger: Log messages to a logging.Logger instance.
17ffed18 257 logtostderr: Print everything to stderr instead of stdout.
258 consoletitle: Display progress in console window's titlebar.
8222d8de JMF 259 writedescription: Write the video description to a .description file
260 writeinfojson: Write the video metadata to a .info.json file
75d43ca0 261 clean_infojson: Remove private fields from the infojson
34488702 262 getcomments: Extract video comments. This will not be written to disk
06167fbb 263 unless writeinfojson is also given
1fb07d10 264 writeannotations: Write the video annotations to a .annotations.xml file
8222d8de 265 writethumbnail: Write the thumbnail image to a file
c25228e5 266 allow_playlist_files: Whether to write playlists' description, infojson etc
267 also to disk when using the 'write*' options
ec82d85a 268 write_all_thumbnails: Write all thumbnail formats to files
732044af 269 writelink: Write an internet shortcut file, depending on the
270 current platform (.url/.webloc/.desktop)
271 writeurllink: Write a Windows internet shortcut file (.url)
272 writewebloclink: Write a macOS internet shortcut file (.webloc)
273 writedesktoplink: Write a Linux internet shortcut file (.desktop)
8222d8de 274 writesubtitles: Write the video subtitles to a file
741dd8ea 275 writeautomaticsub: Write the automatically generated subtitles to a file
8222d8de 276 listsubtitles: Lists all available subtitles for the video
a504ced0 277 subtitlesformat: The format code for subtitles
c32b0aab 278 subtitleslangs: List of languages of the subtitles to download (can be regex).
279 The list may contain "all" to refer to all the available
280 subtitles. The language can be prefixed with a "-" to
62b58c09 281 exclude it from the requested languages, e.g. ['all', '-live_chat']
8222d8de JMF 282 keepvideo: Keep the video file after post-processing
283 daterange: A DateRange object, download only if the upload_date is in the range.
284 skip_download: Skip the actual download of the video file
c35f9e72 285 cachedir: Location of the cache files in the filesystem.
a0e07d31 286 False to disable filesystem cache.
47192f92 287 noplaylist: Download single video instead of a playlist if in doubt.
8dbe9899 PH 288 age_limit: An integer representing the user's age in years.
289 Unsuitable videos for the given age are skipped.
5fe18bdb PH 290 min_views: An integer representing the minimum view count the video
291 must have in order to not be skipped.
292 Videos without view count information are always
293 downloaded. None for no limit.
294 max_views: An integer representing the maximum view count.
295 Videos that are more popular than that are not
296 downloaded.
297 Videos without view count information are always
298 downloaded. None for no limit.
ae103564 299 download_archive: A set, or the name of a file where all downloads are recorded.
300 Videos already present in the file are not downloaded again.
8a51f564 301 break_on_existing: Stop the download process after attempting to download a
302 file that is in the archive.
b222c271 303 break_per_url: Whether break_on_reject and break_on_existing
304 should act on each input URL as opposed to for the entire queue
d76fa1f3 305 cookiefile: File name or text stream from where cookies should be read and dumped to
f59f5ef8 306 cookiesfrombrowser: A tuple containing the name of the browser, the profile
9bd13fe5 307 name/path from where cookies are loaded, the name of the keyring,
308 and the container name, e.g. ('chrome', ) or
309 ('vivaldi', 'default', 'BASICTEXT') or ('firefox', 'default', None, 'Meta')
f81c62a6 310 legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
311 support RFC 5746 secure renegotiation
f59f5ef8 312 nocheckcertificate: Do not verify SSL certificates
bb58c9ed 313 client_certificate: Path to client certificate file in PEM format. May include the private key
314 client_certificate_key: Path to private key file for client certificate
315 client_certificate_password: Password for client certificate private key, if encrypted.
316 If not provided and the key is encrypted, yt-dlp will ask interactively
7e8c0af0 317 prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
c6e07cf1 318 (Only supported by some extractors)
8300774c 319 enable_file_urls: Enable file:// URLs. This is disabled by default for security reasons.
8b7539d2 320 http_headers: A dictionary of custom headers to be used for all requests
a1ee09e8 321 proxy: URL of the proxy server to use
38cce791 322 geo_verification_proxy: URL of the proxy to use for IP address verification
504f20dd 323 on geo-restricted sites.
e344693b 324 socket_timeout: Time to wait for unresponsive hosts, in seconds
0783b09b PH 325 bidi_workaround: Work around buggy terminals without bidirectional text
326 support, using fribidi
a0ddb8a2 327 debug_printtraffic: Print out sent and received HTTP traffic
04b4d394 PH 328 default_search: Prepend this string if an input url is not valid.
329 'auto' for elaborate guessing
62fec3b2 330 encoding: Use this encoding instead of the system-specified.
134c913c 331 extract_flat: Whether to resolve and process url_results further
332 * False: Always process (default)
333 * True: Never process
334 * 'in_playlist': Do not process inside playlist/multi_video
335 * 'discard': Always process, but don't return the result
336 from inside playlist/multi_video
337 * 'discard_in_playlist': Same as "discard", but only for
338 playlists (not multi_video)
f2ebc5c7 339 wait_for_video: If given, wait for scheduled streams to become available.
340 The value should be a tuple containing the range
341 (min_secs, max_secs) to wait between retries
4f026faf 342 postprocessors: A list of dictionaries, each with an entry
71b640cc 343 * key: The name of the postprocessor. See
7a5c1cfe 344 yt_dlp/postprocessor/__init__.py for a list.
bb66c247 345 * when: When to run the postprocessor. Allowed values are
346 the entries of utils.POSTPROCESS_WHEN
56d868db 347 Assumed to be 'post_process' if not given
71b640cc PH 348 progress_hooks: A list of functions that get called on download
349 progress, with a dictionary with the entries
5cda4eda 350 * status: One of "downloading", "error", or "finished".
ee69b99a 351 Check this first and ignore unknown values.
3ba7740d 352 * info_dict: The extracted info_dict
71b640cc 353
5cda4eda 354 If status is one of "downloading", or "finished", the
ee69b99a PH 355 following properties may also be present:
356 * filename: The final filename (always present)
5cda4eda 357 * tmpfilename: The filename we're currently writing to
71b640cc PH 358 * downloaded_bytes: Bytes on disk
359 * total_bytes: Size of the whole file, None if unknown
5cda4eda PH 360 * total_bytes_estimate: Guess of the eventual file size,
361 None if unavailable.
362 * elapsed: The number of seconds since download started.
71b640cc PH 363 * eta: The estimated time in seconds, None if unknown
364 * speed: The download speed in bytes/second, None if
365 unknown
5cda4eda PH 366 * fragment_index: The counter of the currently
367 downloaded video fragment.
368 * fragment_count: The number of fragments (= individual
369 files that will be merged)
71b640cc PH 370
371 Progress hooks are guaranteed to be called at least once
372 (with status "finished") if the download is successful.
819e0531 373 postprocessor_hooks: A list of functions that get called on postprocessing
374 progress, with a dictionary with the entries
375 * status: One of "started", "processing", or "finished".
376 Check this first and ignore unknown values.
377 * postprocessor: Name of the postprocessor
378 * info_dict: The extracted info_dict
379
380 Progress hooks are guaranteed to be called at least twice
381 (with status "started" and "finished") if the processing is successful.
fc61aff4 382 merge_output_format: "/" separated list of extensions to use when merging formats.
6b591b29 383 final_ext: Expected final extension; used to detect when the file was
59a7a13e 384 already downloaded and converted
6271f1ca PH 385 fixup: Automatically correct known faults of the file.
386 One of:
387 - "never": do nothing
388 - "warn": only emit a warning
389 - "detect_or_warn": check whether we can do anything
62cd676c 390 about it, warn otherwise (default)
504f20dd 391 source_address: Client-side IP address to bind to.
1cf376f5 392 sleep_interval_requests: Number of seconds to sleep between requests
393 during extraction
7aa589a5 S 394 sleep_interval: Number of seconds to sleep before each download when
395 used alone or a lower bound of a range for randomized
396 sleep before each download (minimum possible number
397 of seconds to sleep) when used along with
398 max_sleep_interval.
399 max_sleep_interval: Upper bound of a range for randomized sleep before each
400 download (maximum possible number of seconds to sleep).
401 Must only be used along with sleep_interval.
402 Actual sleep time will be a random float from range
403 [sleep_interval; max_sleep_interval].
1cf376f5 404 sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
cfb56d1a PH 405 listformats: Print an overview of available video formats and exit.
406 list_thumbnails: Print a table of all thumbnails and exit.
0a41f331 407 match_filter: A function that gets called for every video with the signature
408 (info_dict, *, incomplete: bool) -> Optional[str]
409 For backward compatibility with youtube-dl, the signature
410 (info_dict) -> Optional[str] is also allowed.
411 - If it returns a message, the video is ignored.
412 - If it returns None, the video is downloaded.
413 - If it returns utils.NO_DEFAULT, the user is interactively
414 asked whether to download the video.
fe2ce85a 415 - Raise utils.DownloadCancelled(msg) to abort remaining
416 downloads when a video is rejected.
347de493 417 match_filter_func in utils.py is one example for this.
7e5db8c9 418 no_color: Do not emit color codes in output.
0a840f58 419 geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
504f20dd 420 HTTP header
0a840f58 421 geo_bypass_country:
773f291d S 422 Two-letter ISO 3166-2 country code that will be used for
423 explicit geographic restriction bypassing via faking
504f20dd 424 X-Forwarded-For HTTP header
5f95927a S 425 geo_bypass_ip_block:
426 IP range in CIDR notation that will be used similarly to
504f20dd 427 geo_bypass_country
52a8a1e1 428 external_downloader: A dictionary of protocol keys and the executable of the
429 external downloader to use for it. The allowed protocols
430 are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
431 Set the value to 'native' to use the native downloader
53ed7066 432 compat_opts: Compatibility options. See "Differences in default behavior".
3acf6d38 433 The following options do not work when used through the API:
b5ae35ee 434 filename, abort-on-error, multistreams, no-live-chat, format-sort
dac5df5a 435 no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
e4f02757 436 Refer to __init__.py for their implementation
819e0531 437 progress_template: Dictionary of templates for progress outputs.
438 Allowed keys are 'download', 'postprocess',
439 'download-title' (console title) and 'postprocess-title'.
440 The template is mapped on a dictionary with keys 'progress' and 'info'
23326151 441 retry_sleep_functions: Dictionary of functions that takes the number of attempts
442 as argument and returns the time to sleep in seconds.
443 Allowed keys are 'http', 'fragment', 'file_access'
0f446365 SW 444 download_ranges: A callback function that gets called for every video with
445 the signature (info_dict, ydl) -> Iterable[Section].
446 Only the returned sections will be downloaded.
447 Each Section is a dict with the following keys:
5ec1b6b7 448 * start_time: Start time of the section in seconds
449 * end_time: End time of the section in seconds
450 * title: Section title (Optional)
451 * index: Section number (Optional)
0f446365 452 force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
a7dc6a89 453 noprogress: Do not print the progress bar
a831c2ea 454 live_from_start: Whether to download livestreams from the start
fe7e0c98 455
8222d8de 456 The following parameters are not used by YoutubeDL itself, they are used by
7a5c1cfe 457 the downloader (see yt_dlp/downloader/common.py):
51d9739f 458 nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
205a0654 459 max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
a7dc6a89 460 continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
59a7a13e 461 external_downloader_args, concurrent_fragment_downloads.
76b1bd67 JMF 462
463 The following options are used by the post processors:
c0b7d117 S 464 ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
465 to the binary or its containing directory.
43820c03 466 postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
34488702 467 and a list of additional command-line arguments for the
468 postprocessor/executable. The dict can also have "PP+EXE" keys
469 which are used when the given exe is used by the given PP.
470 Use 'default' as the name for arguments to be passed to all PP
471 For compatibility with youtube-dl, a single list of args
472 can also be used
e409895f 473
474 The following options are used by the extractors:
62bff2c1 475 extractor_retries: Number of times to retry for known errors
476 dynamic_mpd: Whether to process dynamic DASH manifests (default: True)
e409895f 477 hls_split_discontinuity: Split HLS playlists to different formats at
62bff2c1 478 discontinuities such as ad breaks (default: False)
5d3a0e79 479 extractor_args: A dictionary of arguments to be passed to the extractors.
480 See "EXTRACTOR ARGUMENTS" for details.
62b58c09 481 E.g. {'youtube': {'skip': ['dash', 'hls']}}
88f23a18 482 mark_watched: Mark videos watched (even with --simulate). Only for YouTube
1890fc63 483
484 The following options are deprecated and may be removed in the future:
485
fe2ce85a 486 break_on_reject: Stop the download process when encountering a video that
487 has been filtered out.
488 - `raise DownloadCancelled(msg)` in match_filter instead
fe7866d0 489 force_generic_extractor: Force downloader to use the generic extractor
490 - Use allowed_extractors = ['generic', 'default']
7e9a6125 491 playliststart: - Use playlist_items
492 Playlist item to start at.
493 playlistend: - Use playlist_items
494 Playlist item to end at.
495 playlistreverse: - Use playlist_items
496 Download playlist items in reverse order.
1890fc63 497 forceurl: - Use forceprint
498 Force printing final URL.
499 forcetitle: - Use forceprint
500 Force printing title.
501 forceid: - Use forceprint
502 Force printing ID.
503 forcethumbnail: - Use forceprint
504 Force printing thumbnail URL.
505 forcedescription: - Use forceprint
506 Force printing description.
507 forcefilename: - Use forceprint
508 Force printing final filename.
509 forceduration: - Use forceprint
510 Force printing duration.
511 allsubtitles: - Use subtitleslangs = ['all']
512 Downloads all the subtitles of the video
513 (requires writesubtitles or writeautomaticsub)
514 include_ads: - Doesn't work
515 Download ads as well
516 call_home: - Not implemented
517 Boolean, true iff we are allowed to contact the
518 yt-dlp servers for debugging.
519 post_hooks: - Register a custom postprocessor
520 A list of functions that get called as the final step
521 for each video file, after all postprocessors have been
522 called. The filename will be passed as the only argument.
523 hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
524 Use the native HLS downloader instead of ffmpeg/avconv
525 if True, otherwise use ffmpeg/avconv if False, otherwise
526 use downloader suggested by extractor if None.
527 prefer_ffmpeg: - avconv support is deprecated
528 If False, use avconv instead of ffmpeg if both are available,
529 otherwise prefer ffmpeg.
530 youtube_include_dash_manifest: - Use extractor_args
5d3a0e79 531 If True (default), DASH manifests and related
62bff2c1 532 data will be downloaded and processed by extractor.
533 You can reduce network I/O by disabling it if you don't
534 care about DASH. (only for youtube)
1890fc63 535 youtube_include_hls_manifest: - Use extractor_args
5d3a0e79 536 If True (default), HLS manifests and related
62bff2c1 537 data will be downloaded and processed by extractor.
538 You can reduce network I/O by disabling it if you don't
539 care about HLS. (only for youtube)
8222d8de JMF 540 """
541
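# Editorial illustration (not part of the original file): a minimal usage sketch of the
# params dict described in the docstring above. The option values and URL are made-up
# examples; it assumes the download() entry point defined later in this class.
#
#   from yt_dlp import YoutubeDL
#
#   ydl_opts = {
#       'format': 'bestvideo+bestaudio/best',
#       'outtmpl': {'default': '%(title)s [%(id)s].%(ext)s'},
#       'paths': {'home': '~/Videos', 'temp': 'tmp'},
#       'noplaylist': True,
#   }
#   with YoutubeDL(ydl_opts) as ydl:
#       ydl.download(['https://www.youtube.com/watch?v=EXAMPLE'])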
86e5f3ed 542 _NUMERIC_FIELDS = {
b8ed0f15 543 'width', 'height', 'asr', 'audio_channels', 'fps',
544 'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
e6f21b3d 545 'timestamp', 'release_timestamp',
c9969434 S 546 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
547 'average_rating', 'comment_count', 'age_limit',
548 'start_time', 'end_time',
549 'chapter_number', 'season_number', 'episode_number',
550 'track_number', 'disc_number', 'release_year',
86e5f3ed 551 }
c9969434 552
6db9c4d5 553 _format_fields = {
554 # NB: Keep in sync with the docstring of extractor/common.py
a44ca5a4 555 'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
105bfd90 556 'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
d5d1df8a 557 'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns',
6db9c4d5 558 'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
559 'preference', 'language', 'language_preference', 'quality', 'source_preference',
7e68567e 560 'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'hls_aes', 'downloader_options',
6db9c4d5 561 'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
562 }
48ee10ee 563 _format_selection_exts = {
8dc59305 564 'audio': set(MEDIA_EXTENSIONS.common_audio),
565 'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
566 'storyboards': set(MEDIA_EXTENSIONS.storyboards),
48ee10ee 567 }
568
3511266b 569 def __init__(self, params=None, auto_init=True):
883d4b1e 570 """Create a FileDownloader object with the given options.
571 @param auto_init Whether to load the default extractors and print header (if verbose).
49a57e70 572 Set to 'no_verbose_header' to not print the header
883d4b1e 573 """
e9f9a10f JMF 574 if params is None:
575 params = {}
592b7485 576 self.params = params
8b7491c8 577 self._ies = {}
56c73665 578 self._ies_instances = {}
1e43a6f7 579 self._pps = {k: [] for k in POSTPROCESS_WHEN}
b35496d8 580 self._printed_messages = set()
1cf376f5 581 self._first_webpage_request = True
ab8e5e51 582 self._post_hooks = []
933605d7 583 self._progress_hooks = []
819e0531 584 self._postprocessor_hooks = []
8222d8de JMF 585 self._download_retcode = 0
586 self._num_downloads = 0
9c906919 587 self._num_videos = 0
592b7485 588 self._playlist_level = 0
589 self._playlist_urls = set()
a0e07d31 590 self.cache = Cache(self)
34308b30 591
591bb9d3 592 stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
593 self._out_files = Namespace(
594 out=stdout,
595 error=sys.stderr,
596 screen=sys.stderr if self.params.get('quiet') else stdout,
597 console=None if compat_os_name == 'nt' else next(
cf4f42cb 598 filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
591bb9d3 599 )
f0795149 600
601 try:
602 windows_enable_vt_mode()
603 except Exception as e:
604 self.write_debug(f'Failed to enable VT mode: {e}')
605
591bb9d3 606 self._allow_colors = Namespace(**{
607 type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
64fa820c 608 for type_, stream in self._out_files.items_ if type_ != 'console'
591bb9d3 609 })
819e0531 610
6929b41a 611 # The code is left like this to be reused for future deprecations
612 MIN_SUPPORTED, MIN_RECOMMENDED = (3, 7), (3, 7)
eff42759 613 current_version = sys.version_info[:2]
614 if current_version < MIN_RECOMMENDED:
9d339c41 615 msg = ('Support for Python version %d.%d has been deprecated. '
24093d52 616 'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details.'
c6e07cf1 617 '\n You will no longer receive updates on this version')
eff42759 618 if current_version < MIN_SUPPORTED:
619 msg = 'Python version %d.%d is no longer supported'
5b28cef7 620 self.deprecated_feature(
eff42759 621 f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))
a61f4b28 622
88acdbc2 623 if self.params.get('allow_unplayable_formats'):
624 self.report_warning(
ec11a9f4 625 f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
819e0531 626 'This is a developer option intended for debugging. \n'
627 ' If you experience any issues while using this option, '
ec11a9f4 628 f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')
88acdbc2 629
497074f0 630 if self.params.get('bidi_workaround', False):
631 try:
632 import pty
633 master, slave = pty.openpty()
634 width = shutil.get_terminal_size().columns
635 width_args = [] if width is None else ['-w', str(width)]
636 sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
637 try:
638 self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
639 except OSError:
640 self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
641 self._output_channel = os.fdopen(master, 'rb')
642 except OSError as ose:
643 if ose.errno == errno.ENOENT:
644 self.report_warning(
645 'Could not find fribidi executable, ignoring --bidi-workaround. '
646 'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
647 else:
648 raise
649
650 self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
651 if auto_init and auto_init != 'no_verbose_header':
652 self.print_debug_header()
653
be5df5ee S 654 def check_deprecated(param, option, suggestion):
655 if self.params.get(param) is not None:
86e5f3ed 656 self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
be5df5ee S 657 return True
658 return False
659
660 if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
38cce791 YCH 661 if self.params.get('geo_verification_proxy') is None:
662 self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']
663
0d1bb027 664 check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
665 check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
53ed7066 666 check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')
0d1bb027 667
49a57e70 668 for msg in self.params.get('_warnings', []):
0d1bb027 669 self.report_warning(msg)
ee8dd27a 670 for msg in self.params.get('_deprecation_warnings', []):
da4db748 671 self.deprecated_feature(msg)
0d1bb027 672
8a82af35 673 if 'list-formats' in self.params['compat_opts']:
ec11a9f4 674 self.params['listformats_table'] = False
675
b5ae35ee 676 if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
b868936c 677 # nooverwrites was unnecessarily changed to overwrites
678 # in 0c3d0f51778b153f65c21906031c2e091fcfb641
679 # This ensures compatibility with both keys
680 self.params['overwrites'] = not self.params['nooverwrites']
b5ae35ee 681 elif self.params.get('overwrites') is None:
682 self.params.pop('overwrites', None)
b868936c 683 else:
684 self.params['nooverwrites'] = not self.params['overwrites']
b9d973be 685
e4221b70 686 if self.params.get('simulate') is None and any((
687 self.params.get('list_thumbnails'),
688 self.params.get('listformats'),
689 self.params.get('listsubtitles'),
690 )):
691 self.params['simulate'] = 'list_only'
692
455a15e2 693 self.params.setdefault('forceprint', {})
694 self.params.setdefault('print_to_file', {})
bb66c247 695
696 # Compatibility with older syntax
ca30f449 697 if not isinstance(params['forceprint'], dict):
455a15e2 698 self.params['forceprint'] = {'video': params['forceprint']}
ca30f449 699
97ec5bc5 700 if auto_init:
97ec5bc5 701 self.add_default_info_extractors()
702
3089bc74 S 703 if (sys.platform != 'win32'
704 and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
455a15e2 705 and not self.params.get('restrictfilenames', False)):
e9137224 706 # Unicode filesystem API will throw errors (#1474, #13027)
34308b30 707 self.report_warning(
6febd1c1 708 'Assuming --restrict-filenames since file system encoding '
1b725173 709 'cannot encode all characters. '
6febd1c1 710 'Set the LC_ALL environment variable to fix this.')
4a98cdbf 711 self.params['restrictfilenames'] = True
34308b30 712
bf1824b3 713 self._parse_outtmpl()
486dd09e 714
187986a8 715 # Creating format selector here allows us to catch syntax errors before the extraction
716 self.format_selector = (
fa9f30b8 717 self.params.get('format') if self.params.get('format') in (None, '-')
093a1710 718 else self.params['format'] if callable(self.params['format'])
187986a8 719 else self.build_format_selector(self.params['format']))
720
8b7539d2 721 # Set http_headers defaults according to std_headers
722 self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))
723
013b50b7 724 hooks = {
725 'post_hooks': self.add_post_hook,
726 'progress_hooks': self.add_progress_hook,
727 'postprocessor_hooks': self.add_postprocessor_hook,
728 }
729 for opt, fn in hooks.items():
730 for ph in self.params.get(opt, []):
731 fn(ph)
71b640cc 732
5bfc8bee 733 for pp_def_raw in self.params.get('postprocessors', []):
734 pp_def = dict(pp_def_raw)
735 when = pp_def.pop('when', 'post_process')
736 self.add_post_processor(
f9934b96 737 get_postprocessor(pp_def.pop('key'))(self, **pp_def),
5bfc8bee 738 when=when)
739
97ec5bc5 740 self._setup_opener()
51fb4995 741
ed39cac5 742 def preload_download_archive(fn):
743 """Preload the archive, if any is specified"""
ae103564 744 archive = set()
ed39cac5 745 if fn is None:
ae103564 746 return archive
941e881e 747 elif not is_path_like(fn):
ae103564 748 return fn
749
49a57e70 750 self.write_debug(f'Loading archive file {fn!r}')
ed39cac5 751 try:
752 with locked_file(fn, 'r', encoding='utf-8') as archive_file:
753 for line in archive_file:
ae103564 754 archive.add(line.strip())
86e5f3ed 755 except OSError as ioe:
ed39cac5 756 if ioe.errno != errno.ENOENT:
757 raise
ae103564 758 return archive
ed39cac5 759
ae103564 760 self.archive = preload_download_archive(self.params.get('download_archive'))
ed39cac5 761
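# Editorial illustration (not in the original file): 'download_archive' accepts either a file
# path or a pre-built set, as handled by preload_download_archive() above. Archive entries are
# '<extractor> <video id>' strings (cf. make_archive_id imported from .utils); values below are examples.
#
#   ydl = YoutubeDL({'download_archive': 'downloaded.txt', 'break_on_existing': True})
#   # or pass an in-memory set directly:
#   ydl = YoutubeDL({'download_archive': {'youtube dQw4w9WgXcQ'}})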
7d4111ed PH 762 def warn_if_short_id(self, argv):
763 # short YouTube ID starting with dash?
764 idxs = [
765 i for i, a in enumerate(argv)
766 if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
767 if idxs:
768 correct_argv = (
7a5c1cfe 769 ['yt-dlp']
3089bc74 S 770 + [a for i, a in enumerate(argv) if i not in idxs]
771 + ['--'] + [argv[i] for i in idxs]
7d4111ed PH 772 )
773 self.report_warning(
774 'Long argument string detected. '
49a57e70 775 'Use -- to separate parameters and URLs, like this:\n%s' %
7d4111ed PH 776 args_to_str(correct_argv))
777
8222d8de JMF 778 def add_info_extractor(self, ie):
779 """Add an InfoExtractor object to the end of the list."""
8b7491c8 780 ie_key = ie.ie_key()
781 self._ies[ie_key] = ie
e52d7f85 782 if not isinstance(ie, type):
8b7491c8 783 self._ies_instances[ie_key] = ie
e52d7f85 784 ie.set_downloader(self)
8222d8de 785
56c73665 JMF 786 def get_info_extractor(self, ie_key):
787 """
788 Get an instance of an IE with name ie_key; it will try to get one from
789 the _ies list. If there's no instance, it will create a new one and add
790 it to the extractor list.
791 """
792 ie = self._ies_instances.get(ie_key)
793 if ie is None:
794 ie = get_info_extractor(ie_key)()
795 self.add_info_extractor(ie)
796 return ie
797
023fa8c4 JMF 798 def add_default_info_extractors(self):
799 """
800 Add the InfoExtractors returned by gen_extractors to the end of the list
801 """
fe7866d0 802 all_ies = {ie.IE_NAME.lower(): ie for ie in gen_extractor_classes()}
803 all_ies['end'] = UnsupportedURLIE()
804 try:
805 ie_names = orderedSet_from_options(
806 self.params.get('allowed_extractors', ['default']), {
807 'all': list(all_ies),
808 'default': [name for name, ie in all_ies.items() if ie._ENABLED],
809 }, use_regex=True)
810 except re.error as e:
811 raise ValueError(f'Wrong regex for allowed_extractors: {e.pattern}')
812 for name in ie_names:
813 self.add_info_extractor(all_ies[name])
814 self.write_debug(f'Loaded {len(ie_names)} extractors')
023fa8c4 815
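# Editorial illustration (not in the original file): 'allowed_extractors' entries are matched
# as regexes against lowercased extractor names, with 'all' and 'default' handled specially
# above. The values below are examples, not defaults.
#
#   YoutubeDL({'allowed_extractors': ['default', 'generic']})  # the default set plus the generic IE
#   YoutubeDL({'allowed_extractors': ['youtube.*']})           # only YouTube-related extractors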
56d868db 816 def add_post_processor(self, pp, when='post_process'):
8222d8de 817 """Add a PostProcessor object to the end of the chain."""
8aa0e7cd 818 assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
5bfa4862 819 self._pps[when].append(pp)
8222d8de JMF 820 pp.set_downloader(self)
821
ab8e5e51 AM 822 def add_post_hook(self, ph):
823 """Add the post hook"""
824 self._post_hooks.append(ph)
825
933605d7 826 def add_progress_hook(self, ph):
819e0531 827 """Add the download progress hook"""
933605d7 828 self._progress_hooks.append(ph)
8ab470f1 829
819e0531 830 def add_postprocessor_hook(self, ph):
831 """Add the postprocessing progress hook"""
832 self._postprocessor_hooks.append(ph)
5bfc8bee 833 for pps in self._pps.values():
834 for pp in pps:
835 pp.add_progress_hook(ph)
819e0531 836
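# Editorial illustration (not in the original file): hooks and extra postprocessors can also be
# registered programmatically through the methods above. Given an existing instance `ydl`, the
# hook below is hypothetical; its dict keys follow the progress_hooks description in the docstring.
#
#   def on_progress(d):
#       if d['status'] == 'finished':
#           print('Downloaded', d.get('filename'))
#
#   ydl.add_progress_hook(on_progress)
#   ydl.add_post_processor(MyCustomPP(), when='post_process')  # MyCustomPP is a hypothetical PP class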
1c088fa8 837 def _bidi_workaround(self, message):
5d681e96 838 if not hasattr(self, '_output_channel'):
1c088fa8 PH 839 return message
840
5d681e96 841 assert hasattr(self, '_output_process')
14f25df2 842 assert isinstance(message, str)
6febd1c1 843 line_count = message.count('\n') + 1
0f06bcd7 844 self._output_process.stdin.write((message + '\n').encode())
5d681e96 845 self._output_process.stdin.flush()
0f06bcd7 846 res = ''.join(self._output_channel.readline().decode()
9e1a5b84 847 for _ in range(line_count))
6febd1c1 848 return res[:-len('\n')]
1c088fa8 849
b35496d8 850 def _write_string(self, message, out=None, only_once=False):
851 if only_once:
852 if message in self._printed_messages:
853 return
854 self._printed_messages.add(message)
855 write_string(message, out=out, encoding=self.params.get('encoding'))
734f90bb 856
cf4f42cb 857 def to_stdout(self, message, skip_eol=False, quiet=None):
0760b0a7 858 """Print message to stdout"""
cf4f42cb 859 if quiet is not None:
da4db748 860 self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. '
861 'Use "YoutubeDL.to_screen" instead')
8a82af35 862 if skip_eol is not False:
da4db748 863 self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. '
864 'Use "YoutubeDL.to_screen" instead')
0bf9dc1e 865 self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)
cf4f42cb 866
dfea94f8 867 def to_screen(self, message, skip_eol=False, quiet=None, only_once=False):
cf4f42cb 868 """Print message to screen if not in quiet mode"""
8bf9319e 869 if self.params.get('logger'):
43afe285 870 self.params['logger'].debug(message)
cf4f42cb 871 return
872 if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
873 return
874 self._write_string(
875 '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
dfea94f8 876 self._out_files.screen, only_once=only_once)
8222d8de 877
b35496d8 878 def to_stderr(self, message, only_once=False):
0760b0a7 879 """Print message to stderr"""
14f25df2 880 assert isinstance(message, str)
8bf9319e 881 if self.params.get('logger'):
43afe285 IB 882 self.params['logger'].error(message)
883 else:
5792c950 884 self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)
cf4f42cb 885
886 def _send_console_code(self, code):
591bb9d3 887 if compat_os_name == 'nt' or not self._out_files.console:
cf4f42cb 888 return
591bb9d3 889 self._write_string(code, self._out_files.console)
8222d8de 890
1e5b9a95 PH 891 def to_console_title(self, message):
892 if not self.params.get('consoletitle', False):
893 return
3efb96a6 894 message = remove_terminal_sequences(message)
4bede0d8 C 895 if compat_os_name == 'nt':
896 if ctypes.windll.kernel32.GetConsoleWindow():
897 # c_wchar_p() might not be necessary if `message` is
898 # already of type unicode()
899 ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
cf4f42cb 900 else:
901 self._send_console_code(f'\033]0;{message}\007')
1e5b9a95 902
bdde425c 903 def save_console_title(self):
cf4f42cb 904 if not self.params.get('consoletitle') or self.params.get('simulate'):
bdde425c 905 return
592b7485 906 self._send_console_code('\033[22;0t') # Save the title on stack
bdde425c PH 907
908 def restore_console_title(self):
cf4f42cb 909 if not self.params.get('consoletitle') or self.params.get('simulate'):
bdde425c 910 return
592b7485 911 self._send_console_code('\033[23;0t') # Restore the title from stack
bdde425c PH 912
913 def __enter__(self):
914 self.save_console_title()
915 return self
916
917 def __exit__(self, *args):
918 self.restore_console_title()
f89197d7 919
dca08720 920 if self.params.get('cookiefile') is not None:
1bab3437 921 self.cookiejar.save(ignore_discard=True, ignore_expires=True)
bdde425c 922
fa9f30b8 923 def trouble(self, message=None, tb=None, is_error=True):
8222d8de JMF 924 """Determine action to take when a download problem appears.
925
926 Depending on whether the downloader has been configured to ignore
927 download errors or not, this method may throw an exception or
928 not when errors are found, after printing the message.
929
fa9f30b8 930 @param tb If given, is additional traceback information
931 @param is_error Whether to raise error according to ignoreerrors
8222d8de JMF 932 """
933 if message is not None:
934 self.to_stderr(message)
935 if self.params.get('verbose'):
936 if tb is None:
937 if sys.exc_info()[0]: # if .trouble has been called from an except block
6febd1c1 938 tb = ''
8222d8de 939 if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
6febd1c1 940 tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
c0384f22 941 tb += encode_compat_str(traceback.format_exc())
8222d8de JMF 942 else:
943 tb_data = traceback.format_list(traceback.extract_stack())
6febd1c1 944 tb = ''.join(tb_data)
c19bc311 945 if tb:
946 self.to_stderr(tb)
fa9f30b8 947 if not is_error:
948 return
b1940459 949 if not self.params.get('ignoreerrors'):
8222d8de JMF 950 if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
951 exc_info = sys.exc_info()[1].exc_info
952 else:
953 exc_info = sys.exc_info()
954 raise DownloadError(message, exc_info)
955 self._download_retcode = 1
956
19a03940 957 Styles = Namespace(
958 HEADERS='yellow',
959 EMPHASIS='light blue',
492272fe 960 FILENAME='green',
19a03940 961 ID='green',
962 DELIM='blue',
963 ERROR='red',
964 WARNING='yellow',
965 SUPPRESS='light black',
966 )
ec11a9f4 967
7578d77d 968 def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
e5a998f3 969 text = str(text)
ec11a9f4 970 if test_encoding:
971 original_text = text
5c104538 972 # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
973 encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
ec11a9f4 974 text = text.encode(encoding, 'ignore').decode(encoding)
975 if fallback is not None and text != original_text:
976 text = fallback
7578d77d 977 return format_text(text, f) if allow_colors else text if fallback is None else fallback
ec11a9f4 978
591bb9d3 979 def _format_out(self, *args, **kwargs):
980 return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)
981
ec11a9f4 982 def _format_screen(self, *args, **kwargs):
591bb9d3 983 return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)
ec11a9f4 984
985 def _format_err(self, *args, **kwargs):
591bb9d3 986 return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)
819e0531 987
c84aeac6 988 def report_warning(self, message, only_once=False):
8222d8de JMF 989 '''
990 Print the message to stderr; it will be prefixed with 'WARNING:'.
991 If stderr is a tty, the 'WARNING:' will be colored
992 '''
6d07ce01 JMF 993 if self.params.get('logger') is not None:
994 self.params['logger'].warning(message)
8222d8de 995 else:
ad8915b7 PH 996 if self.params.get('no_warnings'):
997 return
ec11a9f4 998 self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)
8222d8de 999
da4db748 1000 def deprecation_warning(self, message, *, stacklevel=0):
1001 deprecation_warning(
1002 message, stacklevel=stacklevel + 1, printer=self.report_error, is_error=False)
1003
1004 def deprecated_feature(self, message):
ee8dd27a 1005 if self.params.get('logger') is not None:
da4db748 1006 self.params['logger'].warning(f'Deprecated Feature: {message}')
1007 self.to_stderr(f'{self._format_err("Deprecated Feature:", self.Styles.ERROR)} {message}', True)
ee8dd27a 1008
fa9f30b8 1009 def report_error(self, message, *args, **kwargs):
8222d8de JMF 1010 '''
1011 Do the same as trouble, but prefixes the message with 'ERROR:', colored
1012 in red if stderr is a tty.
1013 '''
fa9f30b8 1014 self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)
8222d8de 1015
b35496d8 1016 def write_debug(self, message, only_once=False):
0760b0a7 1017 '''Log debug message or Print message to stderr'''
1018 if not self.params.get('verbose', False):
1019 return
8a82af35 1020 message = f'[debug] {message}'
0760b0a7 1021 if self.params.get('logger'):
1022 self.params['logger'].debug(message)
1023 else:
b35496d8 1024 self.to_stderr(message, only_once)
0760b0a7 1025
8222d8de JMF 1026 def report_file_already_downloaded(self, file_name):
1027 """Report file has already been fully downloaded."""
1028 try:
6febd1c1 1029 self.to_screen('[download] %s has already been downloaded' % file_name)
ce02ed60 1030 except UnicodeEncodeError:
6febd1c1 1031 self.to_screen('[download] The file has already been downloaded')
8222d8de 1032
0c3d0f51 1033 def report_file_delete(self, file_name):
1034 """Report that existing file will be deleted."""
1035 try:
c25228e5 1036 self.to_screen('Deleting existing file %s' % file_name)
0c3d0f51 1037 except UnicodeEncodeError:
c25228e5 1038 self.to_screen('Deleting existing file')
0c3d0f51 1039
319b6059 1040 def raise_no_formats(self, info, forced=False, *, msg=None):
0a5a191a 1041 has_drm = info.get('_has_drm')
319b6059 1042 ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
1043 msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
1044 if forced or not ignored:
1151c407 1045 raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
319b6059 1046 expected=has_drm or ignored or expected)
88acdbc2 1047 else:
1048 self.report_warning(msg)
1049
de6000d9 1050 def parse_outtmpl(self):
bf1824b3 1051 self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
1052 self._parse_outtmpl()
1053 return self.params['outtmpl']
1054
1055 def _parse_outtmpl(self):
7b2c3f47 1056 sanitize = IDENTITY
bf1824b3 1057 if self.params.get('restrictfilenames'): # Remove spaces in the default template
71ce444a 1058 sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')
bf1824b3 1059
1060 outtmpl = self.params.setdefault('outtmpl', {})
1061 if not isinstance(outtmpl, dict):
1062 self.params['outtmpl'] = outtmpl = {'default': outtmpl}
1063 outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})
de6000d9 1064
21cd8fae 1065 def get_output_path(self, dir_type='', filename=None):
1066 paths = self.params.get('paths', {})
d2c8aadf 1067 assert isinstance(paths, dict), '"paths" parameter must be a dictionary'
21cd8fae 1068 path = os.path.join(
1069 expand_path(paths.get('home', '').strip()),
1070 expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
1071 filename or '')
21cd8fae 1072 return sanitize_path(path, force=self.params.get('windowsfilenames'))
1073
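# Editorial illustration (not in the original file): 'outtmpl' and 'paths' are both dicts;
# get_output_path() joins the 'home' path, the per-type path and the filename. Values are made up.
#
#   ydl = YoutubeDL({
#       'outtmpl': {'default': '%(uploader)s/%(title)s.%(ext)s'},
#       'paths': {'home': '/media/library', 'temp': 'tmp'},
#   })
#   ydl.get_output_path('temp', 'clip.part')  # -> '/media/library/tmp/clip.part' (after sanitize_path)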
76a264ac 1074 @staticmethod
901130bb 1075 def _outtmpl_expandpath(outtmpl):
1076 # expand_path translates '%%' into '%' and '$$' into '$'
1077 # correspondingly. That is not what we want, since we need to keep
1078 # '%%' intact for the template dict substitution step. Work around
1079 # it with a boundary-like separator hack.
6f2287cb 1080 sep = ''.join(random.choices(string.ascii_letters, k=32))
86e5f3ed 1081 outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')
901130bb 1082
1083 # outtmpl should be expand_path'ed before template dict substitution
1084 # because meta fields may contain env variables we don't want to
62b58c09 1085 # be expanded. E.g. for outtmpl "%(title)s.%(ext)s" and
901130bb 1086 # title "Hello $PATH", we don't want `$PATH` to be expanded.
1087 return expand_path(outtmpl).replace(sep, '')
1088
1089 @staticmethod
1090 def escape_outtmpl(outtmpl):
1091 ''' Escape any remaining strings like %s, %abc% etc. '''
1092 return re.sub(
1093 STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
1094 lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
1095 outtmpl)
1096
1097 @classmethod
1098 def validate_outtmpl(cls, outtmpl):
76a264ac 1099 ''' @return None or Exception object '''
7d1eb38a 1100 outtmpl = re.sub(
47cdc68e 1101 STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
7d1eb38a 1102 lambda mobj: f'{mobj.group(0)[:-1]}s',
1103 cls._outtmpl_expandpath(outtmpl))
76a264ac 1104 try:
7d1eb38a 1105 cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
76a264ac 1106 return None
1107 except ValueError as err:
1108 return err
1109
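# Editorial illustration (not in the original file): per the '@return None or Exception object'
# docstring above, validate_outtmpl() returns None for a usable template and the caught
# exception object otherwise.
#
#   assert YoutubeDL.validate_outtmpl('%(title)s - %(id)s.%(ext)s') is None
#   err = YoutubeDL.validate_outtmpl('%(title)')  # incomplete template; returns the ValueError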
03b4de72 1110 @staticmethod
1111 def _copy_infodict(info_dict):
1112 info_dict = dict(info_dict)
09b49e1f 1113 info_dict.pop('__postprocessors', None)
415f8d51 1114 info_dict.pop('__pending_error', None)
03b4de72 1115 return info_dict
1116
e0fd9573 1117 def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
1118 """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
1119 @param sanitize Whether to sanitize the output as a filename.
1120 For backward compatibility, a function can also be passed
1121 """
1122
6e84b215 1123 info_dict.setdefault('epoch', int(time.time())) # keep epoch consistent once set
143db31d 1124
03b4de72 1125 info_dict = self._copy_infodict(info_dict)
752cda38 1126 info_dict['duration_string'] = ( # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
53c18592 1127 formatSeconds(info_dict['duration'], '-' if sanitize else ':')
143db31d 1128 if info_dict.get('duration', None) is not None
1129 else None)
1d485a1a 1130 info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
9c906919 1131 info_dict['video_autonumber'] = self._num_videos
752cda38 1132 if info_dict.get('resolution') is None:
1133 info_dict['resolution'] = self.format_resolution(info_dict, default=None)
143db31d 1134
e6f21b3d 1135 # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
143db31d 1136 # of %(field)s to %(field)0Nd for backward compatibility
1137 field_size_compat_map = {
0a5a191a 1138 'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
ec11a9f4 1139 'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
752cda38 1140 'autonumber': self.params.get('autonumber_size') or 5,
143db31d 1141 }
752cda38 1142
385a27fa 1143 TMPL_DICT = {}
47cdc68e 1144 EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
385a27fa 1145 MATH_FUNCTIONS = {
1146 '+': float.__add__,
1147 '-': float.__sub__,
1148 }
e625be0d 1149 # Field is of the form key1.key2...
07a1250e 1150 # where keys (except first) can be string, int, slice or "{field, ...}"
1151 FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}
1152 FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {
1153 'inner': FIELD_INNER_RE,
1154 'field': rf'\w*(?:\.{FIELD_INNER_RE})*'
1155 }
1d485a1a 1156 MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
385a27fa 1157 MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
78fde6e3 1158 INTERNAL_FORMAT_RE = re.compile(rf'''(?xs)
e625be0d 1159 (?P<negate>-)?
1d485a1a 1160 (?P<fields>{FIELD_RE})
1161 (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
e625be0d 1162 (?:>(?P<strf_format>.+?))?
34baa9fd 1163 (?P<remaining>
1164 (?P<alternate>(?<!\\),[^|&)]+)?
1165 (?:&(?P<replacement>.*?))?
1166 (?:\|(?P<default>.*?))?
1d485a1a 1167 )$''')
752cda38 1168
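# Editorial illustration (not in the original file): examples of the field syntax parsed by
# INTERNAL_FORMAT_RE above; field names and values are illustrative only.
#
#   %(title|Unknown)s               -> '|' supplies a literal default when the field is missing
#   %(artist,uploader)s             -> ',' lists alternate fields; the first non-empty one is used
#   %(duration>%H-%M-%S)s           -> '>' applies a strftime-style format to the value
#   %(epoch-3600>%H-%M-%S)s         -> '+'/'-' apply simple arithmetic before formatting
#   %(formats.0.format_id)s         -> '.' traverses into nested dicts/lists of the info dict
#   %(chapters&has chapters|none)s  -> '&' substitutes replacement text when the field is non-empty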
07a1250e 1169 def _traverse_infodict(fields):
1170 fields = [f for x in re.split(r'\.({.+?})\.?', fields)
1171 for f in ([x] if x.startswith('{') else x.split('.'))]
1172 for i in (0, -1):
1173 if fields and not fields[i]:
1174 fields.pop(i)
1175
1176 for i, f in enumerate(fields):
1177 if not f.startswith('{'):
1178 continue
1179 assert f.endswith('}'), f'No closing brace for {f} in {fields}'
1180 fields[i] = {k: k.split('.') for k in f[1:-1].split(',')}
1181
1182 return traverse_obj(info_dict, fields, is_user_input=True, traverse_string=True)
76a264ac 1183
752cda38 1184 def get_value(mdict):
1185 # Object traversal
2b8a2973 1186 value = _traverse_infodict(mdict['fields'])
752cda38 1187 # Negative
1188 if mdict['negate']:
1189 value = float_or_none(value)
1190 if value is not None:
1191 value *= -1
1192 # Do maths
385a27fa 1193 offset_key = mdict['maths']
1194 if offset_key:
752cda38 1195 value = float_or_none(value)
1196 operator = None
385a27fa 1197 while offset_key:
1198 item = re.match(
1199 MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
1200 offset_key).group(0)
1201 offset_key = offset_key[len(item):]
1202 if operator is None:
752cda38 1203 operator = MATH_FUNCTIONS[item]
385a27fa 1204 continue
1205 item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
1206 offset = float_or_none(item)
1207 if offset is None:
2b8a2973 1208 offset = float_or_none(_traverse_infodict(item))
385a27fa 1209 try:
1210 value = operator(value, multiplier * offset)
1211 except (TypeError, ZeroDivisionError):
1212 return None
1213 operator = None
752cda38 1214 # Datetime formatting
1215 if mdict['strf_format']:
7c37ff97 1216 value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))
752cda38 1217
a6bcaf71 1218 # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
1219 if sanitize and value == '':
1220 value = None
752cda38 1221 return value
1222
b868936c 1223 na = self.params.get('outtmpl_na_placeholder', 'NA')
1224
e0fd9573 1225 def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
5c3895ff 1226 return sanitize_filename(str(value), restricted=restricted, is_id=(
1227 bool(re.search(r'(^|[_.])id(\.|$)', key))
8a82af35 1228 if 'filename-sanitization' in self.params['compat_opts']
5c3895ff 1229 else NO_DEFAULT))
e0fd9573 1230
1231 sanitizer = sanitize if callable(sanitize) else filename_sanitizer
1232 sanitize = bool(sanitize)
1233
6e84b215 1234 def _dumpjson_default(obj):
1235 if isinstance(obj, (set, LazyList)):
1236 return list(obj)
adbc4ec4 1237 return repr(obj)
6e84b215 1238
6f2287cb 1239 class _ReplacementFormatter(string.Formatter):
ec9311c4 1240 def get_field(self, field_name, args, kwargs):
1241 if field_name.isdigit():
1242 return args[0], -1
1243 raise ValueError('Unsupported field')
1244
1245 replacement_formatter = _ReplacementFormatter()
1246
752cda38 1247 def create_key(outer_mobj):
1248 if not outer_mobj.group('has_key'):
b836dc94 1249 return outer_mobj.group(0)
752cda38 1250 key = outer_mobj.group('key')
752cda38 1251 mobj = re.match(INTERNAL_FORMAT_RE, key)
e0fd9573 1252 initial_field = mobj.group('fields') if mobj else ''
e978789f 1253 value, replacement, default = None, None, na
7c37ff97 1254 while mobj:
e625be0d 1255 mobj = mobj.groupdict()
7c37ff97 1256 default = mobj['default'] if mobj['default'] is not None else default
752cda38 1257 value = get_value(mobj)
e978789f 1258 replacement = mobj['replacement']
7c37ff97 1259 if value is None and mobj['alternate']:
34baa9fd 1260 mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
7c37ff97 1261 else:
1262 break
752cda38 1263
b868936c 1264 fmt = outer_mobj.group('format')
752cda38 1265 if fmt == 's' and value is not None and key in field_size_compat_map.keys():
86e5f3ed 1266 fmt = f'0{field_size_compat_map[key]:d}d'
752cda38 1267
ec9311c4 1268 if value is None:
1269 value = default
1270 elif replacement is not None:
1271 try:
1272 value = replacement_formatter.format(replacement, value)
1273 except ValueError:
1274 value = na
752cda38 1275
4476d2c7 1276 flags = outer_mobj.group('conversion') or ''
7d1eb38a 1277 str_fmt = f'{fmt[:-1]}s'
524e2e4f 1278 if fmt[-1] == 'l': # list
4476d2c7 1279 delim = '\n' if '#' in flags else ', '
9e907ebd 1280 value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
524e2e4f 1281 elif fmt[-1] == 'j': # json
deae7c17 1282 value, fmt = json.dumps(
1283 value, default=_dumpjson_default,
9b9dad11 1284 indent=4 if '#' in flags else None, ensure_ascii='+' not in flags), str_fmt
47cdc68e 1285 elif fmt[-1] == 'h': # html
deae7c17 1286 value, fmt = escapeHTML(str(value)), str_fmt
524e2e4f 1287 elif fmt[-1] == 'q': # quoted
4476d2c7 1288 value = map(str, variadic(value) if '#' in flags else [value])
1289 value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
524e2e4f 1290 elif fmt[-1] == 'B': # bytes
0f06bcd7 1291 value = f'%{str_fmt}'.encode() % str(value).encode()
f5aa5cfb 1292 value, fmt = value.decode('utf-8', 'ignore'), 's'
524e2e4f 1293 elif fmt[-1] == 'U': # unicode normalized
524e2e4f 1294 value, fmt = unicodedata.normalize(
1295 # "+" = compatibility equivalence, "#" = NFD
4476d2c7 1296 'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
524e2e4f 1297 value), str_fmt
e0fd9573 1298 elif fmt[-1] == 'D': # decimal suffix
abbeeebc 1299 num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
1300 value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
1301 factor=1024 if '#' in flags else 1000)
37893bb0 1302 elif fmt[-1] == 'S': # filename sanitization
e0fd9573 1303 value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
7d1eb38a 1304 elif fmt[-1] == 'c':
524e2e4f 1305 if value:
1306 value = str(value)[0]
76a264ac 1307 else:
524e2e4f 1308 fmt = str_fmt
76a264ac 1309 elif fmt[-1] not in 'rs': # numeric
a439a3a4 1310 value = float_or_none(value)
752cda38 1311 if value is None:
1312 value, fmt = default, 's'
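# Assumed examples of the conversion suffixes handled above: %(tags)l joins
# a list, %(formats)j serializes to JSON, %(title)q shell-quotes, %(title)h
# HTML-escapes, %(filesize)D appends a decimal suffix (e.g. 10M) and
# %(title)S applies filename sanitization.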
901130bb 1313
752cda38 1314 if sanitize:
1315 if fmt[-1] == 'r':
1316 # If value is an object, sanitize might convert it to a string
1317 # So we convert it to repr first
7d1eb38a 1318 value, fmt = repr(value), str_fmt
639f1cea 1319 if fmt[-1] in 'csr':
e0fd9573 1320 value = sanitizer(initial_field, value)
901130bb 1321
b868936c 1322 key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
385a27fa 1323 TMPL_DICT[key] = value
b868936c 1324 return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))
752cda38 1325
385a27fa 1326 return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT
143db31d 1327
819e0531 1328 def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
1329 outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
1330 return self.escape_outtmpl(outtmpl) % info_dict
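# Minimal usage sketch (assumed): given an already-extracted info dict,
#   self.evaluate_outtmpl('%(title)s [%(id)s].%(ext)s', info_dict)
# returns the rendered string, with unavailable fields replaced by the
# 'outtmpl_na_placeholder' value ('NA' by default).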
1331
5127e92a 1332 def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
1333 assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
1334 if outtmpl is None:
bf1824b3 1335 outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
8222d8de 1336 try:
5127e92a 1337 outtmpl = self._outtmpl_expandpath(outtmpl)
e0fd9573 1338 filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
6a0546e3 1339 if not filename:
1340 return None
15da37c7 1341
5127e92a 1342 if tmpl_type in ('', 'temp'):
6a0546e3 1343 final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
1344 if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
1345 filename = replace_extension(filename, ext, final_ext)
5127e92a 1346 elif tmpl_type:
6a0546e3 1347 force_ext = OUTTMPL_TYPES[tmpl_type]
1348 if force_ext:
1349 filename = replace_extension(filename, force_ext, info_dict.get('ext'))
de6000d9 1350
bdc3fd2f
U
1351 # https://github.com/blackjack4494/youtube-dlc/issues/85
1352 trim_file_name = self.params.get('trim_file_name', False)
1353 if trim_file_name:
5c22c63d 1354 no_ext, *ext = filename.rsplit('.', 2)
1355 filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')
bdc3fd2f 1356
0202b52a 1357 return filename
8222d8de 1358 except ValueError as err:
6febd1c1 1359 self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
8222d8de
JMF
1360 return None
1361
5127e92a 1362 def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
1363 """Generate the output filename"""
1364 if outtmpl:
1365 assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
1366 dir_type = None
1367 filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
80c03fa9 1368 if not filename and dir_type not in ('', 'temp'):
1369 return ''
de6000d9 1370
c84aeac6 1371 if warn:
21cd8fae 1372 if not self.params.get('paths'):
de6000d9 1373 pass
1374 elif filename == '-':
c84aeac6 1375 self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
de6000d9 1376 elif os.path.isabs(filename):
c84aeac6 1377 self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
de6000d9 1378 if filename == '-' or not filename:
1379 return filename
1380
21cd8fae 1381 return self.get_output_path(dir_type, filename)
0202b52a 1382
120fe513 1383 def _match_entry(self, info_dict, incomplete=False, silent=False):
6368e2e6 1384 """Returns None if the file should be downloaded"""
d7b460d0 1385 _type = info_dict.get('_type', 'video')
1386 assert incomplete or _type == 'video', 'Only video result can be considered complete'
8222d8de 1387
3bec830a 1388 video_title = info_dict.get('title', info_dict.get('id', 'entry'))
c77495e3 1389
8b0d7497 1390 def check_filter():
d7b460d0 1391 if _type in ('playlist', 'multi_video'):
1392 return
1393 elif _type in ('url', 'url_transparent') and not try_call(
1394 lambda: self.get_info_extractor(info_dict['ie_key']).is_single_video(info_dict['url'])):
1395 return
1396
8b0d7497 1397 if 'title' in info_dict:
1398 # This can happen when we're just evaluating the playlist
1399 title = info_dict['title']
1400 matchtitle = self.params.get('matchtitle', False)
1401 if matchtitle:
1402 if not re.search(matchtitle, title, re.IGNORECASE):
1403 return '"' + title + '" title did not match pattern "' + matchtitle + '"'
1404 rejecttitle = self.params.get('rejecttitle', False)
1405 if rejecttitle:
1406 if re.search(rejecttitle, title, re.IGNORECASE):
1407 return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
6368e2e6 1408
8b0d7497 1409 date = info_dict.get('upload_date')
1410 if date is not None:
1411 dateRange = self.params.get('daterange', DateRange())
1412 if date not in dateRange:
86e5f3ed 1413 return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
8b0d7497 1414 view_count = info_dict.get('view_count')
1415 if view_count is not None:
1416 min_views = self.params.get('min_views')
1417 if min_views is not None and view_count < min_views:
1418 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
1419 max_views = self.params.get('max_views')
1420 if max_views is not None and view_count > max_views:
1421 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
1422 if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
1423 return 'Skipping "%s" because it is age restricted' % video_title
8b0d7497 1424
8f18aca8 1425 match_filter = self.params.get('match_filter')
fe2ce85a 1426 if match_filter is None:
1427 return None
1428
1429 cancelled = None
1430 try:
8f18aca8 1431 try:
1432 ret = match_filter(info_dict, incomplete=incomplete)
1433 except TypeError:
1434 # For backward compatibility
1435 ret = None if incomplete else match_filter(info_dict)
fe2ce85a 1436 except DownloadCancelled as err:
1437 if err.msg is not NO_DEFAULT:
1438 raise
1439 ret, cancelled = err.msg, err
1440
1441 if ret is NO_DEFAULT:
1442 while True:
1443 filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
1444 reply = input(self._format_screen(
1445 f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
1446 if reply in {'y', ''}:
1447 return None
1448 elif reply == 'n':
1449 if cancelled:
1450 raise type(cancelled)(f'Skipping {video_title}')
1451 return f'Skipping {video_title}'
1452 return ret
8b0d7497 1453
c77495e3 1454 if self.in_download_archive(info_dict):
1455 reason = '%s has already been recorded in the archive' % video_title
1456 break_opt, break_err = 'break_on_existing', ExistingVideoReached
1457 else:
fe2ce85a 1458 try:
1459 reason = check_filter()
1460 except DownloadCancelled as e:
1461 reason, break_opt, break_err = e.msg, 'match_filter', type(e)
1462 else:
1463 break_opt, break_err = 'break_on_reject', RejectedVideoReached
8b0d7497 1464 if reason is not None:
120fe513 1465 if not silent:
1466 self.to_screen('[download] ' + reason)
c77495e3 1467 if self.params.get(break_opt, False):
1468 raise break_err()
8b0d7497 1469 return reason
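# Assumed CLI-level example of the filtering above: --match-filters
# "duration > 60 & !is_live" builds a match_filter that makes check_filter()
# skip short or live entries, while --break-on-existing and --break-on-reject
# escalate such a skip into stopping the whole run via the break_err raised
# at the end of this method.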
fe7e0c98 1470
b6c45014
JMF
1471 @staticmethod
1472 def add_extra_info(info_dict, extra_info):
1473 '''Set the keys from extra_info in info dict if they are missing'''
1474 for key, value in extra_info.items():
1475 info_dict.setdefault(key, value)
1476
409e1828 1477 def extract_info(self, url, download=True, ie_key=None, extra_info=None,
61aa5ba3 1478 process=True, force_generic_extractor=False):
41d1cca3 1479 """
17ffed18 1480 Extract and return the information dictionary of the URL
41d1cca3 1481
1482 Arguments:
17ffed18 1483 @param url URL to extract
41d1cca3 1484
1485 Keyword arguments:
17ffed18 1486 @param download Whether to download videos
1487 @param process Whether to resolve all unresolved references (URLs, playlist items).
1488 Must be True for download to work
1489 @param ie_key Use only the extractor with this key
1490
1491 @param extra_info Dictionary containing the extra values to add to the info (For internal use only)
1492 @param force_generic_extractor Force using the generic extractor (Deprecated; use ie_key='Generic')
41d1cca3 1493 """
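# Usage sketch (assumed, roughly mirroring the embedding example in the
# project README; the url variable is a placeholder):
#   with YoutubeDL({'format': 'bestaudio'}) as ydl:
#       info = ydl.extract_info(url, download=False)
# With process=True (the default), the returned info dict already has all
# nested references resolved.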
fe7e0c98 1494
409e1828 1495 if extra_info is None:
1496 extra_info = {}
1497
61aa5ba3 1498 if not ie_key and force_generic_extractor:
d22dec74
S
1499 ie_key = 'Generic'
1500
8222d8de 1501 if ie_key:
fe7866d0 1502 ies = {ie_key: self._ies[ie_key]} if ie_key in self._ies else {}
8222d8de
JMF
1503 else:
1504 ies = self._ies
1505
fe7866d0 1506 for key, ie in ies.items():
8222d8de
JMF
1507 if not ie.suitable(url):
1508 continue
1509
1510 if not ie.working():
6febd1c1
PH
1511 self.report_warning('The program functionality for this site has been marked as broken, '
1512 'and will probably not work.')
8222d8de 1513
1151c407 1514 temp_id = ie.get_temp_id(url)
fe7866d0 1515 if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': key}):
1516 self.to_screen(f'[{key}] {temp_id}: has already been recorded in the archive')
5e5be0c0 1517 if self.params.get('break_on_existing', False):
1518 raise ExistingVideoReached()
a0566bbf 1519 break
fe7866d0 1520 return self.__extract_info(url, self.get_info_extractor(key), download, extra_info, process)
a0566bbf 1521 else:
fe7866d0 1522 extractors_restricted = self.params.get('allowed_extractors') not in (None, ['default'])
1523 self.report_error(f'No suitable extractor{format_field(ie_key, None, " (%s)")} found for URL {url}',
1524 tb=False if extractors_restricted else None)
a0566bbf 1525
7e88d7d7 1526 def _handle_extraction_exceptions(func):
b5ae35ee 1527 @functools.wraps(func)
a0566bbf 1528 def wrapper(self, *args, **kwargs):
6da22e7d 1529 while True:
1530 try:
1531 return func(self, *args, **kwargs)
1532 except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
8222d8de 1533 raise
6da22e7d 1534 except ReExtractInfo as e:
1535 if e.expected:
1536 self.to_screen(f'{e}; Re-extracting data')
1537 else:
1538 self.to_stderr('\r')
1539 self.report_warning(f'{e}; Re-extracting data')
1540 continue
1541 except GeoRestrictedError as e:
1542 msg = e.msg
1543 if e.countries:
1544 msg += '\nThis video is available in %s.' % ', '.join(
1545 map(ISO3166Utils.short2full, e.countries))
1546 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around this.'
1547 self.report_error(msg)
1548 except ExtractorError as e: # An error we somewhat expected
1549 self.report_error(str(e), e.format_traceback())
1550 except Exception as e:
1551 if self.params.get('ignoreerrors'):
1552 self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
1553 else:
1554 raise
1555 break
a0566bbf 1556 return wrapper
1557
693f0600 1558 def _wait_for_video(self, ie_result={}):
f2ebc5c7 1559 if (not self.params.get('wait_for_video')
1560 or ie_result.get('_type', 'video') != 'video'
1561 or ie_result.get('formats') or ie_result.get('url')):
1562 return
1563
1564 format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
1565 last_msg = ''
1566
1567 def progress(msg):
1568 nonlocal last_msg
a7dc6a89 1569 full_msg = f'{msg}\n'
1570 if not self.params.get('noprogress'):
1571 full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
1572 elif last_msg:
1573 return
1574 self.to_screen(full_msg, skip_eol=True)
f2ebc5c7 1575 last_msg = msg
1576
1577 min_wait, max_wait = self.params.get('wait_for_video')
1578 diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
1579 if diff is None and ie_result.get('live_status') == 'is_upcoming':
16c620bc 1580 diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
f2ebc5c7 1581 self.report_warning('Release time of video is not known')
693f0600 1582 elif ie_result and (diff or 0) <= 0:
f2ebc5c7 1583 self.report_warning('Video should already be available according to extracted info')
38d79fd1 1584 diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
f2ebc5c7 1585 self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')
1586
1587 wait_till = time.time() + diff
1588 try:
1589 while True:
1590 diff = wait_till - time.time()
1591 if diff <= 0:
1592 progress('')
1593 raise ReExtractInfo('[wait] Wait period ended', expected=True)
1594 progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
1595 time.sleep(1)
1596 except KeyboardInterrupt:
1597 progress('')
1598 raise ReExtractInfo('[wait] Interrupted by user', expected=True)
1599 except BaseException as e:
1600 if not isinstance(e, ReExtractInfo):
1601 self.to_screen('')
1602 raise
1603
7e88d7d7 1604 @_handle_extraction_exceptions
58f197b7 1605 def __extract_info(self, url, ie, download, extra_info, process):
693f0600 1606 try:
1607 ie_result = ie.extract(url)
1608 except UserNotLive as e:
1609 if process:
1610 if self.params.get('wait_for_video'):
1611 self.report_warning(e)
1612 self._wait_for_video()
1613 raise
a0566bbf 1614 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
cb794ee0 1615 self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
a0566bbf 1616 return
1617 if isinstance(ie_result, list):
1618 # Backwards compatibility: old IE result format
1619 ie_result = {
1620 '_type': 'compat_list',
1621 'entries': ie_result,
1622 }
e37d0efb 1623 if extra_info.get('original_url'):
1624 ie_result.setdefault('original_url', extra_info['original_url'])
a0566bbf 1625 self.add_default_extra_info(ie_result, ie, url)
1626 if process:
f2ebc5c7 1627 self._wait_for_video(ie_result)
a0566bbf 1628 return self.process_ie_result(ie_result, download, extra_info)
8222d8de 1629 else:
a0566bbf 1630 return ie_result
fe7e0c98 1631
ea38e55f 1632 def add_default_extra_info(self, ie_result, ie, url):
6033d980 1633 if url is not None:
1634 self.add_extra_info(ie_result, {
1635 'webpage_url': url,
1636 'original_url': url,
57ebfca3 1637 })
1638 webpage_url = ie_result.get('webpage_url')
1639 if webpage_url:
1640 self.add_extra_info(ie_result, {
1641 'webpage_url_basename': url_basename(webpage_url),
1642 'webpage_url_domain': get_domain(webpage_url),
6033d980 1643 })
1644 if ie is not None:
1645 self.add_extra_info(ie_result, {
1646 'extractor': ie.IE_NAME,
1647 'extractor_key': ie.ie_key(),
1648 })
ea38e55f 1649
58adec46 1650 def process_ie_result(self, ie_result, download=True, extra_info=None):
8222d8de
JMF
1651 """
1652 Take the result of the ie (may be modified) and resolve all unresolved
1653 references (URLs, playlist items).
1654
1655 It will also download the videos if 'download'.
1656 Returns the resolved ie_result.
1657 """
58adec46 1658 if extra_info is None:
1659 extra_info = {}
e8ee972c
PH
1660 result_type = ie_result.get('_type', 'video')
1661
057a5206 1662 if result_type in ('url', 'url_transparent'):
8f97a15d 1663 ie_result['url'] = sanitize_url(
1664 ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
8791e78c 1665 if ie_result.get('original_url') and not extra_info.get('original_url'):
1666 extra_info = {'original_url': ie_result['original_url'], **extra_info}
e37d0efb 1667
057a5206 1668 extract_flat = self.params.get('extract_flat', False)
3089bc74
S
1669 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1670 or extract_flat is True):
ecb54191 1671 info_copy = ie_result.copy()
6033d980 1672 ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
360167b9 1673 if ie and not ie_result.get('id'):
4614bc22 1674 info_copy['id'] = ie.get_temp_id(ie_result['url'])
6033d980 1675 self.add_default_extra_info(info_copy, ie, ie_result['url'])
4614bc22 1676 self.add_extra_info(info_copy, extra_info)
b5475f11 1677 info_copy, _ = self.pre_process(info_copy)
94dc8604 1678 self._fill_common_fields(info_copy, False)
17060584 1679 self.__forced_printings(info_copy)
415f8d51 1680 self._raise_pending_errors(info_copy)
4614bc22 1681 if self.params.get('force_write_download_archive', False):
1682 self.record_download_archive(info_copy)
e8ee972c
PH
1683 return ie_result
1684
8222d8de 1685 if result_type == 'video':
b6c45014 1686 self.add_extra_info(ie_result, extra_info)
9c2b75b5 1687 ie_result = self.process_video_result(ie_result, download=download)
415f8d51 1688 self._raise_pending_errors(ie_result)
28b0eb0f 1689 additional_urls = (ie_result or {}).get('additional_urls')
9c2b75b5 1690 if additional_urls:
e9f4ccd1 1691 # TODO: Improve MetadataParserPP to allow setting a list
14f25df2 1692 if isinstance(additional_urls, str):
9c2b75b5 1693 additional_urls = [additional_urls]
1694 self.to_screen(
1695 '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
1696 self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
1697 ie_result['additional_entries'] = [
1698 self.extract_info(
b69fd25c 1699 url, download, extra_info=extra_info,
9c2b75b5 1700 force_generic_extractor=self.params.get('force_generic_extractor'))
1701 for url in additional_urls
1702 ]
1703 return ie_result
8222d8de
JMF
1704 elif result_type == 'url':
1705 # We have to add extra_info to the results because it may be
1706 # contained in a playlist
07cce701 1707 return self.extract_info(
1708 ie_result['url'], download,
1709 ie_key=ie_result.get('ie_key'),
1710 extra_info=extra_info)
7fc3fa05
PH
1711 elif result_type == 'url_transparent':
1712 # Use the information from the embedding page
1713 info = self.extract_info(
1714 ie_result['url'], ie_key=ie_result.get('ie_key'),
1715 extra_info=extra_info, download=False, process=False)
1716
1640eb09
S
1717 # extract_info may return None when ignoreerrors is enabled and
1718 # extraction failed with an error; don't crash and return early
1719 # in this case
1720 if not info:
1721 return info
1722
3975b4d2 1723 exempted_fields = {'_type', 'url', 'ie_key'}
1724 if not ie_result.get('section_end') and ie_result.get('section_start') is None:
1725 # For video clips, the id etc of the clip extractor should be used
1726 exempted_fields |= {'id', 'extractor', 'extractor_key'}
1727
412c617d 1728 new_result = info.copy()
3975b4d2 1729 new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))
7fc3fa05 1730
0563f7ac
S
1731 # Extracted info may not be a video result (i.e.
1732 # info.get('_type', 'video') != video) but rather an url or
1733 # url_transparent. In such cases outer metadata (from ie_result)
1734 # should be propagated to inner one (info). For this to happen
1735 # _type of info should be overridden with url_transparent. This
067aa17e 1736 # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
0563f7ac
S
1737 if new_result.get('_type') == 'url':
1738 new_result['_type'] = 'url_transparent'
7fc3fa05
PH
1739
1740 return self.process_ie_result(
1741 new_result, download=download, extra_info=extra_info)
40fcba5e 1742 elif result_type in ('playlist', 'multi_video'):
30a074c2 1743 # Protect from infinite recursion due to recursively nested playlists
1744 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
0bd5a039 1745 webpage_url = ie_result.get('webpage_url') # Playlists may not have webpage_url
1746 if webpage_url and webpage_url in self._playlist_urls:
7e85e872 1747 self.to_screen(
30a074c2 1748 '[download] Skipping already downloaded playlist: %s'
1749 % (ie_result.get('title') or ie_result.get('id')))
1750 return
7e85e872 1751
30a074c2 1752 self._playlist_level += 1
1753 self._playlist_urls.add(webpage_url)
03f83004 1754 self._fill_common_fields(ie_result, False)
bc516a3f 1755 self._sanitize_thumbnails(ie_result)
30a074c2 1756 try:
1757 return self.__process_playlist(ie_result, download)
1758 finally:
1759 self._playlist_level -= 1
1760 if not self._playlist_level:
1761 self._playlist_urls.clear()
8222d8de 1762 elif result_type == 'compat_list':
c9bf4114
PH
1763 self.report_warning(
1764 'Extractor %s returned a compat_list result. '
1765 'It needs to be updated.' % ie_result.get('extractor'))
5f6a1245 1766
8222d8de 1767 def _fixup(r):
b868936c 1768 self.add_extra_info(r, {
1769 'extractor': ie_result['extractor'],
1770 'webpage_url': ie_result['webpage_url'],
1771 'webpage_url_basename': url_basename(ie_result['webpage_url']),
0bb322b9 1772 'webpage_url_domain': get_domain(ie_result['webpage_url']),
b868936c 1773 'extractor_key': ie_result['extractor_key'],
1774 })
8222d8de
JMF
1775 return r
1776 ie_result['entries'] = [
b6c45014 1777 self.process_ie_result(_fixup(r), download, extra_info)
8222d8de
JMF
1778 for r in ie_result['entries']
1779 ]
1780 return ie_result
1781 else:
1782 raise Exception('Invalid result type: %s' % result_type)
1783
e92caff5 1784 def _ensure_dir_exists(self, path):
1785 return make_dir(path, self.report_error)
1786
3b603dbd 1787 @staticmethod
3bec830a 1788 def _playlist_infodict(ie_result, strict=False, **kwargs):
1789 info = {
1790 'playlist_count': ie_result.get('playlist_count'),
3b603dbd 1791 'playlist': ie_result.get('title') or ie_result.get('id'),
1792 'playlist_id': ie_result.get('id'),
1793 'playlist_title': ie_result.get('title'),
1794 'playlist_uploader': ie_result.get('uploader'),
1795 'playlist_uploader_id': ie_result.get('uploader_id'),
3b603dbd 1796 **kwargs,
1797 }
3bec830a 1798 if strict:
1799 return info
0bd5a039 1800 if ie_result.get('webpage_url'):
1801 info.update({
1802 'webpage_url': ie_result['webpage_url'],
1803 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1804 'webpage_url_domain': get_domain(ie_result['webpage_url']),
1805 })
3bec830a 1806 return {
1807 **info,
1808 'playlist_index': 0,
59d7de0d 1809 '__last_playlist_index': max(ie_result.get('requested_entries') or (0, 0)),
3bec830a 1810 'extractor': ie_result['extractor'],
3bec830a 1811 'extractor_key': ie_result['extractor_key'],
1812 }
3b603dbd 1813
30a074c2 1814 def __process_playlist(self, ie_result, download):
7e88d7d7 1815 """Process each entry in the playlist"""
f5ea4748 1816 assert ie_result['_type'] in ('playlist', 'multi_video')
1817
3bec830a 1818 common_info = self._playlist_infodict(ie_result, strict=True)
3955b207 1819 title = common_info.get('playlist') or '<Untitled>'
3bec830a 1820 if self._match_entry(common_info, incomplete=True) is not None:
1821 return
c6e07cf1 1822 self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')
f0d785d3 1823
7e88d7d7 1824 all_entries = PlaylistEntries(self, ie_result)
7e9a6125 1825 entries = orderedSet(all_entries.get_requested_items(), lazy=True)
1826
1827 lazy = self.params.get('lazy_playlist')
1828 if lazy:
1829 resolved_entries, n_entries = [], 'N/A'
1830 ie_result['requested_entries'], ie_result['entries'] = None, None
1831 else:
1832 entries = resolved_entries = list(entries)
1833 n_entries = len(resolved_entries)
1834 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
1835 if not ie_result.get('playlist_count'):
1836 # Better to do this after potentially exhausting entries
1837 ie_result['playlist_count'] = all_entries.get_full_count()
498f5606 1838
0647d925 1839 extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
1840 ie_copy = collections.ChainMap(ie_result, extra)
3bec830a 1841
e08a85d8 1842 _infojson_written = False
0bfc53d0 1843 write_playlist_files = self.params.get('allow_playlist_files', True)
1844 if write_playlist_files and self.params.get('list_thumbnails'):
1845 self.list_thumbnails(ie_result)
1846 if write_playlist_files and not self.params.get('simulate'):
e08a85d8 1847 _infojson_written = self._write_info_json(
1848 'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
1849 if _infojson_written is None:
80c03fa9 1850 return
1851 if self._write_description('playlist', ie_result,
1852 self.prepare_filename(ie_copy, 'pl_description')) is None:
1853 return
681de68e 1854 # TODO: This should be passed to ThumbnailsConvertor if necessary
3bec830a 1855 self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))
30a074c2 1856
7e9a6125 1857 if lazy:
1858 if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
1859 self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
1860 elif self.params.get('playlistreverse'):
1861 entries.reverse()
1862 elif self.params.get('playlistrandom'):
30a074c2 1863 random.shuffle(entries)
1864
bc5c2f8a 1865 self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} items'
7e88d7d7 1866 f'{format_field(ie_result, "playlist_count", " of %s")}')
30a074c2 1867
134c913c 1868 keep_resolved_entries = self.params.get('extract_flat') != 'discard'
1869 if self.params.get('extract_flat') == 'discard_in_playlist':
1870 keep_resolved_entries = ie_result['_type'] != 'playlist'
1871 if keep_resolved_entries:
1872 self.write_debug('The information of all playlist entries will be held in memory')
1873
26e2805c 1874 failures = 0
1875 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
7e9a6125 1876 for i, (playlist_index, entry) in enumerate(entries):
1877 if lazy:
1878 resolved_entries.append((playlist_index, entry))
3bec830a 1879 if not entry:
7e88d7d7 1880 continue
1881
7e88d7d7 1882 entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
7e9a6125 1883 if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
1884 playlist_index = ie_result['requested_entries'][i]
1885
0647d925 1886 entry_copy = collections.ChainMap(entry, {
3bec830a 1887 **common_info,
3955b207 1888 'n_entries': int_or_none(n_entries),
71729754 1889 'playlist_index': playlist_index,
7e9a6125 1890 'playlist_autonumber': i + 1,
0647d925 1891 })
3bec830a 1892
0647d925 1893 if self._match_entry(entry_copy, incomplete=True) is not None:
f0ad6f8c 1894 # For compatibility with youtube-dl. See https://github.com/yt-dlp/yt-dlp/issues/4369
1895 resolved_entries[i] = (playlist_index, NO_DEFAULT)
3bec830a 1896 continue
1897
bc5c2f8a 1898 self.to_screen('[download] Downloading item %s of %s' % (
3bec830a 1899 self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
1900
ec54bd43 1901 entry_result = self.__process_iterable_entry(entry, download, collections.ChainMap({
a6ca61d4 1902 'playlist_index': playlist_index,
1903 'playlist_autonumber': i + 1,
ec54bd43 1904 }, extra))
26e2805c 1905 if not entry_result:
1906 failures += 1
1907 if failures >= max_failures:
1908 self.report_error(
7e88d7d7 1909 f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
26e2805c 1910 break
134c913c 1911 if keep_resolved_entries:
1912 resolved_entries[i] = (playlist_index, entry_result)
7e88d7d7 1913
1914 # Update with processed data
f0ad6f8c 1915 ie_result['entries'] = [e for _, e in resolved_entries if e is not NO_DEFAULT]
bc5c2f8a 1916 ie_result['requested_entries'] = [i for i, e in resolved_entries if e is not NO_DEFAULT]
1917 if ie_result['requested_entries'] == try_call(lambda: list(range(1, ie_result['playlist_count'] + 1))):
1918 # Do not set for full playlist
1919 ie_result.pop('requested_entries')
e08a85d8 1920
1921 # Write the updated info to json
cb96c5be 1922 if _infojson_written is True and self._write_info_json(
e08a85d8 1923 'updated playlist', ie_result,
1924 self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
1925 return
ca30f449 1926
ed5835b4 1927 ie_result = self.run_all_pps('playlist', ie_result)
7e88d7d7 1928 self.to_screen(f'[download] Finished downloading playlist: {title}')
30a074c2 1929 return ie_result
1930
7e88d7d7 1931 @_handle_extraction_exceptions
a0566bbf 1932 def __process_iterable_entry(self, entry, download, extra_info):
1933 return self.process_ie_result(
1934 entry, download=download, extra_info=extra_info)
1935
67134eab
JMF
1936 def _build_format_filter(self, filter_spec):
1937 " Returns a function to filter the formats according to the filter_spec "
083c9df9
PH
1938
1939 OPERATORS = {
1940 '<': operator.lt,
1941 '<=': operator.le,
1942 '>': operator.gt,
1943 '>=': operator.ge,
1944 '=': operator.eq,
1945 '!=': operator.ne,
1946 }
67134eab 1947 operator_rex = re.compile(r'''(?x)\s*
c3f624ef 1948 (?P<key>[\w.-]+)\s*
187986a8 1949 (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1950 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
083c9df9 1951 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
187986a8 1952 m = operator_rex.fullmatch(filter_spec)
9ddb6925
S
1953 if m:
1954 try:
1955 comparison_value = int(m.group('value'))
1956 except ValueError:
1957 comparison_value = parse_filesize(m.group('value'))
1958 if comparison_value is None:
1959 comparison_value = parse_filesize(m.group('value') + 'B')
1960 if comparison_value is None:
1961 raise ValueError(
1962 'Invalid value %r in format specification %r' % (
67134eab 1963 m.group('value'), filter_spec))
9ddb6925
S
1964 op = OPERATORS[m.group('op')]
1965
083c9df9 1966 if not m:
9ddb6925
S
1967 STR_OPERATORS = {
1968 '=': operator.eq,
10d33b34
YCH
1969 '^=': lambda attr, value: attr.startswith(value),
1970 '$=': lambda attr, value: attr.endswith(value),
1971 '*=': lambda attr, value: value in attr,
1ce9a3cb 1972 '~=': lambda attr, value: value.search(attr) is not None
9ddb6925 1973 }
187986a8 1974 str_operator_rex = re.compile(r'''(?x)\s*
1975 (?P<key>[a-zA-Z0-9._-]+)\s*
1ce9a3cb
LF
1976 (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
1977 (?P<quote>["'])?
1978 (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
1979 (?(quote)(?P=quote))\s*
9ddb6925 1980 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
187986a8 1981 m = str_operator_rex.fullmatch(filter_spec)
9ddb6925 1982 if m:
1ce9a3cb
LF
1983 if m.group('op') == '~=':
1984 comparison_value = re.compile(m.group('value'))
1985 else:
1986 comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
2cc779f4
S
1987 str_op = STR_OPERATORS[m.group('op')]
1988 if m.group('negation'):
e118a879 1989 op = lambda attr, value: not str_op(attr, value)
2cc779f4
S
1990 else:
1991 op = str_op
083c9df9 1992
9ddb6925 1993 if not m:
187986a8 1994 raise SyntaxError('Invalid filter specification %r' % filter_spec)
083c9df9
PH
1995
1996 def _filter(f):
1997 actual_value = f.get(m.group('key'))
1998 if actual_value is None:
1999 return m.group('none_inclusive')
2000 return op(actual_value, comparison_value)
67134eab
JMF
2001 return _filter
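# Illustrative filter specs the regexes above accept (assumed examples,
# matching the bracketed "-f" filter syntax): numeric comparisons such as
# "filesize>100M" or "height<=?720" (the "?" also keeps formats where the
# field is unknown), and string comparisons such as "ext=mp4" or
# "format_id*=dash", which can be negated as "format_id!*=dash".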
2002
9f1a1c36 2003 def _check_formats(self, formats):
2004 for f in formats:
2005 self.to_screen('[info] Testing format %s' % f['format_id'])
75689fe5 2006 path = self.get_output_path('temp')
2007 if not self._ensure_dir_exists(f'{path}/'):
2008 continue
2009 temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
9f1a1c36 2010 temp_file.close()
2011 try:
2012 success, _ = self.dl(temp_file.name, f, test=True)
8a82af35 2013 except (DownloadError, OSError, ValueError) + network_exceptions:
9f1a1c36 2014 success = False
2015 finally:
2016 if os.path.exists(temp_file.name):
2017 try:
2018 os.remove(temp_file.name)
2019 except OSError:
2020 self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
2021 if success:
2022 yield f
2023 else:
2024 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
2025
0017d9ad 2026 def _default_format_spec(self, info_dict, download=True):
0017d9ad 2027
af0f7428
S
2028 def can_merge():
2029 merger = FFmpegMergerPP(self)
2030 return merger.available and merger.can_merge()
2031
91ebc640 2032 prefer_best = (
b7b04c78 2033 not self.params.get('simulate')
91ebc640 2034 and download
2035 and (
2036 not can_merge()
21633673 2037 or info_dict.get('is_live') and not self.params.get('live_from_start')
bf1824b3 2038 or self.params['outtmpl']['default'] == '-'))
53ed7066 2039 compat = (
2040 prefer_best
2041 or self.params.get('allow_multiple_audio_streams', False)
8a82af35 2042 or 'format-spec' in self.params['compat_opts'])
91ebc640 2043
2044 return (
53ed7066 2045 'best/bestvideo+bestaudio' if prefer_best
2046 else 'bestvideo*+bestaudio/best' if not compat
91ebc640 2047 else 'bestvideo+bestaudio/best')
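# Assumed summary of the branches above: in a normal download with a working
# ffmpeg this returns 'bestvideo*+bestaudio/best'; when merging is not
# possible (no ffmpeg, a live stream, or output to stdout) it prefers a
# pre-merged format via 'best/bestvideo+bestaudio'; the compat options force
# the older 'bestvideo+bestaudio/best'.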
0017d9ad 2048
67134eab
JMF
2049 def build_format_selector(self, format_spec):
2050 def syntax_error(note, start):
2051 message = (
2052 'Invalid format specification: '
86e5f3ed 2053 '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
67134eab
JMF
2054 return SyntaxError(message)
2055
2056 PICKFIRST = 'PICKFIRST'
2057 MERGE = 'MERGE'
2058 SINGLE = 'SINGLE'
0130afb7 2059 GROUP = 'GROUP'
67134eab
JMF
2060 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
2061
91ebc640 2062 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
2063 'video': self.params.get('allow_multiple_video_streams', False)}
909d24dd 2064
9f1a1c36 2065 check_formats = self.params.get('check_formats') == 'selected'
e8e73840 2066
67134eab
JMF
2067 def _parse_filter(tokens):
2068 filter_parts = []
6f2287cb 2069 for type, string_, start, _, _ in tokens:
2070 if type == tokenize.OP and string_ == ']':
67134eab
JMF
2071 return ''.join(filter_parts)
2072 else:
6f2287cb 2073 filter_parts.append(string_)
67134eab 2074
232541df 2075 def _remove_unused_ops(tokens):
62b58c09
L
2076 # Remove operators that we don't use and join them with the surrounding strings.
2077 # E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
232541df
JMF
2078 ALLOWED_OPS = ('/', '+', ',', '(', ')')
2079 last_string, last_start, last_end, last_line = None, None, None, None
6f2287cb 2080 for type, string_, start, end, line in tokens:
2081 if type == tokenize.OP and string_ == '[':
232541df
JMF
2082 if last_string:
2083 yield tokenize.NAME, last_string, last_start, last_end, last_line
2084 last_string = None
6f2287cb 2085 yield type, string_, start, end, line
232541df 2086 # everything inside brackets will be handled by _parse_filter
6f2287cb 2087 for type, string_, start, end, line in tokens:
2088 yield type, string_, start, end, line
2089 if type == tokenize.OP and string_ == ']':
232541df 2090 break
6f2287cb 2091 elif type == tokenize.OP and string_ in ALLOWED_OPS:
232541df
JMF
2092 if last_string:
2093 yield tokenize.NAME, last_string, last_start, last_end, last_line
2094 last_string = None
6f2287cb 2095 yield type, string_, start, end, line
232541df
JMF
2096 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
2097 if not last_string:
6f2287cb 2098 last_string = string_
232541df
JMF
2099 last_start = start
2100 last_end = end
2101 else:
6f2287cb 2102 last_string += string_
232541df
JMF
2103 if last_string:
2104 yield tokenize.NAME, last_string, last_start, last_end, last_line
2105
cf2ac6df 2106 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
67134eab
JMF
2107 selectors = []
2108 current_selector = None
6f2287cb 2109 for type, string_, start, _, _ in tokens:
67134eab
JMF
2110 # ENCODING is only defined in python 3.x
2111 if type == getattr(tokenize, 'ENCODING', None):
2112 continue
2113 elif type in [tokenize.NAME, tokenize.NUMBER]:
6f2287cb 2114 current_selector = FormatSelector(SINGLE, string_, [])
67134eab 2115 elif type == tokenize.OP:
6f2287cb 2116 if string_ == ')':
cf2ac6df
JMF
2117 if not inside_group:
2118 # ')' will be handled by the parentheses group
2119 tokens.restore_last_token()
67134eab 2120 break
6f2287cb 2121 elif inside_merge and string_ in ['/', ',']:
0130afb7
JMF
2122 tokens.restore_last_token()
2123 break
6f2287cb 2124 elif inside_choice and string_ == ',':
cf2ac6df
JMF
2125 tokens.restore_last_token()
2126 break
6f2287cb 2127 elif string_ == ',':
0a31a350
JMF
2128 if not current_selector:
2129 raise syntax_error('"," must follow a format selector', start)
67134eab
JMF
2130 selectors.append(current_selector)
2131 current_selector = None
6f2287cb 2132 elif string_ == '/':
d96d604e
JMF
2133 if not current_selector:
2134 raise syntax_error('"/" must follow a format selector', start)
67134eab 2135 first_choice = current_selector
cf2ac6df 2136 second_choice = _parse_format_selection(tokens, inside_choice=True)
f5f4a27a 2137 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
6f2287cb 2138 elif string_ == '[':
67134eab
JMF
2139 if not current_selector:
2140 current_selector = FormatSelector(SINGLE, 'best', [])
2141 format_filter = _parse_filter(tokens)
2142 current_selector.filters.append(format_filter)
6f2287cb 2143 elif string_ == '(':
0130afb7
JMF
2144 if current_selector:
2145 raise syntax_error('Unexpected "("', start)
cf2ac6df
JMF
2146 group = _parse_format_selection(tokens, inside_group=True)
2147 current_selector = FormatSelector(GROUP, group, [])
6f2287cb 2148 elif string_ == '+':
d03cfdce 2149 if not current_selector:
2150 raise syntax_error('Unexpected "+"', start)
2151 selector_1 = current_selector
2152 selector_2 = _parse_format_selection(tokens, inside_merge=True)
2153 if not selector_2:
2154 raise syntax_error('Expected a selector', start)
2155 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
67134eab 2156 else:
6f2287cb 2157 raise syntax_error(f'Operator not recognized: "{string_}"', start)
67134eab
JMF
2158 elif type == tokenize.ENDMARKER:
2159 break
2160 if current_selector:
2161 selectors.append(current_selector)
2162 return selectors
2163
f8d4ad9a 2164 def _merge(formats_pair):
2165 format_1, format_2 = formats_pair
2166
2167 formats_info = []
2168 formats_info.extend(format_1.get('requested_formats', (format_1,)))
2169 formats_info.extend(format_2.get('requested_formats', (format_2,)))
2170
2171 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
551f9388 2172 get_no_more = {'video': False, 'audio': False}
f8d4ad9a 2173 for (i, fmt_info) in enumerate(formats_info):
551f9388 2174 if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
2175 formats_info.pop(i)
2176 continue
2177 for aud_vid in ['audio', 'video']:
f8d4ad9a 2178 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
2179 if get_no_more[aud_vid]:
2180 formats_info.pop(i)
f5510afe 2181 break
f8d4ad9a 2182 get_no_more[aud_vid] = True
2183
2184 if len(formats_info) == 1:
2185 return formats_info[0]
2186
2187 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
2188 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
2189
2190 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
2191 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
2192
fc61aff4
LL
2193 output_ext = get_compatible_ext(
2194 vcodecs=[f.get('vcodec') for f in video_fmts],
2195 acodecs=[f.get('acodec') for f in audio_fmts],
2196 vexts=[f['ext'] for f in video_fmts],
2197 aexts=[f['ext'] for f in audio_fmts],
2198 preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
2199 or self.params.get('prefer_free_formats') and ('webm', 'mkv')))
f8d4ad9a 2200
975a0d0d 2201 filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
2202
f8d4ad9a 2203 new_dict = {
2204 'requested_formats': formats_info,
975a0d0d 2205 'format': '+'.join(filtered('format')),
2206 'format_id': '+'.join(filtered('format_id')),
f8d4ad9a 2207 'ext': output_ext,
975a0d0d 2208 'protocol': '+'.join(map(determine_protocol, formats_info)),
093a1710 2209 'language': '+'.join(orderedSet(filtered('language'))) or None,
2210 'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
2211 'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
975a0d0d 2212 'tbr': sum(filtered('tbr', 'vbr', 'abr')),
f8d4ad9a 2213 }
2214
2215 if the_only_video:
2216 new_dict.update({
2217 'width': the_only_video.get('width'),
2218 'height': the_only_video.get('height'),
2219 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
2220 'fps': the_only_video.get('fps'),
49a57e70 2221 'dynamic_range': the_only_video.get('dynamic_range'),
f8d4ad9a 2222 'vcodec': the_only_video.get('vcodec'),
2223 'vbr': the_only_video.get('vbr'),
2224 'stretched_ratio': the_only_video.get('stretched_ratio'),
105bfd90 2225 'aspect_ratio': the_only_video.get('aspect_ratio'),
f8d4ad9a 2226 })
2227
2228 if the_only_audio:
2229 new_dict.update({
2230 'acodec': the_only_audio.get('acodec'),
2231 'abr': the_only_audio.get('abr'),
975a0d0d 2232 'asr': the_only_audio.get('asr'),
b8ed0f15 2233 'audio_channels': the_only_audio.get('audio_channels')
f8d4ad9a 2234 })
2235
2236 return new_dict
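# Assumed example of the merge result: combining a video-only and an
# audio-only format (e.g. a "137+140" request) produces a synthetic format
# whose 'format_id' is "137+140", whose 'ext' comes from
# get_compatible_ext() and whose 'tbr'/'filesize_approx' are summed from
# the two inputs.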
2237
e8e73840 2238 def _check_formats(formats):
981052c9 2239 if not check_formats:
2240 yield from formats
b5ac45b1 2241 return
9f1a1c36 2242 yield from self._check_formats(formats)
e8e73840 2243
67134eab 2244 def _build_selector_function(selector):
909d24dd 2245 if isinstance(selector, list): # ,
67134eab
JMF
2246 fs = [_build_selector_function(s) for s in selector]
2247
317f7ab6 2248 def selector_function(ctx):
67134eab 2249 for f in fs:
981052c9 2250 yield from f(ctx)
67134eab 2251 return selector_function
909d24dd 2252
2253 elif selector.type == GROUP: # ()
0130afb7 2254 selector_function = _build_selector_function(selector.selector)
909d24dd 2255
2256 elif selector.type == PICKFIRST: # /
67134eab
JMF
2257 fs = [_build_selector_function(s) for s in selector.selector]
2258
317f7ab6 2259 def selector_function(ctx):
67134eab 2260 for f in fs:
317f7ab6 2261 picked_formats = list(f(ctx))
67134eab
JMF
2262 if picked_formats:
2263 return picked_formats
2264 return []
67134eab 2265
981052c9 2266 elif selector.type == MERGE: # +
2267 selector_1, selector_2 = map(_build_selector_function, selector.selector)
2268
2269 def selector_function(ctx):
adbc4ec4 2270 for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
981052c9 2271 yield _merge(pair)
2272
909d24dd 2273 elif selector.type == SINGLE: # atom
598d185d 2274 format_spec = selector.selector or 'best'
909d24dd 2275
f8d4ad9a 2276 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
909d24dd 2277 if format_spec == 'all':
2278 def selector_function(ctx):
9222c381 2279 yield from _check_formats(ctx['formats'][::-1])
f8d4ad9a 2280 elif format_spec == 'mergeall':
2281 def selector_function(ctx):
316f2650 2282 formats = list(_check_formats(
2283 f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
e01d6aa4 2284 if not formats:
2285 return
921b76ca 2286 merged_format = formats[-1]
2287 for f in formats[-2::-1]:
f8d4ad9a 2288 merged_format = _merge((merged_format, f))
2289 yield merged_format
909d24dd 2290
2291 else:
85e801a9 2292 format_fallback, seperate_fallback, format_reverse, format_idx = False, None, True, 1
eff63539 2293 mobj = re.match(
2294 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
2295 format_spec)
2296 if mobj is not None:
2297 format_idx = int_or_none(mobj.group('n'), default=1)
e8e73840 2298 format_reverse = mobj.group('bw')[0] == 'b'
eff63539 2299 format_type = (mobj.group('type') or [None])[0]
2300 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
2301 format_modified = mobj.group('mod') is not None
909d24dd 2302
2303 format_fallback = not format_type and not format_modified # for b, w
8326b00a 2304 _filter_f = (
eff63539 2305 (lambda f: f.get('%scodec' % format_type) != 'none')
2306 if format_type and format_modified # bv*, ba*, wv*, wa*
2307 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
2308 if format_type # bv, ba, wv, wa
2309 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
2310 if not format_modified # b, w
8326b00a 2311 else lambda f: True) # b*, w*
2312 filter_f = lambda f: _filter_f(f) and (
2313 f.get('vcodec') != 'none' or f.get('acodec') != 'none')
67134eab 2314 else:
48ee10ee 2315 if format_spec in self._format_selection_exts['audio']:
b11c04a8 2316 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
48ee10ee 2317 elif format_spec in self._format_selection_exts['video']:
b11c04a8 2318 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
85e801a9 2319 seperate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
48ee10ee 2320 elif format_spec in self._format_selection_exts['storyboards']:
b11c04a8 2321 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
2322 else:
b5ae35ee 2323 filter_f = lambda f: f.get('format_id') == format_spec # id
909d24dd 2324
2325 def selector_function(ctx):
2326 formats = list(ctx['formats'])
909d24dd 2327 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
85e801a9 2328 if not matches:
2329 if format_fallback and ctx['incomplete_formats']:
2330 # for extractors with incomplete formats (audio only (soundcloud)
2331 # or video only (imgur)) best/worst will fall back to
2332 # best/worst {video,audio}-only format
2333 matches = formats
2334 elif seperate_fallback and not ctx['has_merged_format']:
2335 # for compatibility with youtube-dl when there is no pre-merged format
2336 matches = list(filter(seperate_fallback, formats))
981052c9 2337 matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
2338 try:
e8e73840 2339 yield matches[format_idx - 1]
4abea8ca 2340 except LazyList.IndexError:
981052c9 2341 return
083c9df9 2342
67134eab 2343 filters = [self._build_format_filter(f) for f in selector.filters]
083c9df9 2344
317f7ab6 2345 def final_selector(ctx):
adbc4ec4 2346 ctx_copy = dict(ctx)
67134eab 2347 for _filter in filters:
317f7ab6
S
2348 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
2349 return selector_function(ctx_copy)
67134eab 2350 return final_selector
083c9df9 2351
0f06bcd7 2352 stream = io.BytesIO(format_spec.encode())
0130afb7 2353 try:
f9934b96 2354 tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
0130afb7
JMF
2355 except tokenize.TokenError:
2356 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
2357
86e5f3ed 2358 class TokenIterator:
0130afb7
JMF
2359 def __init__(self, tokens):
2360 self.tokens = tokens
2361 self.counter = 0
2362
2363 def __iter__(self):
2364 return self
2365
2366 def __next__(self):
2367 if self.counter >= len(self.tokens):
2368 raise StopIteration()
2369 value = self.tokens[self.counter]
2370 self.counter += 1
2371 return value
2372
2373 next = __next__
2374
2375 def restore_last_token(self):
2376 self.counter -= 1
2377
2378 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
67134eab 2379 return _build_selector_function(parsed_selector)
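# Usage sketch (assumed; variable names are placeholders): the returned
# callable consumes a context dict rather than a bare format list, roughly
#   selector = self.build_format_selector('bv*+ba/b')
#   chosen = list(selector({'formats': formats,
#                           'has_merged_format': has_premerged,
#                           'incomplete_formats': incomplete}))
# and yields the selected (possibly merged) format dicts in request order.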
a9c58ad9 2380
e5660ee6 2381 def _calc_headers(self, info_dict):
8b7539d2 2382 res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})
955c8958 2383 if 'Youtubedl-No-Compression' in res: # deprecated
2384 res.pop('Youtubedl-No-Compression', None)
2385 res['Accept-Encoding'] = 'identity'
c487cf00 2386 cookies = self._calc_cookies(info_dict['url'])
e5660ee6
JMF
2387 if cookies:
2388 res['Cookie'] = cookies
2389
0016b84e
S
2390 if 'X-Forwarded-For' not in res:
2391 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
2392 if x_forwarded_for_ip:
2393 res['X-Forwarded-For'] = x_forwarded_for_ip
2394
e5660ee6
JMF
2395 return res
2396
c487cf00 2397 def _calc_cookies(self, url):
2398 pr = sanitized_Request(url)
e5660ee6 2399 self.cookiejar.add_cookie_header(pr)
662435f7 2400 return pr.get_header('Cookie')
e5660ee6 2401
9f1a1c36 2402 def _sort_thumbnails(self, thumbnails):
2403 thumbnails.sort(key=lambda t: (
2404 t.get('preference') if t.get('preference') is not None else -1,
2405 t.get('width') if t.get('width') is not None else -1,
2406 t.get('height') if t.get('height') is not None else -1,
2407 t.get('id') if t.get('id') is not None else '',
2408 t.get('url')))
2409
b0249bca 2410 def _sanitize_thumbnails(self, info_dict):
bc516a3f 2411 thumbnails = info_dict.get('thumbnails')
2412 if thumbnails is None:
2413 thumbnail = info_dict.get('thumbnail')
2414 if thumbnail:
2415 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
9f1a1c36 2416 if not thumbnails:
2417 return
2418
2419 def check_thumbnails(thumbnails):
2420 for t in thumbnails:
2421 self.to_screen(f'[info] Testing thumbnail {t["id"]}')
2422 try:
2423 self.urlopen(HEADRequest(t['url']))
2424 except network_exceptions as err:
2425 self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
2426 continue
2427 yield t
2428
2429 self._sort_thumbnails(thumbnails)
2430 for i, t in enumerate(thumbnails):
2431 if t.get('id') is None:
2432 t['id'] = '%d' % i
2433 if t.get('width') and t.get('height'):
2434 t['resolution'] = '%dx%d' % (t['width'], t['height'])
2435 t['url'] = sanitize_url(t['url'])
2436
2437 if self.params.get('check_formats') is True:
282f5709 2438 info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
9f1a1c36 2439 else:
2440 info_dict['thumbnails'] = thumbnails
bc516a3f 2441
94dc8604 2442 def _fill_common_fields(self, info_dict, final=True):
03f83004 2443 # TODO: move sanitization here
94dc8604 2444 if final:
7aefd19a 2445 title = info_dict['fulltitle'] = info_dict.get('title')
d4736fdb 2446 if not title:
2447 if title == '':
2448 self.write_debug('Extractor gave empty title. Creating a generic title')
2449 else:
2450 self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
1d485a1a 2451 info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'
03f83004
LNO
2452
2453 if info_dict.get('duration') is not None:
2454 info_dict['duration_string'] = formatSeconds(info_dict['duration'])
2455
2456 for ts_key, date_key in (
2457 ('timestamp', 'upload_date'),
2458 ('release_timestamp', 'release_date'),
2459 ('modified_timestamp', 'modified_date'),
2460 ):
2461 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
2462 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2463 # see http://bugs.python.org/issue1646728)
19a03940 2464 with contextlib.suppress(ValueError, OverflowError, OSError):
03f83004
LNO
2465 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
2466 info_dict[date_key] = upload_date.strftime('%Y%m%d')
03f83004
LNO
2467
2468 live_keys = ('is_live', 'was_live')
2469 live_status = info_dict.get('live_status')
2470 if live_status is None:
2471 for key in live_keys:
2472 if info_dict.get(key) is False:
2473 continue
2474 if info_dict.get(key):
2475 live_status = key
2476 break
2477 if all(info_dict.get(key) is False for key in live_keys):
2478 live_status = 'not_live'
2479 if live_status:
2480 info_dict['live_status'] = live_status
2481 for key in live_keys:
2482 if info_dict.get(key) is None:
2483 info_dict[key] = (live_status == key)
a057779d 2484 if live_status == 'post_live':
2485 info_dict['was_live'] = True
03f83004
LNO
2486
2487 # Auto generate title fields corresponding to the *_number fields when missing
2488 # in order to always have clean titles. This is very common for TV series.
2489 for field in ('chapter', 'season', 'episode'):
94dc8604 2490 if final and info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
03f83004
LNO
2491 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
2492
415f8d51 2493 def _raise_pending_errors(self, info):
2494 err = info.pop('__pending_error', None)
2495 if err:
2496 self.report_error(err, tb=False)
2497
784320c9 2498 def sort_formats(self, info_dict):
2499 formats = self._get_formats(info_dict)
784320c9 2500 formats.sort(key=FormatSorter(
c154302c 2501 self, info_dict.get('_format_sort_fields') or []).calculate_preference)
784320c9 2502
dd82ffea
JMF
2503 def process_video_result(self, info_dict, download=True):
2504 assert info_dict.get('_type', 'video') == 'video'
9c906919 2505 self._num_videos += 1
dd82ffea 2506
bec1fad2 2507 if 'id' not in info_dict:
fc08bdd6 2508 raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
2509 elif not info_dict.get('id'):
2510 raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])
455a15e2 2511
c9969434
S
2512 def report_force_conversion(field, field_not, conversion):
2513 self.report_warning(
2514 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2515 % (field, field_not, conversion))
2516
2517 def sanitize_string_field(info, string_field):
2518 field = info.get(string_field)
14f25df2 2519 if field is None or isinstance(field, str):
c9969434
S
2520 return
2521 report_force_conversion(string_field, 'a string', 'string')
14f25df2 2522 info[string_field] = str(field)
c9969434
S
2523
2524 def sanitize_numeric_fields(info):
2525 for numeric_field in self._NUMERIC_FIELDS:
2526 field = info.get(numeric_field)
f9934b96 2527 if field is None or isinstance(field, (int, float)):
c9969434
S
2528 continue
2529 report_force_conversion(numeric_field, 'numeric', 'int')
2530 info[numeric_field] = int_or_none(field)
2531
2532 sanitize_string_field(info_dict, 'id')
2533 sanitize_numeric_fields(info_dict)
3975b4d2 2534 if info_dict.get('section_end') and info_dict.get('section_start') is not None:
2535 info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
4c3f8c3f 2536 if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
50e93e03 2537 self.report_warning('"duration" field is negative, there is an error in extractor')
be6217b2 2538
9eef7c4e 2539 chapters = info_dict.get('chapters') or []
a3976e07 2540 if chapters and chapters[0].get('start_time'):
2541 chapters.insert(0, {'start_time': 0})
2542
9eef7c4e 2543 dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
a3976e07 2544 for idx, (prev, current, next_) in enumerate(zip(
2545 (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
9eef7c4e 2546 if current.get('start_time') is None:
2547 current['start_time'] = prev.get('end_time')
2548 if not current.get('end_time'):
2549 current['end_time'] = next_.get('start_time')
a3976e07 2550 if not current.get('title'):
2551 current['title'] = f'<Untitled Chapter {idx}>'
9eef7c4e 2552
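# A small sketch (hypothetical helper, mirroring the loop above) of the
# neighbour-based chapter repair: a missing start_time is taken from the
# previous chapter's end_time, a missing end_time from the next chapter's
# start_time, and untitled chapters get a numbered placeholder.
def fill_chapter_bounds(chapters, duration=None):
    dummy = {'end_time': 0, 'start_time': duration}
    for idx, (prev, cur, nxt) in enumerate(
            zip((dummy, *chapters), chapters, (*chapters[1:], dummy)), 1):
        if cur.get('start_time') is None:
            cur['start_time'] = prev.get('end_time')
        if not cur.get('end_time'):
            cur['end_time'] = nxt.get('start_time')
        if not cur.get('title'):
            cur['title'] = f'<Untitled Chapter {idx}>'
    return chapters

# fill_chapter_bounds([{'start_time': 0}, {'start_time': 60}], duration=90)
# -> chapter 1 ends at 60, chapter 2 ends at 90, both get placeholder titles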
dd82ffea
JMF
2553 if 'playlist' not in info_dict:
2554 # It isn't part of a playlist
2555 info_dict['playlist'] = None
2556 info_dict['playlist_index'] = None
2557
bc516a3f 2558 self._sanitize_thumbnails(info_dict)
d5519808 2559
536a55da 2560 thumbnail = info_dict.get('thumbnail')
bc516a3f 2561 thumbnails = info_dict.get('thumbnails')
536a55da
S
2562 if thumbnail:
2563 info_dict['thumbnail'] = sanitize_url(thumbnail)
2564 elif thumbnails:
d5519808
PH
2565 info_dict['thumbnail'] = thumbnails[-1]['url']
2566
ae30b840 2567 if info_dict.get('display_id') is None and 'id' in info_dict:
0afef30b
PH
2568 info_dict['display_id'] = info_dict['id']
2569
03f83004 2570 self._fill_common_fields(info_dict)
33d2fc2f 2571
05108a49
S
2572 for cc_kind in ('subtitles', 'automatic_captions'):
2573 cc = info_dict.get(cc_kind)
2574 if cc:
2575 for _, subtitle in cc.items():
2576 for subtitle_format in subtitle:
2577 if subtitle_format.get('url'):
2578 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
2579 if subtitle_format.get('ext') is None:
2580 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
2581
2582 automatic_captions = info_dict.get('automatic_captions')
4bba3716 2583 subtitles = info_dict.get('subtitles')
4bba3716 2584
360e1ca5 2585 info_dict['requested_subtitles'] = self.process_subtitles(
05108a49 2586 info_dict['id'], subtitles, automatic_captions)
a504ced0 2587
aebb4f4b 2588 formats = self._get_formats(info_dict)
dd82ffea 2589
c154302c 2590 # Backward compatibility with InfoExtractor._sort_formats
9ebac355 2591 field_preference = (formats or [{}])[0].pop('__sort_fields', None)
c154302c 2592 if field_preference:
2593 info_dict['_format_sort_fields'] = field_preference
2594
0a5a191a 2595 # or None ensures --clean-infojson removes it
2596 info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
88acdbc2 2597 if not self.params.get('allow_unplayable_formats'):
2598 formats = [f for f in formats if not f.get('has_drm')]
17ffed18 2599
2600 if formats and all(f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
2601 self.report_warning(
2602 f'{"This video is DRM protected and " if info_dict["_has_drm"] else ""}'
2603 'only images are available for download. Use --list-formats to see them'.capitalize())
88acdbc2 2604
319b6059 2605 get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
2606 if not get_from_start:
2607 info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
2608 if info_dict.get('is_live') and formats:
adbc4ec4 2609 formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
319b6059 2610 if get_from_start and not formats:
a44ca5a4 2611 self.raise_no_formats(info_dict, msg=(
2612 '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
2613 'If you want to download from the current time, use --no-live-from-start'))
adbc4ec4 2614
73af5cc8
S
2615 def is_wellformed(f):
2616 url = f.get('url')
a5ac0c47 2617 if not url:
73af5cc8
S
2618 self.report_warning(
2619 '"url" field is missing or empty - skipping format, '
2620 'there is an error in extractor')
a5ac0c47
S
2621 return False
2622 if isinstance(url, bytes):
2623 sanitize_string_field(f, 'url')
2624 return True
73af5cc8
S
2625
2626 # Filter out malformed formats for better extraction robustness
1ac7f461 2627 formats = list(filter(is_wellformed, formats or []))
2628
2629 if not formats:
2630 self.raise_no_formats(info_dict)
73af5cc8 2631
39f32f17 2632 for format in formats:
c9969434
S
2633 sanitize_string_field(format, 'format_id')
2634 sanitize_numeric_fields(format)
dcf77cf1 2635 format['url'] = sanitize_url(format['url'])
39f32f17 2636 if format.get('ext') is None:
2637 format['ext'] = determine_ext(format['url']).lower()
2638 if format.get('protocol') is None:
2639 format['protocol'] = determine_protocol(format)
2640 if format.get('resolution') is None:
2641 format['resolution'] = self.format_resolution(format, default=None)
2642 if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
2643 format['dynamic_range'] = 'SDR'
2644 if format.get('aspect_ratio') is None:
2645 format['aspect_ratio'] = try_call(lambda: round(format['width'] / format['height'], 2))
2646 if (info_dict.get('duration') and format.get('tbr')
2647 and not format.get('filesize') and not format.get('filesize_approx')):
2648 format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
2649 format['http_headers'] = self._calc_headers(collections.ChainMap(format, info_dict))
2650
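# Rough arithmetic behind the filesize_approx fallback computed in the loop
# above: tbr is a total bitrate in KBit/s, so multiplying by duration and by
# 1024/8 yields an approximate size in bytes. Standalone illustration only.
def approx_filesize(duration, tbr):
    """duration in seconds, tbr in KBit/s -> approximate size in bytes."""
    return int(duration * tbr * (1024 / 8))

# approx_filesize(600, 1000) == 76_800_000  (roughly 77 MB for 10 minutes at 1000 KBit/s)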
2651 # This is copied to http_headers by the above _calc_headers and can now be removed
2652 if '__x_forwarded_for_ip' in info_dict:
2653 del info_dict['__x_forwarded_for_ip']
2654
c154302c 2655 self.sort_formats({
2656 'formats': formats,
2657 '_format_sort_fields': info_dict.get('_format_sort_fields')
2658 })
39f32f17 2659
2660 # Sanitize and group by format_id
2661 formats_dict = {}
2662 for i, format in enumerate(formats):
e74e3b63 2663 if not format.get('format_id'):
14f25df2 2664 format['format_id'] = str(i)
e2effb08
S
2665 else:
2666 # Sanitize format_id from characters used in format selector expression
ec85ded8 2667 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
39f32f17 2668 formats_dict.setdefault(format['format_id'], []).append(format)
181c7053
S
2669
2670 # Make sure all formats have unique format_id
03b4de72 2671 common_exts = set(itertools.chain(*self._format_selection_exts.values()))
181c7053 2672 for format_id, ambiguous_formats in formats_dict.items():
48ee10ee 2673 ambiguous_id = len(ambiguous_formats) > 1
2674 for i, format in enumerate(ambiguous_formats):
2675 if ambiguous_id:
181c7053 2676 format['format_id'] = '%s-%d' % (format_id, i)
48ee10ee 2677 # Ensure there is no conflict between id and ext in format selection
2678 # See https://github.com/yt-dlp/yt-dlp/issues/1282
2679 if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
2680 format['format_id'] = 'f%s' % format['format_id']
181c7053 2681
39f32f17 2682 if format.get('format') is None:
2683 format['format'] = '{id} - {res}{note}'.format(
2684 id=format['format_id'],
2685 res=self.format_resolution(format),
2686 note=format_field(format, 'format_note', ' (%s)'),
2687 )
dd82ffea 2688
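# Minimal stdlib-only sketch of the format_id normalisation above: characters
# that are meaningful in format-selector expressions are replaced with '_',
# and IDs that still collide get a numeric suffix. Hypothetical helper name.
import collections
import re

def dedupe_format_ids(format_ids):
    cleaned = [re.sub(r'[\s,/+\[\]()]', '_', fid) for fid in format_ids]
    groups = collections.defaultdict(list)
    for pos, fid in enumerate(cleaned):
        groups[fid].append(pos)
    for fid, positions in groups.items():
        if len(positions) > 1:
            for i, pos in enumerate(positions):
                cleaned[pos] = '%s-%d' % (fid, i)
    return cleaned

# dedupe_format_ids(['hls-1080p', 'hls-1080p', 'http 720p'])
# -> ['hls-1080p-0', 'hls-1080p-1', 'http_720p']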
9f1a1c36 2689 if self.params.get('check_formats') is True:
282f5709 2690 formats = LazyList(self._check_formats(formats[::-1]), reverse=True)
9f1a1c36 2691
88acdbc2 2692 if not formats or formats[0] is not info_dict:
b3d9ef88
JMF
2693 # only set the 'formats' field if the original info_dict lists them
2694 # otherwise we end up with a circular reference, the first (and unique)
f89197d7 2695 # element in the 'formats' field in info_dict is info_dict itself,
dfb1b146 2696 # which can't be exported to json
b3d9ef88 2697 info_dict['formats'] = formats
4ec82a72 2698
2699 info_dict, _ = self.pre_process(info_dict)
2700
6db9c4d5 2701 if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
09b49e1f 2702 return info_dict
2703
2704 self.post_extract(info_dict)
2705 info_dict, _ = self.pre_process(info_dict, 'after_filter')
2706
093a1710 2707 # The pre-processors may have modified the formats
aebb4f4b 2708 formats = self._get_formats(info_dict)
093a1710 2709
e4221b70 2710 list_only = self.params.get('simulate') == 'list_only'
fa9f30b8 2711 interactive_format_selection = not list_only and self.format_selector == '-'
b7b04c78 2712 if self.params.get('list_thumbnails'):
2713 self.list_thumbnails(info_dict)
b7b04c78 2714 if self.params.get('listsubtitles'):
2715 if 'automatic_captions' in info_dict:
2716 self.list_subtitles(
2717 info_dict['id'], automatic_captions, 'automatic captions')
2718 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
fa9f30b8 2719 if self.params.get('listformats') or interactive_format_selection:
b69fd25c 2720 self.list_formats(info_dict)
169dbde9 2721 if list_only:
b7b04c78 2722 # Without this printing, -F --print-json will not work
17060584 2723 self.__forced_printings(info_dict)
c487cf00 2724 return info_dict
bfaae0a7 2725
187986a8 2726 format_selector = self.format_selector
2727 if format_selector is None:
0017d9ad 2728 req_format = self._default_format_spec(info_dict, download=download)
0760b0a7 2729 self.write_debug('Default format spec: %s' % req_format)
187986a8 2730 format_selector = self.build_format_selector(req_format)
317f7ab6 2731
fa9f30b8 2732 while True:
2733 if interactive_format_selection:
2734 req_format = input(
2735 self._format_screen('\nEnter format selector: ', self.Styles.EMPHASIS))
2736 try:
2737 format_selector = self.build_format_selector(req_format)
2738 except SyntaxError as err:
2739 self.report_error(err, tb=False, is_error=False)
2740 continue
2741
85e801a9 2742 formats_to_download = list(format_selector({
fa9f30b8 2743 'formats': formats,
85e801a9 2744 'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
2745 'incomplete_formats': (
2746 # All formats are video-only or
2747 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
2748 # all formats are audio-only
2749 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
2750 }))
fa9f30b8 2751 if interactive_format_selection and not formats_to_download:
2752 self.report_error('Requested format is not available', tb=False, is_error=False)
2753 continue
2754 break
317f7ab6 2755
dd82ffea 2756 if not formats_to_download:
b7da73eb 2757 if not self.params.get('ignore_no_formats_error'):
c0b6e5c7 2758 raise ExtractorError(
2759 'Requested format is not available. Use --list-formats for a list of available formats',
2760 expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
b62fa6d7 2761 self.report_warning('Requested format is not available')
2762 # Process what we can, even without any available formats.
2763 formats_to_download = [{}]
a13e6848 2764
0500ee3d 2765 requested_ranges = tuple(self.params.get('download_ranges', lambda *_: [{}])(info_dict, self))
5ec1b6b7 2766 best_format, downloaded_formats = formats_to_download[-1], []
b62fa6d7 2767 if download:
0500ee3d 2768 if best_format and requested_ranges:
5ec1b6b7 2769 def to_screen(*msg):
2770 self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')
2771
2772 to_screen(f'Downloading {len(formats_to_download)} format(s):',
2773 (f['format_id'] for f in formats_to_download))
0500ee3d 2774 if requested_ranges != ({}, ):
5ec1b6b7 2775 to_screen(f'Downloading {len(requested_ranges)} time ranges:',
fc2ba496 2776 (f'{c["start_time"]:.1f}-{c["end_time"]:.1f}' for c in requested_ranges))
a13e6848 2777 max_downloads_reached = False
5ec1b6b7 2778
0500ee3d 2779 for fmt, chapter in itertools.product(formats_to_download, requested_ranges):
5ec1b6b7 2780 new_info = self._copy_infodict(info_dict)
b7da73eb 2781 new_info.update(fmt)
3975b4d2 2782 offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
fc2ba496 2783 end_time = offset + min(chapter.get('end_time', duration), duration)
3975b4d2 2784 if chapter or offset:
5ec1b6b7 2785 new_info.update({
3975b4d2 2786 'section_start': offset + chapter.get('start_time', 0),
2576d53a 2787 # duration may not be accurate. So allow deviations <1sec
2788 'section_end': end_time if end_time <= offset + duration + 1 else None,
5ec1b6b7 2789 'section_title': chapter.get('title'),
2790 'section_number': chapter.get('index'),
2791 })
2792 downloaded_formats.append(new_info)
a13e6848 2793 try:
2794 self.process_info(new_info)
2795 except MaxDownloadsReached:
2796 max_downloads_reached = True
415f8d51 2797 self._raise_pending_errors(new_info)
f46e2f9d 2798 # Remove copied info
2799 for key, val in tuple(new_info.items()):
2800 if info_dict.get(key) == val:
2801 new_info.pop(key)
a13e6848 2802 if max_downloads_reached:
2803 break
ebed8b37 2804
5ec1b6b7 2805 write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
a13e6848 2806 assert write_archive.issubset({True, False, 'ignore'})
2807 if True in write_archive and False not in write_archive:
2808 self.record_download_archive(info_dict)
be72c624 2809
5ec1b6b7 2810 info_dict['requested_downloads'] = downloaded_formats
ed5835b4 2811 info_dict = self.run_all_pps('after_video', info_dict)
a13e6848 2812 if max_downloads_reached:
2813 raise MaxDownloadsReached()
ebed8b37 2814
49a57e70 2815 # We update the info dict with the selected best quality format (backwards compatibility)
be72c624 2816 info_dict.update(best_format)
dd82ffea
JMF
2817 return info_dict
2818
98c70d6f 2819 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
a504ced0 2820 """Select the requested subtitles and their format"""
d8a58ddc 2821 available_subs, normal_sub_langs = {}, []
98c70d6f
JMF
2822 if normal_subtitles and self.params.get('writesubtitles'):
2823 available_subs.update(normal_subtitles)
d8a58ddc 2824 normal_sub_langs = tuple(normal_subtitles.keys())
98c70d6f
JMF
2825 if automatic_captions and self.params.get('writeautomaticsub'):
2826 for lang, cap_info in automatic_captions.items():
360e1ca5
JMF
2827 if lang not in available_subs:
2828 available_subs[lang] = cap_info
2829
d2c8aadf 2830 if not available_subs or (
2831 not self.params.get('writesubtitles')
2832 and not self.params.get('writeautomaticsub')):
4d171848 2833 return None
a504ced0 2834
d8a58ddc 2835 all_sub_langs = tuple(available_subs.keys())
a504ced0 2836 if self.params.get('allsubtitles', False):
c32b0aab 2837 requested_langs = all_sub_langs
2838 elif self.params.get('subtitleslangs', False):
5314b521 2839 try:
2840 requested_langs = orderedSet_from_options(
2841 self.params.get('subtitleslangs'), {'all': all_sub_langs}, use_regex=True)
2842 except re.error as e:
2843 raise ValueError(f'Wrong regex for subtitleslangs: {e.pattern}')
a504ced0 2844 else:
376aa24b
SS
2845 requested_langs = LazyList(itertools.chain(
2846 ['en'] if 'en' in normal_sub_langs else [],
2847 filter(lambda f: f.startswith('en'), normal_sub_langs),
2848 ['en'] if 'en' in all_sub_langs else [],
2849 filter(lambda f: f.startswith('en'), all_sub_langs),
2850 normal_sub_langs, all_sub_langs,
2851 ))[:1]
ad3dc496 2852 if requested_langs:
d2c8aadf 2853 self.to_screen(f'[info] {video_id}: Downloading subtitles: {", ".join(requested_langs)}')
a504ced0
JMF
2854
2855 formats_query = self.params.get('subtitlesformat', 'best')
2856 formats_preference = formats_query.split('/') if formats_query else []
2857 subs = {}
2858 for lang in requested_langs:
2859 formats = available_subs.get(lang)
2860 if formats is None:
86e5f3ed 2861 self.report_warning(f'{lang} subtitles not available for {video_id}')
a504ced0 2862 continue
a504ced0
JMF
2863 for ext in formats_preference:
2864 if ext == 'best':
2865 f = formats[-1]
2866 break
2867 matches = list(filter(lambda f: f['ext'] == ext, formats))
2868 if matches:
2869 f = matches[-1]
2870 break
2871 else:
2872 f = formats[-1]
2873 self.report_warning(
2874 'No subtitle format found matching "%s" for language %s, '
2875 'using %s' % (formats_query, lang, f['ext']))
2876 subs[lang] = f
2877 return subs
2878
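# Sketch of the default subtitle-language choice built above when neither
# --sub-langs nor --all-subs is given: prefer 'en', then any 'en*' variant
# from the manual subtitles, then the same from automatic captions, then
# anything else; only the first candidate is kept. Illustration only.
import itertools

def pick_default_sub_lang(normal_langs, all_langs):
    candidates = itertools.chain(
        ['en'] if 'en' in normal_langs else [],
        (lang for lang in normal_langs if lang.startswith('en')),
        ['en'] if 'en' in all_langs else [],
        (lang for lang in all_langs if lang.startswith('en')),
        normal_langs, all_langs)
    return next(candidates, None)

# pick_default_sub_lang(['de', 'en-GB'], ['de', 'en-GB', 'fr']) -> 'en-GB'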
bb66c247 2879 def _forceprint(self, key, info_dict):
2880 if info_dict is None:
2881 return
2882 info_copy = info_dict.copy()
17060584 2883 info_copy.setdefault('filename', self.prepare_filename(info_dict))
2884 if info_dict.get('requested_formats') is not None:
2885 # For RTMP URLs, also include the playpath
2886 info_copy['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
2887 elif info_dict.get('url'):
2888 info_copy['urls'] = info_dict['url'] + info_dict.get('play_path', '')
bb66c247 2889 info_copy['formats_table'] = self.render_formats_table(info_dict)
2890 info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
2891 info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
2892 info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))
2893
2894 def format_tmpl(tmpl):
48c8424b 2895 mobj = re.fullmatch(r'([\w.:,]|-\d|(?P<dict>{([\w.:,]|-\d)+}))+=?', tmpl)
07a1250e 2896 if not mobj:
2897 return tmpl
48c8424b 2898
2899 fmt = '%({})s'
2900 if tmpl.startswith('{'):
6f2287cb 2901 tmpl, fmt = f'.{tmpl}', '%({})j'
48c8424b 2902 if tmpl.endswith('='):
2903 tmpl, fmt = tmpl[:-1], '{0} = %({0})#j'
2904 return '\n'.join(map(fmt.format, [tmpl] if mobj.group('dict') else tmpl.split(',')))
8130779d 2905
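# What format_tmpl above does for the common comma-separated case, as a
# standalone sketch: a bare field list such as 'title,id' expands to one
# '%(field)s' output template per field, while anything that is already a
# full template is passed through untouched. (The '{...}' and trailing '='
# forms additionally switch to json-style formatting.)
import re

def expand_field_list(tmpl):
    if not re.fullmatch(r'([\w.:,]|-\d)+', tmpl):
        return tmpl  # already a real output template
    return '\n'.join('%%(%s)s' % field for field in tmpl.split(','))

# expand_field_list('title,id') -> '%(title)s\n%(id)s'
# expand_field_list('%(title)s by %(uploader)s') is returned unchanged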
bb66c247 2906 for tmpl in self.params['forceprint'].get(key, []):
2907 self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))
2908
2909 for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
5127e92a 2910 filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
bb66c247 2911 tmpl = format_tmpl(tmpl)
2912 self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
8d93e69d 2913 if self._ensure_dir_exists(filename):
9874e82b 2914 with open(filename, 'a', encoding='utf-8', newline='') as f:
2915 f.write(self.evaluate_outtmpl(tmpl, info_copy) + os.linesep)
ca30f449 2916
17060584 2917 return info_copy
2918
2919 def __forced_printings(self, info_dict, filename=None, incomplete=True):
bb66c247 2920 if (self.params.get('forcejson')
2921 or self.params['forceprint'].get('video')
2922 or self.params['print_to_file'].get('video')):
2b8a2973 2923 self.post_extract(info_dict)
17060584 2924 if filename:
2925 info_dict['filename'] = filename
b5f61b69 2926 info_copy = self._forceprint('video', info_dict)
2927
2928 def print_field(field, actual_field=None, optional=False):
2929 if actual_field is None:
2930 actual_field = field
2931 if self.params.get(f'force{field}') and (
2932 info_copy.get(field) is not None or (not optional and not incomplete)):
2933 self.to_stdout(info_copy[actual_field])
2934
2935 print_field('title')
2936 print_field('id')
2937 print_field('url', 'urls')
2938 print_field('thumbnail', optional=True)
2939 print_field('description', optional=True)
6f2287cb 2940 if filename:
2941 print_field('filename')
b5f61b69 2942 if self.params.get('forceduration') and info_copy.get('duration') is not None:
2943 self.to_stdout(formatSeconds(info_copy['duration']))
2944 print_field('format')
53c18592 2945
2b8a2973 2946 if self.params.get('forcejson'):
6e84b215 2947 self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
d06daf23 2948
e8e73840 2949 def dl(self, name, info, subtitle=False, test=False):
88acdbc2 2950 if not info.get('url'):
1151c407 2951 self.raise_no_formats(info, True)
e8e73840 2952
2953 if test:
2954 verbose = self.params.get('verbose')
2955 params = {
2956 'test': True,
a169858f 2957 'quiet': self.params.get('quiet') or not verbose,
e8e73840 2958 'verbose': verbose,
2959 'noprogress': not verbose,
2960 'nopart': True,
2961 'skip_unavailable_fragments': False,
2962 'keep_fragments': False,
2963 'overwrites': True,
2964 '_no_ytdl_file': True,
2965 }
2966 else:
2967 params = self.params
96fccc10 2968 fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
e8e73840 2969 if not test:
2970 for ph in self._progress_hooks:
2971 fd.add_progress_hook(ph)
42676437
M
2972 urls = '", "'.join(
2973 (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
2974 for f in info.get('requested_formats', []) or [info])
3a408f9d 2975 self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')
03b4de72 2976
adbc4ec4
THD
2977 # Note: Ideally, info should be deep-copied so that hooks cannot modify it.
2978 # But it may contain objects that are not deep-copyable
2979 new_info = self._copy_infodict(info)
e8e73840 2980 if new_info.get('http_headers') is None:
2981 new_info['http_headers'] = self._calc_headers(new_info)
2982 return fd.download(name, new_info, subtitle)
2983
e04938ab 2984 def existing_file(self, filepaths, *, default_overwrite=True):
2985 existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
2986 if existing_files and not self.params.get('overwrites', default_overwrite):
2987 return existing_files[0]
2988
2989 for file in existing_files:
2990 self.report_file_delete(file)
2991 os.remove(file)
2992 return None
2993
8222d8de 2994 def process_info(self, info_dict):
09b49e1f 2995 """Process a single resolved IE result. (Modifies it in-place)"""
8222d8de
JMF
2996
2997 assert info_dict.get('_type', 'video') == 'video'
f46e2f9d 2998 original_infodict = info_dict
fd288278 2999
4513a41a 3000 if 'format' not in info_dict and 'ext' in info_dict:
8222d8de
JMF
3001 info_dict['format'] = info_dict['ext']
3002
c77495e3 3003 if self._match_entry(info_dict) is not None:
9e907ebd 3004 info_dict['__write_download_archive'] = 'ignore'
8222d8de
JMF
3005 return
3006
09b49e1f 3007 # Does nothing under normal operation - for backward compatibility of process_info
277d6ff5 3008 self.post_extract(info_dict)
119e40ef 3009
3010 def replace_info_dict(new_info):
3011 nonlocal info_dict
3012 if new_info == info_dict:
3013 return
3014 info_dict.clear()
3015 info_dict.update(new_info)
3016
3017 new_info, _ = self.pre_process(info_dict, 'video')
3018 replace_info_dict(new_info)
0c14d66a 3019 self._num_downloads += 1
8222d8de 3020
dcf64d43 3021 # info_dict['_filename'] needs to be set for backward compatibility
de6000d9 3022 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
3023 temp_filename = self.prepare_filename(info_dict, 'temp')
0202b52a 3024 files_to_move = {}
8222d8de
JMF
3025
3026 # Forced printings
4513a41a 3027 self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
8222d8de 3028
ca6d59d2 3029 def check_max_downloads():
3030 if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
3031 raise MaxDownloadsReached()
3032
b7b04c78 3033 if self.params.get('simulate'):
9e907ebd 3034 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
ca6d59d2 3035 check_max_downloads()
8222d8de
JMF
3036 return
3037
de6000d9 3038 if full_filename is None:
8222d8de 3039 return
e92caff5 3040 if not self._ensure_dir_exists(encodeFilename(full_filename)):
0202b52a 3041 return
e92caff5 3042 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
8222d8de
JMF
3043 return
3044
80c03fa9 3045 if self._write_description('video', info_dict,
3046 self.prepare_filename(info_dict, 'description')) is None:
3047 return
3048
3049 sub_files = self._write_subtitles(info_dict, temp_filename)
3050 if sub_files is None:
3051 return
3052 files_to_move.update(dict(sub_files))
3053
3054 thumb_files = self._write_thumbnails(
3055 'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
3056 if thumb_files is None:
3057 return
3058 files_to_move.update(dict(thumb_files))
8222d8de 3059
80c03fa9 3060 infofn = self.prepare_filename(info_dict, 'infojson')
3061 _infojson_written = self._write_info_json('video', info_dict, infofn)
3062 if _infojson_written:
dac5df5a 3063 info_dict['infojson_filename'] = infofn
e75bb0d6 3064 # For backward compatibility, even though it was a private field
80c03fa9 3065 info_dict['__infojson_filename'] = infofn
3066 elif _infojson_written is None:
3067 return
3068
3069 # Note: Annotations are deprecated
3070 annofn = None
1fb07d10 3071 if self.params.get('writeannotations', False):
de6000d9 3072 annofn = self.prepare_filename(info_dict, 'annotation')
80c03fa9 3073 if annofn:
e92caff5 3074 if not self._ensure_dir_exists(encodeFilename(annofn)):
0202b52a 3075 return
0c3d0f51 3076 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
6febd1c1 3077 self.to_screen('[info] Video annotations are already present')
ffddb112
RA
3078 elif not info_dict.get('annotations'):
3079 self.report_warning('There are no annotations to write.')
7b6fefc9
PH
3080 else:
3081 try:
6febd1c1 3082 self.to_screen('[info] Writing video annotations to: ' + annofn)
86e5f3ed 3083 with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
7b6fefc9
PH
3084 annofile.write(info_dict['annotations'])
3085 except (KeyError, TypeError):
6febd1c1 3086 self.report_warning('There are no annotations to write.')
86e5f3ed 3087 except OSError:
6febd1c1 3088 self.report_error('Cannot write annotations file: ' + annofn)
7b6fefc9 3089 return
1fb07d10 3090
732044af 3091 # Write internet shortcut files
08438d2c 3092 def _write_link_file(link_type):
60f3e995 3093 url = try_get(info_dict['webpage_url'], iri_to_uri)
3094 if not url:
3095 self.report_warning(
3096 f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
3097 return True
08438d2c 3098 linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
0e6b018a
Z
3099 if not self._ensure_dir_exists(encodeFilename(linkfn)):
3100 return False
10e3742e 3101 if self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
08438d2c 3102 self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
3103 return True
3104 try:
3105 self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
86e5f3ed 3106 with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
3107 newline='\r\n' if link_type == 'url' else '\n') as linkfile:
60f3e995 3108 template_vars = {'url': url}
08438d2c 3109 if link_type == 'desktop':
3110 template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
3111 linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
86e5f3ed 3112 except OSError:
08438d2c 3113 self.report_error(f'Cannot write internet shortcut {linkfn}')
3114 return False
732044af 3115 return True
3116
08438d2c 3117 write_links = {
3118 'url': self.params.get('writeurllink'),
3119 'webloc': self.params.get('writewebloclink'),
3120 'desktop': self.params.get('writedesktoplink'),
3121 }
3122 if self.params.get('writelink'):
3123 link_type = ('webloc' if sys.platform == 'darwin'
3124 else 'desktop' if sys.platform.startswith('linux')
3125 else 'url')
3126 write_links[link_type] = True
3127
3128 if any(should_write and not _write_link_file(link_type)
3129 for link_type, should_write in write_links.items()):
3130 return
732044af 3131
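# For orientation, a hedged sketch of what the simplest of these shortcuts, a
# Windows '.url' file, contains: an INI-style stanza with CRLF line endings.
# The exact templates used above live in LINK_TEMPLATES; this helper is only
# an illustration.
def render_url_shortcut(url):
    return '[InternetShortcut]\r\nURL=%s\r\n' % url

# render_url_shortcut('https://example.com/watch?v=xyz')
# -> '[InternetShortcut]\r\nURL=https://example.com/watch?v=xyz\r\n'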
415f8d51 3132 new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
3133 replace_info_dict(new_info)
56d868db 3134
a13e6848 3135 if self.params.get('skip_download'):
56d868db 3136 info_dict['filepath'] = temp_filename
3137 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3138 info_dict['__files_to_move'] = files_to_move
f46e2f9d 3139 replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
9e907ebd 3140 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
56d868db 3141 else:
3142 # Download
b868936c 3143 info_dict.setdefault('__postprocessors', [])
4340deca 3144 try:
0202b52a 3145
e04938ab 3146 def existing_video_file(*filepaths):
6b591b29 3147 ext = info_dict.get('ext')
e04938ab 3148 converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
3149 file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
3150 default_overwrite=False)
3151 if file:
3152 info_dict['ext'] = os.path.splitext(file)[1][1:]
3153 return file
0202b52a 3154
7b2c3f47 3155 fd, success = None, True
fccf90e7 3156 if info_dict.get('protocol') or info_dict.get('url'):
56ba69e4 3157 fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
71df9b7f 3158 if fd is not FFmpegFD and 'no-direct-merge' not in self.params['compat_opts'] and (
56ba69e4 3159 info_dict.get('section_start') or info_dict.get('section_end')):
7b2c3f47 3160 msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
56ba69e4 3161 else 'You have requested downloading the video partially, but ffmpeg is not installed')
3162 self.report_error(f'{msg}. Aborting')
5ec1b6b7 3163 return
5ec1b6b7 3164
4340deca 3165 if info_dict.get('requested_formats') is not None:
81cd954a 3166 requested_formats = info_dict['requested_formats']
0202b52a 3167 old_ext = info_dict['ext']
4e3b637d 3168 if self.params.get('merge_output_format') is None:
4e3b637d 3169 if (info_dict['ext'] == 'webm'
3170 and info_dict.get('thumbnails')
3171 # check with type instead of pp_key, __name__, or isinstance
3172 # since we don't want any custom PPs to trigger this
c487cf00 3173 and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])): # noqa: E721
4e3b637d 3174 info_dict['ext'] = 'mkv'
3175 self.report_warning(
3176 'webm doesn\'t support embedding a thumbnail, mkv will be used')
124bc071 3177 new_ext = info_dict['ext']
0202b52a 3178
124bc071 3179 def correct_ext(filename, ext=new_ext):
96fccc10 3180 if filename == '-':
3181 return filename
0202b52a 3182 filename_real_ext = os.path.splitext(filename)[1][1:]
3183 filename_wo_ext = (
3184 os.path.splitext(filename)[0]
124bc071 3185 if filename_real_ext in (old_ext, new_ext)
0202b52a 3186 else filename)
86e5f3ed 3187 return f'{filename_wo_ext}.{ext}'
0202b52a 3188
38c6902b 3189 # Ensure filename always has a correct extension for successful merge
0202b52a 3190 full_filename = correct_ext(full_filename)
3191 temp_filename = correct_ext(temp_filename)
e04938ab 3192 dl_filename = existing_video_file(full_filename, temp_filename)
1ea24129 3193 info_dict['__real_download'] = False
18e674b4 3194
7b2c3f47 3195 merger = FFmpegMergerPP(self)
adbc4ec4 3196 downloaded = []
dbf5416a 3197 if dl_filename is not None:
6c7274ec 3198 self.report_file_already_downloaded(dl_filename)
adbc4ec4
THD
3199 elif fd:
3200 for f in requested_formats if fd != FFmpegFD else []:
3201 f['filepath'] = fname = prepend_extension(
3202 correct_ext(temp_filename, info_dict['ext']),
3203 'f%s' % f['format_id'], info_dict['ext'])
3204 downloaded.append(fname)
dbf5416a 3205 info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
3206 success, real_download = self.dl(temp_filename, info_dict)
3207 info_dict['__real_download'] = real_download
18e674b4 3208 else:
18e674b4 3209 if self.params.get('allow_unplayable_formats'):
3210 self.report_warning(
3211 'You have requested merging of multiple formats '
3212 'while also allowing unplayable formats to be downloaded. '
3213 'The formats won\'t be merged to prevent data corruption.')
3214 elif not merger.available:
e8969bda 3215 msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
3216 if not self.params.get('ignoreerrors'):
3217 self.report_error(f'{msg}. Aborting due to --abort-on-error')
3218 return
3219 self.report_warning(f'{msg}. The formats won\'t be merged')
18e674b4 3220
96fccc10 3221 if temp_filename == '-':
adbc4ec4 3222 reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
96fccc10 3223 else 'but the formats are incompatible for simultaneous download' if merger.available
3224 else 'but ffmpeg is not installed')
3225 self.report_warning(
3226 f'You have requested downloading multiple formats to stdout {reason}. '
3227 'The formats will be streamed one after the other')
3228 fname = temp_filename
dbf5416a 3229 for f in requested_formats:
3230 new_info = dict(info_dict)
3231 del new_info['requested_formats']
3232 new_info.update(f)
96fccc10 3233 if temp_filename != '-':
124bc071 3234 fname = prepend_extension(
3235 correct_ext(temp_filename, new_info['ext']),
3236 'f%s' % f['format_id'], new_info['ext'])
96fccc10 3237 if not self._ensure_dir_exists(fname):
3238 return
a21e0ab1 3239 f['filepath'] = fname
96fccc10 3240 downloaded.append(fname)
dbf5416a 3241 partial_success, real_download = self.dl(fname, new_info)
3242 info_dict['__real_download'] = info_dict['__real_download'] or real_download
3243 success = success and partial_success
adbc4ec4
THD
3244
3245 if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
3246 info_dict['__postprocessors'].append(merger)
3247 info_dict['__files_to_merge'] = downloaded
3248 # Even if there were no downloads, it is being merged only now
3249 info_dict['__real_download'] = True
3250 else:
3251 for file in downloaded:
3252 files_to_move[file] = None
4340deca
P
3253 else:
3254 # Just a single file
e04938ab 3255 dl_filename = existing_video_file(full_filename, temp_filename)
6c7274ec 3256 if dl_filename is None or dl_filename == temp_filename:
3257 # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
3258 # So we should try to resume the download
e8e73840 3259 success, real_download = self.dl(temp_filename, info_dict)
0202b52a 3260 info_dict['__real_download'] = real_download
6c7274ec 3261 else:
3262 self.report_file_already_downloaded(dl_filename)
0202b52a 3263
0202b52a 3264 dl_filename = dl_filename or temp_filename
c571435f 3265 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
0202b52a 3266
3158150c 3267 except network_exceptions as err:
7960b056 3268 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
4340deca 3269 return
86e5f3ed 3270 except OSError as err:
4340deca
P
3271 raise UnavailableVideoError(err)
3272 except (ContentTooShortError, ) as err:
86e5f3ed 3273 self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
4340deca 3274 return
8222d8de 3275
415f8d51 3276 self._raise_pending_errors(info_dict)
de6000d9 3277 if success and full_filename != '-':
f17f8651 3278
fd7cfb64 3279 def fixup():
3280 do_fixup = True
3281 fixup_policy = self.params.get('fixup')
3282 vid = info_dict['id']
3283
3284 if fixup_policy in ('ignore', 'never'):
3285 return
3286 elif fixup_policy == 'warn':
3fe75fdc 3287 do_fixup = 'warn'
f89b3e2d 3288 elif fixup_policy != 'force':
3289 assert fixup_policy in ('detect_or_warn', None)
3290 if not info_dict.get('__real_download'):
3291 do_fixup = False
fd7cfb64 3292
3293 def ffmpeg_fixup(cndn, msg, cls):
3fe75fdc 3294 if not (do_fixup and cndn):
fd7cfb64 3295 return
3fe75fdc 3296 elif do_fixup == 'warn':
fd7cfb64 3297 self.report_warning(f'{vid}: {msg}')
3298 return
3299 pp = cls(self)
3300 if pp.available:
3301 info_dict['__postprocessors'].append(pp)
3302 else:
3303 self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
3304
3305 stretched_ratio = info_dict.get('stretched_ratio')
ca9def71
LNO
3306 ffmpeg_fixup(stretched_ratio not in (1, None),
3307 f'Non-uniform pixel ratio {stretched_ratio}',
3308 FFmpegFixupStretchedPP)
fd7cfb64 3309
993191c0 3310 downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
24146491 3311 downloader = downloader.FD_NAME if downloader else None
adbc4ec4 3312
ca9def71
LNO
3313 ext = info_dict.get('ext')
3314 postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
3315 isinstance(pp, FFmpegVideoConvertorPP)
3316 and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
3317 ) for pp in self._pps['post_process'])
3318
3319 if not postprocessed_by_ffmpeg:
3320 ffmpeg_fixup(ext == 'm4a' and info_dict.get('container') == 'm4a_dash',
f2df4071 3321 'writing DASH m4a. Only some players support this container',
3322 FFmpegFixupM4aPP)
24146491 3323 ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
494f5230 3324 or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
adbc4ec4
THD
3325 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
3326 FFmpegFixupM3u8PP)
26010b5c 3327 ffmpeg_fixup(info_dict.get('is_live') and downloader == 'dashsegments',
adbc4ec4
THD
3328 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)
3329
24146491 3330 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
3331 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)
fd7cfb64 3332
3333 fixup()
8222d8de 3334 try:
f46e2f9d 3335 replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
af819c21 3336 except PostProcessingError as err:
3337 self.report_error('Postprocessing: %s' % str(err))
8222d8de 3338 return
ab8e5e51
AM
3339 try:
3340 for ph in self._post_hooks:
23c1a667 3341 ph(info_dict['filepath'])
ab8e5e51
AM
3342 except Exception as err:
3343 self.report_error('post hooks: %s' % str(err))
3344 return
9e907ebd 3345 info_dict['__write_download_archive'] = True
2d30509f 3346
c487cf00 3347 assert info_dict is original_infodict # Make sure the info_dict was modified in-place
a13e6848 3348 if self.params.get('force_write_download_archive'):
9e907ebd 3349 info_dict['__write_download_archive'] = True
ca6d59d2 3350 check_max_downloads()
8222d8de 3351
aa9369a2 3352 def __download_wrapper(self, func):
3353 @functools.wraps(func)
3354 def wrapper(*args, **kwargs):
3355 try:
3356 res = func(*args, **kwargs)
3357 except UnavailableVideoError as e:
3358 self.report_error(e)
b222c271 3359 except DownloadCancelled as e:
3360 self.to_screen(f'[info] {e}')
3361 if not self.params.get('break_per_url'):
3362 raise
fd404bec 3363 self._num_downloads = 0
aa9369a2 3364 else:
3365 if self.params.get('dump_single_json', False):
3366 self.post_extract(res)
3367 self.to_stdout(json.dumps(self.sanitize_info(res)))
3368 return wrapper
3369
8222d8de
JMF
3370 def download(self, url_list):
3371 """Download a given list of URLs."""
aa9369a2 3372 url_list = variadic(url_list) # Passing a single URL is a common mistake
bf1824b3 3373 outtmpl = self.params['outtmpl']['default']
3089bc74
S
3374 if (len(url_list) > 1
3375 and outtmpl != '-'
3376 and '%' not in outtmpl
3377 and self.params.get('max_downloads') != 1):
acd69589 3378 raise SameFileError(outtmpl)
8222d8de
JMF
3379
3380 for url in url_list:
aa9369a2 3381 self.__download_wrapper(self.extract_info)(
3382 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
8222d8de
JMF
3383
3384 return self._download_retcode
3385
1dcc4c0c 3386 def download_with_info_file(self, info_filename):
31bd3925
JMF
3387 with contextlib.closing(fileinput.FileInput(
3388 [info_filename], mode='r',
3389 openhook=fileinput.hook_encoded('utf-8'))) as f:
3390 # FileInput doesn't have a read method, we can't call json.load
ab1de9cb 3391 infos = [self.sanitize_info(info, self.params.get('clean_infojson', True))
3392 for info in variadic(json.loads('\n'.join(f)))]
3393 for info in infos:
3394 try:
3395 self.__download_wrapper(self.process_ie_result)(info, download=True)
3396 except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
3397 if not isinstance(e, EntryNotInPlaylist):
3398 self.to_stderr('\r')
3399 webpage_url = info.get('webpage_url')
3400 if webpage_url is None:
3401 raise
aa9369a2 3402 self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
ab1de9cb 3403 self.download([webpage_url])
d4943898 3404 return self._download_retcode
1dcc4c0c 3405
cb202fd2 3406 @staticmethod
8012d892 3407 def sanitize_info(info_dict, remove_private_keys=False):
3408 ''' Sanitize the infodict for converting to json '''
3ad56b42 3409 if info_dict is None:
3410 return info_dict
6e84b215 3411 info_dict.setdefault('epoch', int(time.time()))
6a5a30f9 3412 info_dict.setdefault('_type', 'video')
b5e7a2e6 3413 info_dict.setdefault('_version', {
3414 'version': __version__,
3415 'current_git_head': current_git_head(),
3416 'release_git_head': RELEASE_GIT_HEAD,
3417 'repository': REPOSITORY,
3418 })
09b49e1f 3419
8012d892 3420 if remove_private_keys:
0a5a191a 3421 reject = lambda k, v: v is None or k.startswith('__') or k in {
f46e2f9d 3422 'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
6f2287cb 3423 'entries', 'filepath', '_filename', 'filename', 'infojson_filename', 'original_url',
3424 'playlist_autonumber', '_format_sort_fields',
6e84b215 3425 }
ae8f99e6 3426 else:
09b49e1f 3427 reject = lambda k, v: False
adbc4ec4
THD
3428
3429 def filter_fn(obj):
3430 if isinstance(obj, dict):
3431 return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
3432 elif isinstance(obj, (list, tuple, set, LazyList)):
3433 return list(map(filter_fn, obj))
3434 elif obj is None or isinstance(obj, (str, int, float, bool)):
3435 return obj
3436 else:
3437 return repr(obj)
3438
5226731e 3439 return filter_fn(info_dict)
cb202fd2 3440
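# Standalone sketch of the recursive clean-up sanitize_info performs: dicts
# and sequences are walked, JSON-friendly scalars pass through, and anything
# else is stringified with repr() so that json.dumps cannot fail later.
def jsonable(obj):
    if isinstance(obj, dict):
        return {k: jsonable(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple, set)):
        return [jsonable(v) for v in obj]
    if obj is None or isinstance(obj, (str, int, float, bool)):
        return obj
    return repr(obj)

# json.dumps(jsonable({'id': 'abc', 'tags': {'a', 'b'}, 'when': range(3)})) will not raise TypeError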
8012d892 3441 @staticmethod
3442 def filter_requested_info(info_dict, actually_filter=True):
3443 ''' Alias of sanitize_info for backward compatibility '''
3444 return YoutubeDL.sanitize_info(info_dict, actually_filter)
3445
43d7f5a5 3446 def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
3447 for filename in set(filter(None, files_to_delete)):
3448 if msg:
3449 self.to_screen(msg % filename)
3450 try:
3451 os.remove(filename)
3452 except OSError:
3453 self.report_warning(f'Unable to delete file {filename}')
3454 if filename in info.get('__files_to_move', []): # NB: Delete even if None
3455 del info['__files_to_move'][filename]
3456
ed5835b4 3457 @staticmethod
3458 def post_extract(info_dict):
3459 def actual_post_extract(info_dict):
3460 if info_dict.get('_type') in ('playlist', 'multi_video'):
3461 for video_dict in info_dict.get('entries', {}):
3462 actual_post_extract(video_dict or {})
3463 return
3464
09b49e1f 3465 post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
3466 info_dict.update(post_extractor())
ed5835b4 3467
3468 actual_post_extract(info_dict or {})
3469
dcf64d43 3470 def run_pp(self, pp, infodict):
5bfa4862 3471 files_to_delete = []
dcf64d43 3472 if '__files_to_move' not in infodict:
3473 infodict['__files_to_move'] = {}
b1940459 3474 try:
3475 files_to_delete, infodict = pp.run(infodict)
3476 except PostProcessingError as e:
3477 # Must be True and not 'only_download'
3478 if self.params.get('ignoreerrors') is True:
3479 self.report_error(e)
3480 return infodict
3481 raise
3482
5bfa4862 3483 if not files_to_delete:
dcf64d43 3484 return infodict
5bfa4862 3485 if self.params.get('keepvideo', False):
3486 for f in files_to_delete:
dcf64d43 3487 infodict['__files_to_move'].setdefault(f, '')
5bfa4862 3488 else:
43d7f5a5 3489 self._delete_downloaded_files(
3490 *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
dcf64d43 3491 return infodict
5bfa4862 3492
6f2287cb 3493 def run_all_pps(self, key, info, *, additional_pps=None):
17ba4343 3494 if key != 'video':
3495 self._forceprint(key, info)
3496 for pp in (additional_pps or []) + self._pps[key]:
3497 info = self.run_pp(pp, info)
ed5835b4 3498 return info
277d6ff5 3499
56d868db 3500 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
5bfa4862 3501 info = dict(ie_info)
56d868db 3502 info['__files_to_move'] = files_to_move or {}
415f8d51 3503 try:
3504 info = self.run_all_pps(key, info)
3505 except PostProcessingError as err:
3506 msg = f'Preprocessing: {err}'
3507 info.setdefault('__pending_error', msg)
3508 self.report_error(msg, is_error=False)
56d868db 3509 return info, info.pop('__files_to_move', None)
5bfa4862 3510
f46e2f9d 3511 def post_process(self, filename, info, files_to_move=None):
8222d8de 3512 """Run all the postprocessors on the given file."""
8222d8de 3513 info['filepath'] = filename
dcf64d43 3514 info['__files_to_move'] = files_to_move or {}
ed5835b4 3515 info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
dcf64d43 3516 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
3517 del info['__files_to_move']
ed5835b4 3518 return self.run_all_pps('after_move', info)
c1c9a79c 3519
5db07df6 3520 def _make_archive_id(self, info_dict):
e9fef7ee
S
3521 video_id = info_dict.get('id')
3522 if not video_id:
3523 return
5db07df6
PH
3524 # Future-proof against any change in case
3525 # and backwards compatibility with prior versions
e9fef7ee 3526 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
7012b23c 3527 if extractor is None:
1211bb6d
S
3528 url = str_or_none(info_dict.get('url'))
3529 if not url:
3530 return
e9fef7ee 3531 # Try to find matching extractor for the URL and take its ie_key
8b7491c8 3532 for ie_key, ie in self._ies.items():
1211bb6d 3533 if ie.suitable(url):
8b7491c8 3534 extractor = ie_key
e9fef7ee
S
3535 break
3536 else:
3537 return
0647d925 3538 return make_archive_id(extractor, video_id)
5db07df6
PH
3539
3540 def in_download_archive(self, info_dict):
ae103564 3541 if not self.archive:
5db07df6
PH
3542 return False
3543
1e8fe57e 3544 vid_ids = [self._make_archive_id(info_dict)]
c200096c 3545 vid_ids.extend(info_dict.get('_old_archive_ids') or [])
1e8fe57e 3546 return any(id_ in self.archive for id_ in vid_ids)
c1c9a79c
PH
3547
3548 def record_download_archive(self, info_dict):
3549 fn = self.params.get('download_archive')
3550 if fn is None:
3551 return
5db07df6
PH
3552 vid_id = self._make_archive_id(info_dict)
3553 assert vid_id
ae103564 3554
a13e6848 3555 self.write_debug(f'Adding to archive: {vid_id}')
9c935fbc 3556 if is_path_like(fn):
ae103564 3557 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
3558 archive_file.write(vid_id + '\n')
a45e8619 3559 self.archive.add(vid_id)
dd82ffea 3560
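# Sketch of the archive bookkeeping above: each finished download is
# remembered as a single '<extractor> <id>' line (lower-cased extractor key)
# and lookups are plain membership tests. Hypothetical helpers; the real code
# goes through make_archive_id() and locked_file().
def archive_line(extractor_key, video_id):
    return f'{extractor_key.lower()} {video_id}'

def already_downloaded(archive, extractor_key, video_id):
    return archive_line(extractor_key, video_id) in archive

# already_downloaded({'youtube dQw4w9WgXcQ'}, 'Youtube', 'dQw4w9WgXcQ') -> True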
8c51aa65 3561 @staticmethod
8abeeb94 3562 def format_resolution(format, default='unknown'):
9359f3d4 3563 if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
fb04e403 3564 return 'audio only'
f49d89ee
PH
3565 if format.get('resolution') is not None:
3566 return format['resolution']
35615307 3567 if format.get('width') and format.get('height'):
ff51ed58 3568 return '%dx%d' % (format['width'], format['height'])
35615307 3569 elif format.get('height'):
ff51ed58 3570 return '%sp' % format['height']
35615307 3571 elif format.get('width'):
ff51ed58 3572 return '%dx?' % format['width']
3573 return default
8c51aa65 3574
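# Quick illustration of the labels format_resolution produces for a few
# representative (hypothetical) format dicts:
#   {'vcodec': 'none', 'acodec': 'mp4a'}  -> 'audio only'
#   {'width': 1920, 'height': 1080}       -> '1920x1080'
#   {'height': 720}                       -> '720p'
#   {'width': 640}                        -> '640x?'
#   {}                                    -> 'unknown'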
8130779d 3575 def _list_format_headers(self, *headers):
3576 if self.params.get('listformats_table', True) is not False:
591bb9d3 3577 return [self._format_out(header, self.Styles.HEADERS) for header in headers]
8130779d 3578 return headers
3579
c57f7757
PH
3580 def _format_note(self, fdict):
3581 res = ''
3582 if fdict.get('ext') in ['f4f', 'f4m']:
f304da8a 3583 res += '(unsupported)'
32f90364
PH
3584 if fdict.get('language'):
3585 if res:
3586 res += ' '
f304da8a 3587 res += '[%s]' % fdict['language']
c57f7757 3588 if fdict.get('format_note') is not None:
f304da8a 3589 if res:
3590 res += ' '
3591 res += fdict['format_note']
c57f7757 3592 if fdict.get('tbr') is not None:
f304da8a 3593 if res:
3594 res += ', '
3595 res += '%4dk' % fdict['tbr']
c57f7757
PH
3596 if fdict.get('container') is not None:
3597 if res:
3598 res += ', '
3599 res += '%s container' % fdict['container']
3089bc74
S
3600 if (fdict.get('vcodec') is not None
3601 and fdict.get('vcodec') != 'none'):
c57f7757
PH
3602 if res:
3603 res += ', '
3604 res += fdict['vcodec']
91c7271a 3605 if fdict.get('vbr') is not None:
c57f7757
PH
3606 res += '@'
3607 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
3608 res += 'video@'
3609 if fdict.get('vbr') is not None:
3610 res += '%4dk' % fdict['vbr']
fbb21cf5 3611 if fdict.get('fps') is not None:
5d583bdf
S
3612 if res:
3613 res += ', '
3614 res += '%sfps' % fdict['fps']
c57f7757
PH
3615 if fdict.get('acodec') is not None:
3616 if res:
3617 res += ', '
3618 if fdict['acodec'] == 'none':
3619 res += 'video only'
3620 else:
3621 res += '%-5s' % fdict['acodec']
3622 elif fdict.get('abr') is not None:
3623 if res:
3624 res += ', '
3625 res += 'audio'
3626 if fdict.get('abr') is not None:
3627 res += '@%3dk' % fdict['abr']
3628 if fdict.get('asr') is not None:
3629 res += ' (%5dHz)' % fdict['asr']
3630 if fdict.get('filesize') is not None:
3631 if res:
3632 res += ', '
3633 res += format_bytes(fdict['filesize'])
9732d77e
PH
3634 elif fdict.get('filesize_approx') is not None:
3635 if res:
3636 res += ', '
3637 res += '~' + format_bytes(fdict['filesize_approx'])
c57f7757 3638 return res
91c7271a 3639
aebb4f4b 3640 def _get_formats(self, info_dict):
3641 if info_dict.get('formats') is None:
3642 if info_dict.get('url') and info_dict.get('_type', 'video') == 'video':
3643 return [info_dict]
3644 return []
3645 return info_dict['formats']
b69fd25c 3646
aebb4f4b 3647 def render_formats_table(self, info_dict):
3648 formats = self._get_formats(info_dict)
3649 if not formats:
3650 return
8130779d 3651 if self.params.get('listformats_table', True) is False:
76d321f6 3652 table = [
3653 [
3654 format_field(f, 'format_id'),
3655 format_field(f, 'ext'),
3656 self.format_resolution(f),
8130779d 3657 self._format_note(f)
d5d1df8a 3658 ] for f in formats if (f.get('preference') or 0) >= -1000]
8130779d 3659 return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
3660
d816f61f 3661 def simplified_codec(f, field):
3662 assert field in ('acodec', 'vcodec')
3663 codec = f.get(field, 'unknown')
f5ea4748 3664 if not codec:
3665 return 'unknown'
3666 elif codec != 'none':
d816f61f 3667 return '.'.join(codec.split('.')[:4])
3668
3669 if field == 'vcodec' and f.get('acodec') == 'none':
3670 return 'images'
3671 elif field == 'acodec' and f.get('vcodec') == 'none':
3672 return ''
3673 return self._format_out('audio only' if field == 'vcodec' else 'video only',
3674 self.Styles.SUPPRESS)
3675
591bb9d3 3676 delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
8130779d 3677 table = [
3678 [
591bb9d3 3679 self._format_out(format_field(f, 'format_id'), self.Styles.ID),
8130779d 3680 format_field(f, 'ext'),
3681 format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
563e0bf8 3682 format_field(f, 'fps', '\t%d', func=round),
8130779d 3683 format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
b8ed0f15 3684 format_field(f, 'audio_channels', '\t%s'),
8130779d 3685 delim,
3686 format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
563e0bf8 3687 format_field(f, 'tbr', '\t%dk', func=round),
8130779d 3688 shorten_protocol_name(f.get('protocol', '')),
3689 delim,
d816f61f 3690 simplified_codec(f, 'vcodec'),
563e0bf8 3691 format_field(f, 'vbr', '\t%dk', func=round),
d816f61f 3692 simplified_codec(f, 'acodec'),
563e0bf8 3693 format_field(f, 'abr', '\t%dk', func=round),
ae61d108 3694 format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
8130779d 3695 join_nonempty(
591bb9d3 3696 self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
a5387729 3697 self._format_out('DRM', 'light red') if f.get('has_drm') else None,
8130779d 3698 format_field(f, 'language', '[%s]'),
3699 join_nonempty(format_field(f, 'format_note'),
3700 format_field(f, 'container', ignore=(None, f.get('ext'))),
3701 delim=', '),
3702 delim=' '),
3703 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3704 header_line = self._list_format_headers(
b8ed0f15 3705 'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', 'CH', delim, '\tFILESIZE', '\tTBR', 'PROTO',
8130779d 3706 delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')
3707
3708 return render_table(
3709 header_line, table, hide_empty=True,
591bb9d3 3710 delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
8130779d 3711
3712 def render_thumbnails_table(self, info_dict):
88f23a18 3713 thumbnails = list(info_dict.get('thumbnails') or [])
cfb56d1a 3714 if not thumbnails:
8130779d 3715 return None
3716 return render_table(
ec11a9f4 3717 self._list_format_headers('ID', 'Width', 'Height', 'URL'),
177662e0 3718 [[t.get('id'), t.get('width') or 'unknown', t.get('height') or 'unknown', t['url']] for t in thumbnails])
2412044c 3719
8130779d 3720 def render_subtitles_table(self, video_id, subtitles):
2412044c 3721 def _row(lang, formats):
49c258e1 3722 exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
2412044c 3723 if len(set(names)) == 1:
7aee40c1 3724 names = [] if names[0] == 'unknown' else names[:1]
2412044c 3725 return [lang, ', '.join(names), ', '.join(exts)]
3726
8130779d 3727 if not subtitles:
3728 return None
3729 return render_table(
ec11a9f4 3730 self._list_format_headers('Language', 'Name', 'Formats'),
2412044c 3731 [_row(lang, formats) for lang, formats in subtitles.items()],
8130779d 3732 hide_empty=True)
3733
3734 def __list_table(self, video_id, name, func, *args):
3735 table = func(*args)
3736 if not table:
3737 self.to_screen(f'{video_id} has no {name}')
3738 return
3739 self.to_screen(f'[info] Available {name} for {video_id}:')
3740 self.to_stdout(table)
3741
3742 def list_formats(self, info_dict):
3743 self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)
3744
3745 def list_thumbnails(self, info_dict):
3746 self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)
3747
3748 def list_subtitles(self, video_id, subtitles, name='subtitles'):
3749 self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
a504ced0 3750
dca08720
PH
3751 def urlopen(self, req):
3752 """ Start an HTTP download """
f9934b96 3753 if isinstance(req, str):
67dda517 3754 req = sanitized_Request(req)
19a41fc6 3755 return self._opener.open(req, timeout=self._socket_timeout)
dca08720
PH
3756
3757 def print_debug_header(self):
3758 if not self.params.get('verbose'):
3759 return
49a57e70 3760
a057779d 3761 from . import _IN_CLI # Must be delayed import
3762
560738f3 3763 # These imports can be slow. So import them only as needed
3764 from .extractor.extractors import _LAZY_LOADER
e756f45b
M
3765 from .extractor.extractors import (
3766 _PLUGIN_CLASSES as plugin_ies,
3767 _PLUGIN_OVERRIDES as plugin_ie_overrides
3768 )
560738f3 3769
49a57e70 3770 def get_encoding(stream):
2a938746 3771 ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
49a57e70 3772 if not supports_terminal_sequences(stream):
53973b4d 3773 from .utils import WINDOWS_VT_MODE # Must be imported locally
e3c7d495 3774 ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
49a57e70 3775 return ret
3776
591bb9d3 3777 encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
49a57e70 3778 locale.getpreferredencoding(),
3779 sys.getfilesystemencoding(),
591bb9d3 3780 self.get_encoding(),
3781 ', '.join(
64fa820c 3782 f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
591bb9d3 3783 if stream is not None and key != 'console')
3784 )
883d4b1e 3785
3786 logger = self.params.get('logger')
3787 if logger:
3788 write_debug = lambda msg: logger.debug(f'[debug] {msg}')
3789 write_debug(encoding_str)
3790 else:
96565c7e 3791 write_string(f'[debug] {encoding_str}\n', encoding=None)
49a57e70 3792 write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')
734f90bb 3793
4c88ff87 3794 source = detect_variant()
70b23409 3795 if VARIANT not in (None, 'pip'):
3796 source += '*'
a5387729 3797 klass = type(self)
36eaf303 3798 write_debug(join_nonempty(
b5e7a2e6 3799 f'{"yt-dlp" if REPOSITORY == "yt-dlp/yt-dlp" else REPOSITORY} version',
392389b7 3800 f'{CHANNEL}@{__version__}',
29cb20bd 3801 f'[{RELEASE_GIT_HEAD[:9]}]' if RELEASE_GIT_HEAD else '',
36eaf303 3802 '' if source == 'unknown' else f'({source})',
a5387729 3803 '' if _IN_CLI else 'API' if klass == YoutubeDL else f'API:{self.__module__}.{klass.__qualname__}',
36eaf303 3804 delim=' '))
497074f0 3805
3806 if not _IN_CLI:
3807 write_debug(f'params: {self.params}')
3808
6e21fdd2 3809 if not _LAZY_LOADER:
3810 if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
49a57e70 3811 write_debug('Lazy loading extractors is forcibly disabled')
6e21fdd2 3812 else:
49a57e70 3813 write_debug('Lazy loading extractors is disabled')
8a82af35 3814 if self.params['compat_opts']:
3815 write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
36eaf303 3816
b5e7a2e6 3817 if current_git_head():
3818 write_debug(f'Git HEAD: {current_git_head()}')
b1f94422 3819 write_debug(system_identifier())
d28b5171 3820
8913ef74 3821 exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
3822 ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
3823 if ffmpeg_features:
19a03940 3824 exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))
8913ef74 3825
4c83c967 3826 exe_versions['rtmpdump'] = rtmpdump_version()
feee8d32 3827 exe_versions['phantomjs'] = PhantomJSwrapper._version()
d28b5171 3828 exe_str = ', '.join(
2831b468 3829 f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
3830 ) or 'none'
49a57e70 3831 write_debug('exe versions: %s' % exe_str)
dca08720 3832
1d485a1a 3833 from .compat.compat_utils import get_package_info
9b8ee23b 3834 from .dependencies import available_dependencies
3835
3836 write_debug('Optional libraries: %s' % (', '.join(sorted({
1d485a1a 3837 join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
9b8ee23b 3838 })) or 'none'))
2831b468 3839
97ec5bc5 3840 self._setup_opener()
dca08720 3841 proxy_map = {}
3842 for handler in self._opener.handlers:
3843 if hasattr(handler, 'proxies'):
3844 proxy_map.update(handler.proxies)
49a57e70 3845 write_debug(f'Proxy map: {proxy_map}')
dca08720 3846
e756f45b 3847 for plugin_type, plugins in {'Extractor': plugin_ies, 'Post-Processor': plugin_pps}.items():
3848 display_list = ['%s%s' % (
8e40b9d1 3849 klass.__name__, '' if klass.__name__ == name else f' as {name}')
e756f45b 3850 for name, klass in plugins.items()]
3851 if plugin_type == 'Extractor':
3852 display_list.extend(f'{plugins[-1].IE_NAME.partition("+")[2]} ({parent.__name__})'
3853 for parent, plugins in plugin_ie_overrides.items())
3854 if not display_list:
3855 continue
3856 write_debug(f'{plugin_type} Plugins: {", ".join(sorted(display_list))}')
3857
8e40b9d1 3858 plugin_dirs = plugin_directories()
3859 if plugin_dirs:
3860 write_debug(f'Plugin directories: {plugin_dirs}')
3861
49a57e70 3862 # Not implemented
3863 if False and self.params.get('call_home'):
0f06bcd7 3864 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
49a57e70 3865 write_debug('Public IP address: %s' % ipaddr)
58b1f00d 3866 latest_version = self.urlopen(
0f06bcd7 3867 'https://yt-dl.org/latest/version').read().decode()
58b1f00d 3868 if version_tuple(latest_version) > version_tuple(__version__):
3869 self.report_warning(
3870 'You are using an outdated version (newest version: %s)! '
3871 'See https://yt-dl.org/update if you need help updating.' %
3872 latest_version)
3873
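Hedged sketch of the logger branch above: when a 'logger' is supplied in params, every '[debug] ...' line is routed through logger.debug() instead of being written to the console (the logger name below is arbitrary).

    import logging
    from yt_dlp import YoutubeDL

    logging.basicConfig(level=logging.DEBUG)
    ydl = YoutubeDL({'verbose': True, 'logger': logging.getLogger('yt-dlp')})
    ydl.print_debug_header()  # debug output goes through logger.debug()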
e344693b 3874 def _setup_opener(self):
97ec5bc5 3875 if hasattr(self, '_opener'):
3876 return
6ad14cab 3877 timeout_val = self.params.get('socket_timeout')
17bddf3e 3878 self._socket_timeout = 20 if timeout_val is None else float(timeout_val)
6ad14cab 3879
982ee69a 3880 opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
dca08720 3881 opts_cookiefile = self.params.get('cookiefile')
3882 opts_proxy = self.params.get('proxy')
3883
982ee69a 3884 self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
dca08720 3885
6a3f4c3f 3886 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
dca08720 3887 if opts_proxy is not None:
3888 if opts_proxy == '':
3889 proxies = {}
3890 else:
3891 proxies = {'http': opts_proxy, 'https': opts_proxy}
3892 else:
ac668111 3893 proxies = urllib.request.getproxies()
067aa17e 3894 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
dca08720 3895 if 'http' in proxies and 'https' not in proxies:
3896 proxies['https'] = proxies['http']
91410c9b 3897 proxy_handler = PerRequestProxyHandler(proxies)
a0ddb8a2 3898
3899 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
be4a824d 3900 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3901 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
fca6dba8 3902 redirect_handler = YoutubeDLRedirectHandler()
f9934b96 3903 data_handler = urllib.request.DataHandler()
6240b0a2 3904
3905 # When we pass our own FileHandler instance, build_opener won't add the
3906 # default FileHandler, which allows us to disable the file protocol since it
3907 # can be used for malicious purposes (see
067aa17e 3908 # https://github.com/ytdl-org/youtube-dl/issues/8227)
ac668111 3909 file_handler = urllib.request.FileHandler()
6240b0a2 3910
8300774c 3911 if not self.params.get('enable_file_urls'):
3912 def file_open(*args, **kwargs):
3913 raise urllib.error.URLError(
3914 'file:// URLs are explicitly disabled in yt-dlp for security reasons. '
3915 'Use --enable-file-urls to enable at your own risk.')
3916 file_handler.file_open = file_open
6240b0a2 3917
ac668111 3918 opener = urllib.request.build_opener(
fca6dba8 3919 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
2461f79d 3920
dca08720 3921 # Delete the default user-agent header, which would otherwise apply in
3922 # cases where our custom HTTP handler doesn't come into play
067aa17e 3923 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
dca08720 3924 opener.addheaders = []
3925 self._opener = opener
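Hedged configuration sketch for the branches above: a 'proxy' set to an empty string forces a direct connection, a proxy URL is applied to both http and https, and file:// support stays disabled unless 'enable_file_urls' is set (the proxy address below is hypothetical).

    from yt_dlp import YoutubeDL

    YoutubeDL({'proxy': ''})                         # empty string -> no proxies, ignore the environment
    YoutubeDL({'proxy': 'socks5://127.0.0.1:9050'})  # hypothetical proxy, used for http and https
    YoutubeDL({'enable_file_urls': True})            # re-enable file:// URLs at your own risk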
62fec3b2 3926
3927 def encode(self, s):
3928 if isinstance(s, bytes):
3929 return s # Already encoded
3930
3931 try:
3932 return s.encode(self.get_encoding())
3933 except UnicodeEncodeError as err:
3934 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3935 raise
3936
3937 def get_encoding(self):
3938 encoding = self.params.get('encoding')
3939 if encoding is None:
3940 encoding = preferredencoding()
3941 return encoding
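A small sketch of the two helpers above, assuming the 'encoding' param is honoured as shown (otherwise it falls back to preferredencoding()):

    from yt_dlp import YoutubeDL

    ydl = YoutubeDL({'encoding': 'utf-8'})
    assert ydl.get_encoding() == 'utf-8'
    assert ydl.encode('naïve') == b'na\xc3\xafve'  # str -> bytes in the configured encoding
    assert ydl.encode(b'raw') == b'raw'            # bytes pass through unchanged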
ec82d85a 3942
e08a85d8 3943 def _write_info_json(self, label, ie_result, infofn, overwrite=None):
cb96c5be 3944 ''' Write infojson and return True = written, 'exists' = already exists, False = skipped, None = error '''
e08a85d8 3945 if overwrite is None:
3946 overwrite = self.params.get('overwrites', True)
80c03fa9 3947 if not self.params.get('writeinfojson'):
3948 return False
3949 elif not infofn:
3950 self.write_debug(f'Skipping writing {label} infojson')
3951 return False
3952 elif not self._ensure_dir_exists(infofn):
3953 return None
e08a85d8 3954 elif not overwrite and os.path.exists(infofn):
80c03fa9 3955 self.to_screen(f'[info] {label.title()} metadata is already present')
cb96c5be 3956 return 'exists'
3957
3958 self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
3959 try:
3960 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
3961 return True
86e5f3ed 3962 except OSError:
cb96c5be 3963 self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
3964 return None
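The four-way return value above is driven by params that appear in the method body; a hedged configuration sketch:

    from yt_dlp import YoutubeDL

    ydl = YoutubeDL({
        'writeinfojson': True,   # without this the method returns False (skip)
        'clean_infojson': True,  # forwarded to sanitize_info() before writing
        'overwrites': False,     # an existing file then short-circuits to 'exists'
    })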
80c03fa9 3965
3966 def _write_description(self, label, ie_result, descfn):
3967 ''' Write description and return True = written, False = skipped, None = error '''
3968 if not self.params.get('writedescription'):
3969 return False
3970 elif not descfn:
3971 self.write_debug(f'Skipping writing {label} description')
3972 return False
3973 elif not self._ensure_dir_exists(descfn):
3974 return None
3975 elif not self.params.get('overwrites', True) and os.path.exists(descfn):
3976 self.to_screen(f'[info] {label.title()} description is already present')
3977 elif ie_result.get('description') is None:
88fb9425 3978 self.to_screen(f'[info] There\'s no {label} description to write')
80c03fa9 3979 return False
3980 else:
3981 try:
3982 self.to_screen(f'[info] Writing {label} description to: {descfn}')
86e5f3ed 3983 with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
80c03fa9 3984 descfile.write(ie_result['description'])
86e5f3ed 3985 except OSError:
80c03fa9 3986 self.report_error(f'Cannot write {label} description file {descfn}')
3987 return None
3988 return True
3989
3990 def _write_subtitles(self, info_dict, filename):
3991 ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
3992 ret = []
3993 subtitles = info_dict.get('requested_subtitles')
88fb9425 3994 if not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
80c03fa9 3995 # Subtitle download errors are already handled in the relevant IE,
3996 # so processing silently continues when the IE does not support subtitles
3997 return ret
88fb9425 3998 elif not subtitles:
c8bc203f 3999 self.to_screen('[info] There are no subtitles for the requested languages')
88fb9425 4000 return ret
80c03fa9 4001 sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
4002 if not sub_filename_base:
4003 self.to_screen('[info] Skipping writing video subtitles')
4004 return ret
88fb9425 4005
80c03fa9 4006 for sub_lang, sub_info in subtitles.items():
4007 sub_format = sub_info['ext']
4008 sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
4009 sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
e04938ab 4010 existing_sub = self.existing_file((sub_filename_final, sub_filename))
4011 if existing_sub:
80c03fa9 4012 self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
e04938ab 4013 sub_info['filepath'] = existing_sub
4014 ret.append((existing_sub, sub_filename_final))
80c03fa9 4015 continue
4016
4017 self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
4018 if sub_info.get('data') is not None:
4019 try:
4020 # Use newline='' to prevent conversion of newline characters
4021 # See https://github.com/ytdl-org/youtube-dl/issues/10268
86e5f3ed 4022 with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
80c03fa9 4023 subfile.write(sub_info['data'])
4024 sub_info['filepath'] = sub_filename
4025 ret.append((sub_filename, sub_filename_final))
4026 continue
86e5f3ed 4027 except OSError:
80c03fa9 4028 self.report_error(f'Cannot write video subtitles file {sub_filename}')
4029 return None
4030
4031 try:
4032 sub_copy = sub_info.copy()
4033 sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
4034 self.dl(sub_filename, sub_copy, subtitle=True)
4035 sub_info['filepath'] = sub_filename
4036 ret.append((sub_filename, sub_filename_final))
6020e05d 4037 except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
c70c418d 4038 msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
6020e05d 4039 if self.params.get('ignoreerrors') is not True: # False or 'only_download'
c70c418d 4040 if not self.params.get('ignoreerrors'):
4041 self.report_error(msg)
4042 raise DownloadError(msg)
4043 self.report_warning(msg)
519804a9 4044 return ret
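Hedged usage sketch: subtitles are only written when 'writesubtitles' (or 'writeautomaticsub') is enabled and the extractor populated requested_subtitles, typically via 'subtitleslangs' (the URL below is hypothetical).

    from yt_dlp import YoutubeDL

    ydl = YoutubeDL({
        'writesubtitles': True,
        'subtitleslangs': ['en'],
        'skip_download': True,  # fetch only metadata and subtitle files
    })
    ydl.download(['https://example.com/watch?v=xyz'])  # hypothetical URL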
80c03fa9 4045
4046 def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
4047 ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
6c4fd172 4048 write_all = self.params.get('write_all_thumbnails', False)
80c03fa9 4049 thumbnails, ret = [], []
6c4fd172 4050 if write_all or self.params.get('writethumbnail', False):
0202b52a 4051 thumbnails = info_dict.get('thumbnails') or []
88fb9425 4052 if not thumbnails:
c8bc203f 4053 self.to_screen(f'[info] There are no {label} thumbnails to download')
88fb9425 4054 return ret
6c4fd172 4055 multiple = write_all and len(thumbnails) > 1
ec82d85a 4056
80c03fa9 4057 if thumb_filename_base is None:
4058 thumb_filename_base = filename
4059 if thumbnails and not thumb_filename_base:
4060 self.write_debug(f'Skipping writing {label} thumbnail')
4061 return ret
4062
dd0228ce 4063 for idx, t in list(enumerate(thumbnails))[::-1]:
80c03fa9 4064 thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
aa9369a2 4065 thumb_display_id = f'{label} thumbnail {t["id"]}'
80c03fa9 4066 thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
4067 thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
ec82d85a 4068
e04938ab 4069 existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
4070 if existing_thumb:
aa9369a2 4071 self.to_screen('[info] %s is already present' % (
4072 thumb_display_id if multiple else f'{label} thumbnail').capitalize())
e04938ab 4073 t['filepath'] = existing_thumb
4074 ret.append((existing_thumb, thumb_filename_final))
ec82d85a 4075 else:
80c03fa9 4076 self.to_screen(f'[info] Downloading {thumb_display_id} ...')
ec82d85a 4077 try:
297e9952 4078 uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
80c03fa9 4079 self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
d3d89c32 4080 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
ec82d85a 4081 shutil.copyfileobj(uf, thumbf)
80c03fa9 4082 ret.append((thumb_filename, thumb_filename_final))
885cc0b7 4083 t['filepath'] = thumb_filename
3158150c 4084 except network_exceptions as err:
dd0228ce 4085 thumbnails.pop(idx)
80c03fa9 4086 self.report_warning(f'Unable to download {thumb_display_id}: {err}')
6c4fd172 4087 if ret and not write_all:
4088 break
0202b52a 4089 return ret
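Hedged sketch of the two modes handled above: with 'writethumbnail' the loop iterates in reverse and stops after the first successful download, while 'write_all_thumbnails' keeps going and prefixes each filename with the thumbnail id (the `multiple` branch).

    from yt_dlp import YoutubeDL

    YoutubeDL({'writethumbnail': True})        # first thumbnail that downloads successfully
    YoutubeDL({'write_all_thumbnails': True})  # every thumbnail, id added to the filename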