import collections
import contextlib
import datetime
import errno
import fileinput
import functools
import io
import itertools
import json
import locale
import operator
import os
import random
import re
import shutil
import string
import subprocess
import sys
import tempfile
import time
import tokenize
import traceback
import unicodedata

from .cache import Cache
from .compat import urllib  # isort: split
from .compat import compat_os_name, compat_shlex_quote
from .cookies import load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.common import UnsupportedURLIE
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .plugins import directories as plugin_directories
from .postprocessor import _PLUGIN_CLASSES as plugin_pps
from .postprocessor import (
    EmbedThumbnailPP,
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    FFmpegVideoConvertorPP,
    MoveFilesAfterDownloadPP,
    get_postprocessor,
)
from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
from .update import REPOSITORY, current_git_head, detect_variant
from .utils import (
    DEFAULT_OUTTMPL,
    IDENTITY,
    LINK_TEMPLATES,
    MEDIA_EXTENSIONS,
    NO_DEFAULT,
    NUMBER_RE,
    OUTTMPL_TYPES,
    POSTPROCESS_WHEN,
    STR_FORMAT_RE_TMPL,
    STR_FORMAT_TYPES,
    ContentTooShortError,
    DateRange,
    DownloadCancelled,
    DownloadError,
    EntryNotInPlaylist,
    ExistingVideoReached,
    ExtractorError,
    FormatSorter,
    GeoRestrictedError,
    HEADRequest,
    ISO3166Utils,
    LazyList,
    MaxDownloadsReached,
    Namespace,
    PagedList,
    PerRequestProxyHandler,
    PlaylistEntries,
    Popen,
    PostProcessingError,
    ReExtractInfo,
    RejectedVideoReached,
    SameFileError,
    UnavailableVideoError,
    UserNotLive,
    YoutubeDLCookieProcessor,
    YoutubeDLHandler,
    YoutubeDLRedirectHandler,
    age_restricted,
    args_to_str,
    bug_reports_message,
    date_from_str,
    deprecation_warning,
    determine_ext,
    determine_protocol,
    encode_compat_str,
    encodeFilename,
    error_to_compat_str,
    escapeHTML,
    expand_path,
    filter_dict,
    float_or_none,
    format_bytes,
    format_decimal_suffix,
    format_field,
    formatSeconds,
    get_compatible_ext,
    get_domain,
    int_or_none,
    iri_to_uri,
    is_path_like,
    join_nonempty,
    locked_file,
    make_archive_id,
    make_dir,
    make_HTTPS_handler,
    merge_headers,
    network_exceptions,
    number_of_digits,
    orderedSet,
    orderedSet_from_options,
    parse_filesize,
    preferredencoding,
    prepend_extension,
    remove_terminal_sequences,
    render_table,
    replace_extension,
    sanitize_filename,
    sanitize_path,
    sanitize_url,
    sanitized_Request,
    std_headers,
    str_or_none,
    strftime_or_none,
    subtitles_filename,
    supports_terminal_sequences,
    system_identifier,
    timetuple_from_msec,
    to_high_limit_path,
    traverse_obj,
    try_call,
    try_get,
    url_basename,
    variadic,
    version_tuple,
    windows_enable_vt_mode,
    write_json_file,
    write_string,
)
from .version import CHANNEL, RELEASE_GIT_HEAD, VARIANT, __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL:
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Given a video URL, the downloader doesn't know by itself
    how to extract all the needed information (that is the task of the
    InfoExtractors), so it has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge for the InfoExtractors
    that are added to it, so this is a "mutual registration".

    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    netrc_location:    Location of the netrc file. Defaults to ~/.netrc.
    netrc_cmd:         Use a shell command to get credentials
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A dict with keys WHEN mapped to a list of templates to
                       print to stdout. The allowed keys are video or any of the
                       items in utils.POSTPROCESS_WHEN.
                       For compatibility, a single list is also accepted
    print_to_file:     A dict with keys WHEN (same as forceprint) mapped to
                       a list of tuples with (template, filename)
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or list_thumbnails is used
    format:            Video format code. See "FORMAT SELECTION" for more details.
                       You can also pass a function. The function takes 'ctx' as
                       argument and returns the formats to download.
                       See "build_format_selector" for an implementation
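                       E.g. a minimal callable selector could look like this
                       (a sketch; it just picks the last format in the listed
                       order, which is normally the most preferred one):

                           def format_selector(ctx):
                               # 'formats' is the list of already-extracted format dicts
                               yield ctx['formats'][-1]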
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental)
    format_sort:       A list of fields by which to sort the video formats.
                       See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    prefer_free_formats: Whether to prefer video formats with free containers
                       over non-free ones of the same quality.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file
    check_formats:     Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none),
                       'selected' (check selected formats),
                       or None (check only if requested by extractor)
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py)
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       For compatibility with youtube-dl, a single string can also be used
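                       E.g. (an illustrative combination; any OUTTMPL_TYPES key
                       can be used the same way):

                           'paths': {'home': '~/Videos', 'temp': '/tmp/yt-dlp'},
                           'outtmpl': {'default': '%(title)s [%(id)s].%(ext)s',
                                       'thumbnail': '%(id)s.%(ext)s'},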
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be Windows-compatible
    ignoreerrors:      Do not stop on download/postprocessing errors.
                       Can be 'only_download' to ignore only download errors.
                       Default is 'only_download' for CLI, but False for API
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped
    allowed_extractors: List of regexes to match against extractor names that are allowed
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False
                       For compatibility with youtube-dl,
                       "nooverwrites" may also be used instead
    playlist_items:    Specific indices of playlist to download.
    playlistrandom:    Download playlist items in random order.
    lazy_playlist:     Process playlist entries as they are received.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Print everything to stderr instead of stdout.
    consoletitle:      Display progress in the console window's titlebar.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video metadata to a .info.json file
    clean_infojson:    Remove internal metadata from the infojson
    getcomments:       Extract video comments. These will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc.
                       also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. The language can be prefixed with a "-" to
                       exclude it from the requested languages, e.g. ['all', '-live_chat']
    keepvideo:         Keep the video file after post-processing
    daterange:         A utils.DateRange object; download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Videos unsuitable for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  A set, or the name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_per_url:     Whether break_on_reject and break_on_existing
                       should act on each input URL as opposed to for the entire queue
    cookiefile:        File name or text stream from where cookies should be read and dumped to
    cookiesfrombrowser: A tuple containing the name of the browser, the profile
                       name/path from where cookies are loaded, the name of the keyring,
                       and the container name, e.g. ('chrome', ) or
                       ('vivaldi', 'default', 'BASICTEXT') or ('firefox', 'default', None, 'Meta')
    legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
                       support RFC 5746 secure renegotiation
    nocheckcertificate: Do not verify SSL certificates
    client_certificate: Path to client certificate file in PEM format. May include the private key
    client_certificate_key: Path to private key file for client certificate
    client_certificate_password: Password for client certificate private key, if encrypted.
                       If not provided and the key is encrypted, yt-dlp will ask interactively
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       (Only supported by some extractors)
    enable_file_urls:  Enable file:// URLs. This is disabled by default for security reasons.
    http_headers:      A dictionary of custom headers to be used for all requests
    proxy:             URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic: Print out sent and received HTTP traffic
    default_search:    Prepend this string if an input URL is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Whether to resolve and process url_results further
                       * False:     Always process. Default for API
                       * True:      Never process
                       * 'in_playlist': Do not process inside playlist/multi_video
                       * 'discard': Always process, but don't return the result
                                    from inside playlist/multi_video
                       * 'discard_in_playlist': Same as "discard", but only for
                                    playlists (not multi_video). Default for CLI
    wait_for_video:    If given, wait for scheduled streams to become available.
                       The value should be a tuple containing the range
                       (min_secs, max_secs) to wait between retries
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Allowed values are
                               the entries of utils.POSTPROCESS_WHEN
                               Assumed to be 'post_process' if not given
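                       E.g. a sketch that extracts audio and then embeds metadata
                       (all keys other than 'key' and 'when' are passed through
                       to the postprocessor; the values below are only illustrative):

                           'postprocessors': [
                               {'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3'},
                               {'key': 'FFmpegMetadata', 'when': 'post_process'},
                           ]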
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading" or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
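                       E.g. a minimal hook (a sketch relying only on the fields
                       described above):

                           def progress_hook(d):
                               if d['status'] == 'finished':
                                   print('Finished downloading', d['filename'])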
    postprocessor_hooks: A list of functions that get called on postprocessing
                       progress, with a dictionary with the entries
                       * status: One of "started", "processing", or "finished".
                                 Check this first and ignore unknown values.
                       * postprocessor: Name of the postprocessor
                       * info_dict: The extracted info_dict

                       Progress hooks are guaranteed to be called at least twice
                       (with status "started" and "finished") if the processing is successful.
    merge_output_format: "/" separated list of extensions to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                                           about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called for every video with the signature
                       (info_dict, *, incomplete: bool) -> Optional[str]
                       For backward compatibility with youtube-dl, the signature
                       (info_dict) -> Optional[str] is also allowed.
                       - If it returns a message, the video is ignored.
                       - If it returns None, the video is downloaded.
                       - If it returns utils.NO_DEFAULT, the user is interactively
                         asked whether to download the video.
                       - Raise utils.DownloadCancelled(msg) to abort remaining
                         downloads when a video is rejected.
                       match_filter_func in utils.py is one example for this.
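                       E.g. a sketch that skips videos shorter than 60 seconds
                       (the threshold is an arbitrary assumption):

                           def match_filter(info_dict, *, incomplete=False):
                               duration = info_dict.get('duration')
                               if duration and duration < 60:
                                   return 'Video is shorter than a minute; skipping'
                               return None  # proceed with the download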
    color:             A dictionary with output stream names as keys
                       and their respective color policy as values.
                       Can also just be a single color policy,
                       in which case it applies to all outputs.
                       Valid stream names are 'stdout' and 'stderr'.
                       Valid color policies are one of 'always', 'auto', 'no_color' or 'never'.
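                       E.g. {'stdout': 'auto', 'stderr': 'no_color'}
                       (an illustrative combination of the policies above)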
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader
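                       E.g. {'m3u8': 'native', 'default': 'aria2c'}
                       (aria2c here is just an example of an external downloader executable)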
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat, format-sort,
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
                       Refer to __init__.py for their implementation
    progress_template: Dictionary of templates for progress outputs.
                       Allowed keys are 'download', 'postprocess',
                       'download-title' (console title) and 'postprocess-title'.
                       The template is mapped onto a dictionary with keys 'progress' and 'info'
    retry_sleep_functions: Dictionary of functions that take the number of attempts
                       as argument and return the time to sleep in seconds.
                       Allowed keys are 'http', 'fragment', 'file_access'
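                       E.g. {'http': lambda n: 2 ** n} (a sketch of exponential
                       backoff; the exact curve is an arbitrary choice)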
    download_ranges:   A callback function that gets called for every video with
                       the signature (info_dict, ydl) -> Iterable[Section].
                       Only the returned sections will be downloaded.
                       Each Section is a dict with the following keys:
                       * start_time: Start time of the section in seconds
                       * end_time: End time of the section in seconds
                       * title: Section title (Optional)
                       * index: Section number (Optional)
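                       E.g. a sketch that always requests only the first 30 seconds
                       (the cutoff is an arbitrary assumption):

                           def download_ranges(info_dict, ydl):
                               return [{'start_time': 0, 'end_time': 30, 'title': 'intro'}]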
    force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
    noprogress:        Do not print the progress bar
    live_from_start:   Whether to download livestream videos from the start

    The following parameters are not used by YoutubeDL itself; they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PPs.
                       For compatibility with youtube-dl, a single list of args
                       can also be used

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors (default: 3)
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists into different formats at
                       discontinuities such as ad breaks (default: False)
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       E.g. {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched:      Mark videos watched (even with --simulate). Only for YouTube

    The following options are deprecated and may be removed in the future:

    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
                       - `raise DownloadCancelled(msg)` in match_filter instead
    force_generic_extractor: Force downloader to use the generic extractor
                       - Use allowed_extractors = ['generic', 'default']
    playliststart:     - Use playlist_items
                       Playlist item to start at.
    playlistend:       - Use playlist_items
                       Playlist item to end at.
    playlistreverse:   - Use playlist_items
                       Download playlist items in reverse order.
    forceurl:          - Use forceprint
                       Force printing final URL.
    forcetitle:        - Use forceprint
                       Force printing title.
    forceid:           - Use forceprint
                       Force printing ID.
    forcethumbnail:    - Use forceprint
                       Force printing thumbnail URL.
    forcedescription:  - Use forceprint
                       Force printing description.
    forcefilename:     - Use forceprint
                       Force printing final filename.
    forceduration:     - Use forceprint
                       Force printing duration.
    allsubtitles:      - Use subtitleslangs = ['all']
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    include_ads:       - Doesn't work
                       Download ads as well
    call_home:         - Not implemented
                       Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging.
    post_hooks:        - Register a custom postprocessor instead
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.
    prefer_ffmpeg:     - avconv support is deprecated
                       If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: - Use extractor_args
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
    no_color:          Same as `color='no_color'`
    """
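
    # A minimal usage sketch (illustrative only; the URL and the option values
    # below are assumptions, not defaults of this module):
    #
    #   from yt_dlp import YoutubeDL
    #
    #   params = {
    #       'format': 'bestvideo*+bestaudio/best',
    #       'outtmpl': {'default': '%(title)s [%(id)s].%(ext)s'},
    #   }
    #   with YoutubeDL(params) as ydl:
    #       ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])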

    _NUMERIC_FIELDS = {
        'width', 'height', 'asr', 'audio_channels', 'fps',
        'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
    }

    _format_fields = {
        # NB: Keep in sync with the docstring of extractor/common.py
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
        'preference', 'language', 'language_preference', 'quality', 'source_preference',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'hls_aes', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
    }
    _format_selection_exts = {
        'audio': set(MEDIA_EXTENSIONS.common_audio),
        'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
        'storyboards': set(MEDIA_EXTENSIONS.storyboards),
    }

    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        if params is None:
            params = {}
        self.params = params
        self._ies = {}
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._num_videos = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)

        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            out=stdout,
            error=sys.stderr,
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
        )

        try:
            windows_enable_vt_mode()
        except Exception as e:
            self.write_debug(f'Failed to enable VT mode: {e}')

        if self.params.get('no_color'):
            if self.params.get('color') is not None:
                self.report_warning('Overwriting params from "color" with "no_color"')
            self.params['color'] = 'no_color'

        term_allow_color = os.environ.get('TERM', '').lower() != 'dumb'

        def process_color_policy(stream):
            stream_name = {sys.stdout: 'stdout', sys.stderr: 'stderr'}[stream]
            policy = traverse_obj(self.params, ('color', (stream_name, None), {str}), get_all=False)
            if policy in ('auto', None):
                return term_allow_color and supports_terminal_sequences(stream)
            assert policy in ('always', 'never', 'no_color')
            return {'always': True, 'never': False}.get(policy, policy)

        self._allow_colors = Namespace(**{
            name: process_color_policy(stream)
            for name, stream in self._out_files.items_ if name != 'console'
        })

        # The code is left like this to be reused for future deprecations
        MIN_SUPPORTED, MIN_RECOMMENDED = (3, 7), (3, 7)
        current_version = sys.version_info[:2]
        if current_version < MIN_RECOMMENDED:
            msg = ('Support for Python version %d.%d has been deprecated. '
                   'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details.'
                   '\n You will no longer receive updates on this version')
            if current_version < MIN_SUPPORTED:
                msg = 'Python version %d.%d is no longer supported'
            self.deprecated_feature(
                f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                ' If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        if self.params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = shutil.get_terminal_size().columns
                width_args = [] if width is None else ['-w', str(width)]
                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        if auto_init and auto_init != 'no_verbose_header':
            self.print_debug_header()

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecated_feature(msg)

        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        if self.params.get('simulate') is None and any((
            self.params.get('list_thumbnails'),
            self.params.get('listformats'),
            self.params.get('listsubtitles'),
        )):
            self.params['simulate'] = 'list_only'

        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}

        if auto_init:
            self.add_default_info_extractors()

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))

        # Set http_headers defaults according to std_headers
        self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))

        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                when=when)

        self._setup_opener()

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            archive = set()
            if fn is None:
                return archive
            elif not is_path_like(fn):
                return fn

            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
            return archive

        self.archive = preload_download_archive(self.params.get('download_archive'))

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key. It will try to get one from
        the _ies list; if there is no instance, it will create a new one and add
        it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        all_ies = {ie.IE_NAME.lower(): ie for ie in gen_extractor_classes()}
        all_ies['end'] = UnsupportedURLIE()
        try:
            ie_names = orderedSet_from_options(
                self.params.get('allowed_extractors', ['default']), {
                    'all': list(all_ies),
                    'default': [name for name, ie in all_ies.items() if ie._ENABLED],
                }, use_regex=True)
        except re.error as e:
            raise ValueError(f'Wrong regex for allowed_extractors: {e.pattern}')
        for name in ie_names:
            self.add_info_extractor(all_ies[name])
        self.write_debug(f'Loaded {len(ie_names)} extractors')

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
            for pp in pps:
                pp.add_progress_hook(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. '
                                     'Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. '
                                     'Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)

    def to_screen(self, message, skip_eol=False, quiet=None, only_once=False):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            return
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            return
        self._write_string(
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen, only_once=only_once)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)

    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
            return
        self._write_string(code, self._out_files.console)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        else:
            self._send_console_code(f'\033]0;{message}\007')

    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[22;0t')  # Save the title on stack

    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[23;0t')  # Restore the title from stack

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None, is_error=True):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        @param tb          If given, is additional traceback information
        @param is_error    Whether to raise error according to ignoreerrors
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not is_error:
            return
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    Styles = Namespace(
        HEADERS='yellow',
        EMPHASIS='light blue',
        FILENAME='green',
        ID='green',
        DELIM='blue',
        ERROR='red',
        WARNING='yellow',
        SUPPRESS='light black',
    )

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        text = str(text)
        if test_encoding:
            original_text = text
            # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
                text = fallback
        return format_text(text, f) if allow_colors is True else text if fallback is None else fallback

    def _format_out(self, *args, **kwargs):
        return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)

    def _format_screen(self, *args, **kwargs):
        return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)

    def _format_err(self, *args, **kwargs):
        return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message, *, stacklevel=0):
        deprecation_warning(
            message, stacklevel=stacklevel + 1, printer=self.report_error, is_error=False)

    def deprecated_feature(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'Deprecated Feature: {message}')
        self.to_stderr(f'{self._format_err("Deprecated Feature:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, *args, **kwargs):
        '''
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

    def write_debug(self, message, only_once=False):
        '''Log debug message or print message to stderr'''
        if not self.params.get('verbose', False):
            return
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']

    def _parse_outtmpl(self):
        sanitize = IDENTITY
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict), '"paths" parameter must be a dictionary'
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$'
        # correspondingly that is not what we want since we need to keep
        # '%%' intact for template dict substitution step. Working around
        # with boundary-alike separator hack.
        sep = ''.join(random.choices(string.ascii_letters, k=32))
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. E.g. for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err

    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)
        return info_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed
        """

        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int, slice or "{field, ...}"
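        # e.g. %(tags.0)s, %(subtitles.en.-1.ext)s, %(formats.:.format_id)s
        # (illustrative field forms; see "OUTPUT TEMPLATE" in the README)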
        FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}
        FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {
            'inner': FIELD_INNER_RE,
            'field': rf'\w*(?:\.{FIELD_INNER_RE})*'
        }
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?xs)
            (?P<negate>-)?
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<remaining>
                (?P<alternate>(?<!\\),[^|&)]+)?
                (?:&(?P<replacement>.*?))?
                (?:\|(?P<default>.*?))?
            )$''')

        def _traverse_infodict(fields):
            fields = [f for x in re.split(r'\.({.+?})\.?', fields)
                      for f in ([x] if x.startswith('{') else x.split('.'))]
            for i in (0, -1):
                if fields and not fields[i]:
                    fields.pop(i)

            for i, f in enumerate(fields):
                if not f.startswith('{'):
                    continue
                assert f.endswith('}'), f'No closing brace for {f} in {fields}'
                fields[i] = {k: k.split('.') for k in f[1:-1].split(',')}

            return traverse_obj(info_dict, fields, is_user_input=True, traverse_string=True)

        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
            if sanitize and value == '':
                value = None
            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']
                else NO_DEFAULT))

        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            return repr(obj)

        class _ReplacementFormatter(string.Formatter):
            def get_field(self, field_name, args, kwargs):
                if field_name.isdigit():
                    return args[0], -1
                raise ValueError('Unsupported field')

        replacement_formatter = _ReplacementFormatter()

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            initial_field = mobj.group('fields') if mobj else ''
            value, replacement, default = None, None, na
            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                replacement = mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
                else:
                    break

            fmt = outer_mobj.group('format')
            if fmt == 's' and value is not None and key in field_size_compat_map.keys():
                fmt = f'0{field_size_compat_map[key]:d}d'

            if None not in (value, replacement):
ec9311c4 1291 try:
1292 value = replacement_formatter.format(replacement, value)
1293 except ValueError:
ebe1b4e3 1294 value, default = None, na
752cda38 1295
4476d2c7 1296 flags = outer_mobj.group('conversion') or ''
7d1eb38a 1297 str_fmt = f'{fmt[:-1]}s'
ebe1b4e3 1298 if value is None:
1299 value, fmt = default, 's'
1300 elif fmt[-1] == 'l': # list
4476d2c7 1301 delim = '\n' if '#' in flags else ', '
9e907ebd 1302 value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
524e2e4f 1303 elif fmt[-1] == 'j': # json
deae7c17 1304 value, fmt = json.dumps(
1305 value, default=_dumpjson_default,
9b9dad11 1306 indent=4 if '#' in flags else None, ensure_ascii='+' not in flags), str_fmt
47cdc68e 1307 elif fmt[-1] == 'h': # html
deae7c17 1308 value, fmt = escapeHTML(str(value)), str_fmt
524e2e4f 1309 elif fmt[-1] == 'q': # quoted
4476d2c7 1310 value = map(str, variadic(value) if '#' in flags else [value])
1311 value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
524e2e4f 1312 elif fmt[-1] == 'B': # bytes
0f06bcd7 1313 value = f'%{str_fmt}'.encode() % str(value).encode()
f5aa5cfb 1314 value, fmt = value.decode('utf-8', 'ignore'), 's'
524e2e4f 1315 elif fmt[-1] == 'U': # unicode normalized
524e2e4f 1316 value, fmt = unicodedata.normalize(
1317 # "+" = compatibility equivalence, "#" = NFD
4476d2c7 1318 'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
524e2e4f 1319 value), str_fmt
e0fd9573 1320 elif fmt[-1] == 'D': # decimal suffix
abbeeebc 1321 num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
1322 value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
1323 factor=1024 if '#' in flags else 1000)
37893bb0 1324 elif fmt[-1] == 'S': # filename sanitization
e0fd9573 1325 value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
7d1eb38a 1326 elif fmt[-1] == 'c':
524e2e4f 1327 if value:
1328 value = str(value)[0]
76a264ac 1329 else:
524e2e4f 1330 fmt = str_fmt
76a264ac 1331 elif fmt[-1] not in 'rs': # numeric
a439a3a4 1332 value = float_or_none(value)
752cda38 1333 if value is None:
1334 value, fmt = default, 's'
901130bb 1335
752cda38 1336 if sanitize:
1337 if fmt[-1] == 'r':
1338 # If value is an object, sanitize might convert it to a string
1339 # So we convert it to repr first
7d1eb38a 1340 value, fmt = repr(value), str_fmt
639f1cea 1341 if fmt[-1] in 'csr':
e0fd9573 1342 value = sanitizer(initial_field, value)
901130bb 1343
b868936c 1344 key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
385a27fa 1345 TMPL_DICT[key] = value
b868936c 1346 return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))
752cda38 1347
385a27fa 1348 return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT
143db31d 1349
819e0531 1350 def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
1351 outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
1352 return self.escape_outtmpl(outtmpl) % info_dict
1353
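A minimal usage sketch (not part of the original file) of the template engine defined above; the info dict values are invented for illustration:

    from yt_dlp import YoutubeDL

    ydl = YoutubeDL()
    sample = {'id': 'abc123', 'title': 'Demo video', 'ext': 'mp4'}
    print(ydl.evaluate_outtmpl('%(title)s [%(id)s].%(ext)s', sample))
    # -> 'Demo video [abc123].mp4'
    print(ydl.evaluate_outtmpl('%(uploader|Unknown)s', sample))
    # -> 'Unknown'  (missing field falls through to the |default branch in create_key)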
5127e92a 1354 def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
1355 assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
1356 if outtmpl is None:
bf1824b3 1357 outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
8222d8de 1358 try:
5127e92a 1359 outtmpl = self._outtmpl_expandpath(outtmpl)
e0fd9573 1360 filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
6a0546e3 1361 if not filename:
1362 return None
15da37c7 1363
5127e92a 1364 if tmpl_type in ('', 'temp'):
6a0546e3 1365 final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
1366 if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
1367 filename = replace_extension(filename, ext, final_ext)
5127e92a 1368 elif tmpl_type:
6a0546e3 1369 force_ext = OUTTMPL_TYPES[tmpl_type]
1370 if force_ext:
1371 filename = replace_extension(filename, force_ext, info_dict.get('ext'))
de6000d9 1372
bdc3fd2f
U
1373 # https://github.com/blackjack4494/youtube-dlc/issues/85
1374 trim_file_name = self.params.get('trim_file_name', False)
1375 if trim_file_name:
5c22c63d 1376 no_ext, *ext = filename.rsplit('.', 2)
1377 filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')
bdc3fd2f 1378
0202b52a 1379 return filename
8222d8de 1380 except ValueError as err:
6febd1c1 1381 self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
8222d8de
JMF
1382 return None
1383
5127e92a 1384 def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
1385 """Generate the output filename"""
1386 if outtmpl:
1387 assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
1388 dir_type = None
1389 filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
80c03fa9 1390 if not filename and dir_type not in ('', 'temp'):
1391 return ''
de6000d9 1392
c84aeac6 1393 if warn:
21cd8fae 1394 if not self.params.get('paths'):
de6000d9 1395 pass
1396 elif filename == '-':
c84aeac6 1397 self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
de6000d9 1398 elif os.path.isabs(filename):
c84aeac6 1399 self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
de6000d9 1400 if filename == '-' or not filename:
1401 return filename
1402
21cd8fae 1403 return self.get_output_path(dir_type, filename)
0202b52a 1404
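A hedged sketch (values invented) of how prepare_filename() combines the configured outtmpl with the optional 'paths' setting via get_output_path():

    from yt_dlp import YoutubeDL

    opts = {'outtmpl': {'default': '%(title)s.%(ext)s'}, 'paths': {'home': 'downloads'}}
    with YoutubeDL(opts) as ydl:
        print(ydl.prepare_filename({'id': 'abc123', 'title': 'Demo', 'ext': 'mp4'}))
    # -> 'downloads/Demo.mp4' on POSIX (the 'home' path is joined in get_output_path)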
120fe513 1405 def _match_entry(self, info_dict, incomplete=False, silent=False):
6368e2e6 1406 """Returns None if the file should be downloaded"""
93b39cdb 1407 _type = 'video' if 'playlist-match-filter' in self.params['compat_opts'] else info_dict.get('_type', 'video')
d7b460d0 1408 assert incomplete or _type == 'video', 'Only video result can be considered complete'
8222d8de 1409
3bec830a 1410 video_title = info_dict.get('title', info_dict.get('id', 'entry'))
c77495e3 1411
8b0d7497 1412 def check_filter():
d7b460d0 1413 if _type in ('playlist', 'multi_video'):
1414 return
1415 elif _type in ('url', 'url_transparent') and not try_call(
1416 lambda: self.get_info_extractor(info_dict['ie_key']).is_single_video(info_dict['url'])):
1417 return
1418
8b0d7497 1419 if 'title' in info_dict:
1420 # This can happen when we're just evaluating the playlist
1421 title = info_dict['title']
1422 matchtitle = self.params.get('matchtitle', False)
1423 if matchtitle:
1424 if not re.search(matchtitle, title, re.IGNORECASE):
1425 return '"' + title + '" title did not match pattern "' + matchtitle + '"'
1426 rejecttitle = self.params.get('rejecttitle', False)
1427 if rejecttitle:
1428 if re.search(rejecttitle, title, re.IGNORECASE):
1429 return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
6368e2e6 1430
8b0d7497 1431 date = info_dict.get('upload_date')
1432 if date is not None:
1433 dateRange = self.params.get('daterange', DateRange())
1434 if date not in dateRange:
86e5f3ed 1435 return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
8b0d7497 1436 view_count = info_dict.get('view_count')
1437 if view_count is not None:
1438 min_views = self.params.get('min_views')
1439 if min_views is not None and view_count < min_views:
1440 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
1441 max_views = self.params.get('max_views')
1442 if max_views is not None and view_count > max_views:
1443 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
1444 if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
1445 return 'Skipping "%s" because it is age restricted' % video_title
8b0d7497 1446
8f18aca8 1447 match_filter = self.params.get('match_filter')
fe2ce85a 1448 if match_filter is None:
1449 return None
1450
1451 cancelled = None
1452 try:
8f18aca8 1453 try:
1454 ret = match_filter(info_dict, incomplete=incomplete)
1455 except TypeError:
1456 # For backward compatibility
1457 ret = None if incomplete else match_filter(info_dict)
fe2ce85a 1458 except DownloadCancelled as err:
1459 if err.msg is not NO_DEFAULT:
1460 raise
1461 ret, cancelled = err.msg, err
1462
1463 if ret is NO_DEFAULT:
1464 while True:
1465 filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
1466 reply = input(self._format_screen(
1467 f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
1468 if reply in {'y', ''}:
1469 return None
1470 elif reply == 'n':
1471 if cancelled:
1472 raise type(cancelled)(f'Skipping {video_title}')
1473 return f'Skipping {video_title}'
1474 return ret
8b0d7497 1475
c77495e3 1476 if self.in_download_archive(info_dict):
1477 reason = '%s has already been recorded in the archive' % video_title
1478 break_opt, break_err = 'break_on_existing', ExistingVideoReached
1479 else:
fe2ce85a 1480 try:
1481 reason = check_filter()
1482 except DownloadCancelled as e:
1483 reason, break_opt, break_err = e.msg, 'match_filter', type(e)
1484 else:
1485 break_opt, break_err = 'break_on_reject', RejectedVideoReached
8b0d7497 1486 if reason is not None:
120fe513 1487 if not silent:
1488 self.to_screen('[download] ' + reason)
c77495e3 1489 if self.params.get(break_opt, False):
1490 raise break_err()
8b0d7497 1491 return reason
fe7e0c98 1492
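For reference, a sketch of the kind of callable _match_entry() consumes; match_filter_func (from yt_dlp.utils) builds one from a --match-filters style expression. The filter string below is only an example:

    from yt_dlp import YoutubeDL
    from yt_dlp.utils import match_filter_func

    ydl = YoutubeDL({'match_filter': match_filter_func('duration>60 & like_count>?100')})
    # _match_entry() calls this with (info_dict, incomplete=...) and skips the
    # entry whenever a non-None reason string is returned.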
b6c45014
JMF
1493 @staticmethod
1494 def add_extra_info(info_dict, extra_info):
1495 '''Set the keys from extra_info in info dict if they are missing'''
1496 for key, value in extra_info.items():
1497 info_dict.setdefault(key, value)
1498
409e1828 1499 def extract_info(self, url, download=True, ie_key=None, extra_info=None,
61aa5ba3 1500 process=True, force_generic_extractor=False):
41d1cca3 1501 """
17ffed18 1502 Extract and return the information dictionary of the URL
41d1cca3 1503
1504 Arguments:
17ffed18 1505 @param url URL to extract
41d1cca3 1506
1507 Keyword arguments:
17ffed18 1508 @param download Whether to download videos
1509 @param process Whether to resolve all unresolved references (URLs, playlist items).
1510 Must be True for download to work
1511 @param ie_key Use only the extractor with this key
1512
1513 @param extra_info Dictionary containing the extra values to add to the info (For internal use only)
1514 @force_generic_extractor Force using the generic extractor (Deprecated; use ie_key='Generic')
41d1cca3 1515 """
fe7e0c98 1516
409e1828 1517 if extra_info is None:
1518 extra_info = {}
1519
61aa5ba3 1520 if not ie_key and force_generic_extractor:
d22dec74
S
1521 ie_key = 'Generic'
1522
8222d8de 1523 if ie_key:
fe7866d0 1524 ies = {ie_key: self._ies[ie_key]} if ie_key in self._ies else {}
8222d8de
JMF
1525 else:
1526 ies = self._ies
1527
fe7866d0 1528 for key, ie in ies.items():
8222d8de
JMF
1529 if not ie.suitable(url):
1530 continue
1531
1532 if not ie.working():
6febd1c1
PH
1533 self.report_warning('The program functionality for this site has been marked as broken, '
1534 'and will probably not work.')
8222d8de 1535
1151c407 1536 temp_id = ie.get_temp_id(url)
fe7866d0 1537 if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': key}):
1538 self.to_screen(f'[{key}] {temp_id}: has already been recorded in the archive')
5e5be0c0 1539 if self.params.get('break_on_existing', False):
1540 raise ExistingVideoReached()
a0566bbf 1541 break
fe7866d0 1542 return self.__extract_info(url, self.get_info_extractor(key), download, extra_info, process)
a0566bbf 1543 else:
fe7866d0 1544 extractors_restricted = self.params.get('allowed_extractors') not in (None, ['default'])
1545 self.report_error(f'No suitable extractor{format_field(ie_key, None, " (%s)")} found for URL {url}',
1546 tb=False if extractors_restricted else None)
a0566bbf 1547
7e88d7d7 1548 def _handle_extraction_exceptions(func):
b5ae35ee 1549 @functools.wraps(func)
a0566bbf 1550 def wrapper(self, *args, **kwargs):
6da22e7d 1551 while True:
1552 try:
1553 return func(self, *args, **kwargs)
1554 except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
8222d8de 1555 raise
6da22e7d 1556 except ReExtractInfo as e:
1557 if e.expected:
1558 self.to_screen(f'{e}; Re-extracting data')
1559 else:
1560 self.to_stderr('\r')
1561 self.report_warning(f'{e}; Re-extracting data')
1562 continue
1563 except GeoRestrictedError as e:
1564 msg = e.msg
1565 if e.countries:
1566 msg += '\nThis video is available in %s.' % ', '.join(
1567 map(ISO3166Utils.short2full, e.countries))
1569 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around this.'
1569 self.report_error(msg)
1570 except ExtractorError as e: # An error we somewhat expected
1571 self.report_error(str(e), e.format_traceback())
1572 except Exception as e:
1573 if self.params.get('ignoreerrors'):
1574 self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
1575 else:
1576 raise
1577 break
a0566bbf 1578 return wrapper
1579
693f0600 1580 def _wait_for_video(self, ie_result={}):
f2ebc5c7 1581 if (not self.params.get('wait_for_video')
1582 or ie_result.get('_type', 'video') != 'video'
1583 or ie_result.get('formats') or ie_result.get('url')):
1584 return
1585
1586 format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
1587 last_msg = ''
1588
1589 def progress(msg):
1590 nonlocal last_msg
a7dc6a89 1591 full_msg = f'{msg}\n'
1592 if not self.params.get('noprogress'):
1593 full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
1594 elif last_msg:
1595 return
1596 self.to_screen(full_msg, skip_eol=True)
f2ebc5c7 1597 last_msg = msg
1598
1599 min_wait, max_wait = self.params.get('wait_for_video')
1600 diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
1601 if diff is None and ie_result.get('live_status') == 'is_upcoming':
16c620bc 1602 diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
f2ebc5c7 1603 self.report_warning('Release time of video is not known')
693f0600 1604 elif ie_result and (diff or 0) <= 0:
f2ebc5c7 1605 self.report_warning('Video should already be available according to extracted info')
38d79fd1 1606 diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
f2ebc5c7 1607 self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')
1608
1609 wait_till = time.time() + diff
1610 try:
1611 while True:
1612 diff = wait_till - time.time()
1613 if diff <= 0:
1614 progress('')
1615 raise ReExtractInfo('[wait] Wait period ended', expected=True)
1616 progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
1617 time.sleep(1)
1618 except KeyboardInterrupt:
1619 progress('')
1620 raise ReExtractInfo('[wait] Interrupted by user', expected=True)
1621 except BaseException as e:
1622 if not isinstance(e, ReExtractInfo):
1623 self.to_screen('')
1624 raise
1625
7e88d7d7 1626 @_handle_extraction_exceptions
58f197b7 1627 def __extract_info(self, url, ie, download, extra_info, process):
693f0600 1628 try:
1629 ie_result = ie.extract(url)
1630 except UserNotLive as e:
1631 if process:
1632 if self.params.get('wait_for_video'):
1633 self.report_warning(e)
1634 self._wait_for_video()
1635 raise
a0566bbf 1636 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
cb794ee0 1637 self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
a0566bbf 1638 return
1639 if isinstance(ie_result, list):
1640 # Backwards compatibility: old IE result format
1641 ie_result = {
1642 '_type': 'compat_list',
1643 'entries': ie_result,
1644 }
e37d0efb 1645 if extra_info.get('original_url'):
1646 ie_result.setdefault('original_url', extra_info['original_url'])
a0566bbf 1647 self.add_default_extra_info(ie_result, ie, url)
1648 if process:
f2ebc5c7 1649 self._wait_for_video(ie_result)
a0566bbf 1650 return self.process_ie_result(ie_result, download, extra_info)
8222d8de 1651 else:
a0566bbf 1652 return ie_result
fe7e0c98 1653
ea38e55f 1654 def add_default_extra_info(self, ie_result, ie, url):
6033d980 1655 if url is not None:
1656 self.add_extra_info(ie_result, {
1657 'webpage_url': url,
1658 'original_url': url,
57ebfca3 1659 })
1660 webpage_url = ie_result.get('webpage_url')
1661 if webpage_url:
1662 self.add_extra_info(ie_result, {
1663 'webpage_url_basename': url_basename(webpage_url),
1664 'webpage_url_domain': get_domain(webpage_url),
6033d980 1665 })
1666 if ie is not None:
1667 self.add_extra_info(ie_result, {
1668 'extractor': ie.IE_NAME,
1669 'extractor_key': ie.ie_key(),
1670 })
ea38e55f 1671
58adec46 1672 def process_ie_result(self, ie_result, download=True, extra_info=None):
8222d8de
JMF
1673 """
1674 Take the result of the ie (may be modified) and resolve all unresolved
1675 references (URLs, playlist items).
1676
1677 It will also download the videos if 'download'.
1678 Returns the resolved ie_result.
1679 """
58adec46 1680 if extra_info is None:
1681 extra_info = {}
e8ee972c
PH
1682 result_type = ie_result.get('_type', 'video')
1683
057a5206 1684 if result_type in ('url', 'url_transparent'):
8f97a15d 1685 ie_result['url'] = sanitize_url(
1686 ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
8791e78c 1687 if ie_result.get('original_url') and not extra_info.get('original_url'):
1688 extra_info = {'original_url': ie_result['original_url'], **extra_info}
e37d0efb 1689
057a5206 1690 extract_flat = self.params.get('extract_flat', False)
3089bc74
S
1691 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1692 or extract_flat is True):
ecb54191 1693 info_copy = ie_result.copy()
6033d980 1694 ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
360167b9 1695 if ie and not ie_result.get('id'):
4614bc22 1696 info_copy['id'] = ie.get_temp_id(ie_result['url'])
6033d980 1697 self.add_default_extra_info(info_copy, ie, ie_result['url'])
4614bc22 1698 self.add_extra_info(info_copy, extra_info)
b5475f11 1699 info_copy, _ = self.pre_process(info_copy)
94dc8604 1700 self._fill_common_fields(info_copy, False)
17060584 1701 self.__forced_printings(info_copy)
415f8d51 1702 self._raise_pending_errors(info_copy)
4614bc22 1703 if self.params.get('force_write_download_archive', False):
1704 self.record_download_archive(info_copy)
e8ee972c
PH
1705 return ie_result
1706
8222d8de 1707 if result_type == 'video':
b6c45014 1708 self.add_extra_info(ie_result, extra_info)
9c2b75b5 1709 ie_result = self.process_video_result(ie_result, download=download)
415f8d51 1710 self._raise_pending_errors(ie_result)
28b0eb0f 1711 additional_urls = (ie_result or {}).get('additional_urls')
9c2b75b5 1712 if additional_urls:
e9f4ccd1 1713 # TODO: Improve MetadataParserPP to allow setting a list
14f25df2 1714 if isinstance(additional_urls, str):
9c2b75b5 1715 additional_urls = [additional_urls]
1716 self.to_screen(
1717 '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
1718 self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
1719 ie_result['additional_entries'] = [
1720 self.extract_info(
b69fd25c 1721 url, download, extra_info=extra_info,
9c2b75b5 1722 force_generic_extractor=self.params.get('force_generic_extractor'))
1723 for url in additional_urls
1724 ]
1725 return ie_result
8222d8de
JMF
1726 elif result_type == 'url':
1727 # We have to add extra_info to the results because it may be
1728 # contained in a playlist
07cce701 1729 return self.extract_info(
1730 ie_result['url'], download,
1731 ie_key=ie_result.get('ie_key'),
1732 extra_info=extra_info)
7fc3fa05
PH
1733 elif result_type == 'url_transparent':
1734 # Use the information from the embedding page
1735 info = self.extract_info(
1736 ie_result['url'], ie_key=ie_result.get('ie_key'),
1737 extra_info=extra_info, download=False, process=False)
1738
1640eb09
S
1739 # extract_info may return None when ignoreerrors is enabled and
1740 # extraction failed with an error; don't crash, just return early
1741 # in this case
1742 if not info:
1743 return info
1744
3975b4d2 1745 exempted_fields = {'_type', 'url', 'ie_key'}
1746 if not ie_result.get('section_end') and ie_result.get('section_start') is None:
1747 # For video clips, the id etc of the clip extractor should be used
1748 exempted_fields |= {'id', 'extractor', 'extractor_key'}
1749
412c617d 1750 new_result = info.copy()
3975b4d2 1751 new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))
7fc3fa05 1752
0563f7ac
S
1753 # Extracted info may not be a video result (i.e.
1754 # info.get('_type', 'video') != video) but rather an url or
1755 # url_transparent. In such cases outer metadata (from ie_result)
1756 # should be propagated to inner one (info). For this to happen
1757 # _type of info should be overridden with url_transparent. This
067aa17e 1758 # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
0563f7ac
S
1759 if new_result.get('_type') == 'url':
1760 new_result['_type'] = 'url_transparent'
7fc3fa05
PH
1761
1762 return self.process_ie_result(
1763 new_result, download=download, extra_info=extra_info)
40fcba5e 1764 elif result_type in ('playlist', 'multi_video'):
30a074c2 1765 # Protect from infinite recursion due to recursively nested playlists
1766 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
0bd5a039 1767 webpage_url = ie_result.get('webpage_url') # Playlists may not have webpage_url
1768 if webpage_url and webpage_url in self._playlist_urls:
7e85e872 1769 self.to_screen(
30a074c2 1770 '[download] Skipping already downloaded playlist: %s'
1771 % (ie_result.get('title') or ie_result.get('id')))
1772 return
7e85e872 1773
30a074c2 1774 self._playlist_level += 1
1775 self._playlist_urls.add(webpage_url)
03f83004 1776 self._fill_common_fields(ie_result, False)
bc516a3f 1777 self._sanitize_thumbnails(ie_result)
30a074c2 1778 try:
1779 return self.__process_playlist(ie_result, download)
1780 finally:
1781 self._playlist_level -= 1
1782 if not self._playlist_level:
1783 self._playlist_urls.clear()
8222d8de 1784 elif result_type == 'compat_list':
c9bf4114
PH
1785 self.report_warning(
1786 'Extractor %s returned a compat_list result. '
1787 'It needs to be updated.' % ie_result.get('extractor'))
5f6a1245 1788
8222d8de 1789 def _fixup(r):
b868936c 1790 self.add_extra_info(r, {
1791 'extractor': ie_result['extractor'],
1792 'webpage_url': ie_result['webpage_url'],
1793 'webpage_url_basename': url_basename(ie_result['webpage_url']),
0bb322b9 1794 'webpage_url_domain': get_domain(ie_result['webpage_url']),
b868936c 1795 'extractor_key': ie_result['extractor_key'],
1796 })
8222d8de
JMF
1797 return r
1798 ie_result['entries'] = [
b6c45014 1799 self.process_ie_result(_fixup(r), download, extra_info)
8222d8de
JMF
1800 for r in ie_result['entries']
1801 ]
1802 return ie_result
1803 else:
1804 raise Exception('Invalid result type: %s' % result_type)
1805
e92caff5 1806 def _ensure_dir_exists(self, path):
1807 return make_dir(path, self.report_error)
1808
3b603dbd 1809 @staticmethod
3bec830a 1810 def _playlist_infodict(ie_result, strict=False, **kwargs):
1811 info = {
1812 'playlist_count': ie_result.get('playlist_count'),
3b603dbd 1813 'playlist': ie_result.get('title') or ie_result.get('id'),
1814 'playlist_id': ie_result.get('id'),
1815 'playlist_title': ie_result.get('title'),
1816 'playlist_uploader': ie_result.get('uploader'),
1817 'playlist_uploader_id': ie_result.get('uploader_id'),
3b603dbd 1818 **kwargs,
1819 }
3bec830a 1820 if strict:
1821 return info
0bd5a039 1822 if ie_result.get('webpage_url'):
1823 info.update({
1824 'webpage_url': ie_result['webpage_url'],
1825 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1826 'webpage_url_domain': get_domain(ie_result['webpage_url']),
1827 })
3bec830a 1828 return {
1829 **info,
1830 'playlist_index': 0,
59d7de0d 1831 '__last_playlist_index': max(ie_result.get('requested_entries') or (0, 0)),
3bec830a 1832 'extractor': ie_result['extractor'],
3bec830a 1833 'extractor_key': ie_result['extractor_key'],
1834 }
3b603dbd 1835
30a074c2 1836 def __process_playlist(self, ie_result, download):
7e88d7d7 1837 """Process each entry in the playlist"""
f5ea4748 1838 assert ie_result['_type'] in ('playlist', 'multi_video')
1839
3bec830a 1840 common_info = self._playlist_infodict(ie_result, strict=True)
3955b207 1841 title = common_info.get('playlist') or '<Untitled>'
3bec830a 1842 if self._match_entry(common_info, incomplete=True) is not None:
1843 return
c6e07cf1 1844 self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')
f0d785d3 1845
7e88d7d7 1846 all_entries = PlaylistEntries(self, ie_result)
7e9a6125 1847 entries = orderedSet(all_entries.get_requested_items(), lazy=True)
1848
1849 lazy = self.params.get('lazy_playlist')
1850 if lazy:
1851 resolved_entries, n_entries = [], 'N/A'
1852 ie_result['requested_entries'], ie_result['entries'] = None, None
1853 else:
1854 entries = resolved_entries = list(entries)
1855 n_entries = len(resolved_entries)
1856 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
1857 if not ie_result.get('playlist_count'):
1858 # Better to do this after potentially exhausting entries
1859 ie_result['playlist_count'] = all_entries.get_full_count()
498f5606 1860
0647d925 1861 extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
1862 ie_copy = collections.ChainMap(ie_result, extra)
3bec830a 1863
e08a85d8 1864 _infojson_written = False
0bfc53d0 1865 write_playlist_files = self.params.get('allow_playlist_files', True)
1866 if write_playlist_files and self.params.get('list_thumbnails'):
1867 self.list_thumbnails(ie_result)
1868 if write_playlist_files and not self.params.get('simulate'):
e08a85d8 1869 _infojson_written = self._write_info_json(
1870 'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
1871 if _infojson_written is None:
80c03fa9 1872 return
1873 if self._write_description('playlist', ie_result,
1874 self.prepare_filename(ie_copy, 'pl_description')) is None:
1875 return
681de68e 1876 # TODO: This should be passed to ThumbnailsConvertor if necessary
3bec830a 1877 self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))
30a074c2 1878
7e9a6125 1879 if lazy:
1880 if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
1881 self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
1882 elif self.params.get('playlistreverse'):
1883 entries.reverse()
1884 elif self.params.get('playlistrandom'):
30a074c2 1885 random.shuffle(entries)
1886
bc5c2f8a 1887 self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} items'
7e88d7d7 1888 f'{format_field(ie_result, "playlist_count", " of %s")}')
30a074c2 1889
134c913c 1890 keep_resolved_entries = self.params.get('extract_flat') != 'discard'
1891 if self.params.get('extract_flat') == 'discard_in_playlist':
1892 keep_resolved_entries = ie_result['_type'] != 'playlist'
1893 if keep_resolved_entries:
1894 self.write_debug('The information of all playlist entries will be held in memory')
1895
26e2805c 1896 failures = 0
1897 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
7e9a6125 1898 for i, (playlist_index, entry) in enumerate(entries):
1899 if lazy:
1900 resolved_entries.append((playlist_index, entry))
3bec830a 1901 if not entry:
7e88d7d7 1902 continue
1903
7e88d7d7 1904 entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
ad54c913 1905 if not lazy and 'playlist-index' in self.params['compat_opts']:
7e9a6125 1906 playlist_index = ie_result['requested_entries'][i]
1907
0647d925 1908 entry_copy = collections.ChainMap(entry, {
3bec830a 1909 **common_info,
3955b207 1910 'n_entries': int_or_none(n_entries),
71729754 1911 'playlist_index': playlist_index,
7e9a6125 1912 'playlist_autonumber': i + 1,
0647d925 1913 })
3bec830a 1914
0647d925 1915 if self._match_entry(entry_copy, incomplete=True) is not None:
f0ad6f8c 1916 # For compatibility with youtube-dl. See https://github.com/yt-dlp/yt-dlp/issues/4369
1917 resolved_entries[i] = (playlist_index, NO_DEFAULT)
3bec830a 1918 continue
1919
bc5c2f8a 1920 self.to_screen('[download] Downloading item %s of %s' % (
3bec830a 1921 self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
1922
ec54bd43 1923 entry_result = self.__process_iterable_entry(entry, download, collections.ChainMap({
a6ca61d4 1924 'playlist_index': playlist_index,
1925 'playlist_autonumber': i + 1,
ec54bd43 1926 }, extra))
26e2805c 1927 if not entry_result:
1928 failures += 1
1929 if failures >= max_failures:
1930 self.report_error(
7e88d7d7 1931 f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
26e2805c 1932 break
134c913c 1933 if keep_resolved_entries:
1934 resolved_entries[i] = (playlist_index, entry_result)
7e88d7d7 1935
1936 # Update with processed data
f0ad6f8c 1937 ie_result['entries'] = [e for _, e in resolved_entries if e is not NO_DEFAULT]
bc5c2f8a 1938 ie_result['requested_entries'] = [i for i, e in resolved_entries if e is not NO_DEFAULT]
1939 if ie_result['requested_entries'] == try_call(lambda: list(range(1, ie_result['playlist_count'] + 1))):
1940 # Do not set for full playlist
1941 ie_result.pop('requested_entries')
e08a85d8 1942
1943 # Write the updated info to json
cb96c5be 1944 if _infojson_written is True and self._write_info_json(
e08a85d8 1945 'updated playlist', ie_result,
1946 self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
1947 return
ca30f449 1948
ed5835b4 1949 ie_result = self.run_all_pps('playlist', ie_result)
7e88d7d7 1950 self.to_screen(f'[download] Finished downloading playlist: {title}')
30a074c2 1951 return ie_result
1952
7e88d7d7 1953 @_handle_extraction_exceptions
a0566bbf 1954 def __process_iterable_entry(self, entry, download, extra_info):
1955 return self.process_ie_result(
1956 entry, download=download, extra_info=extra_info)
1957
67134eab
JMF
1958 def _build_format_filter(self, filter_spec):
1959 " Returns a function to filter the formats according to the filter_spec "
083c9df9
PH
1960
1961 OPERATORS = {
1962 '<': operator.lt,
1963 '<=': operator.le,
1964 '>': operator.gt,
1965 '>=': operator.ge,
1966 '=': operator.eq,
1967 '!=': operator.ne,
1968 }
67134eab 1969 operator_rex = re.compile(r'''(?x)\s*
c3f624ef 1970 (?P<key>[\w.-]+)\s*
187986a8 1971 (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1972 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
083c9df9 1973 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
187986a8 1974 m = operator_rex.fullmatch(filter_spec)
9ddb6925
S
1975 if m:
1976 try:
1977 comparison_value = int(m.group('value'))
1978 except ValueError:
1979 comparison_value = parse_filesize(m.group('value'))
1980 if comparison_value is None:
1981 comparison_value = parse_filesize(m.group('value') + 'B')
1982 if comparison_value is None:
1983 raise ValueError(
1984 'Invalid value %r in format specification %r' % (
67134eab 1985 m.group('value'), filter_spec))
9ddb6925
S
1986 op = OPERATORS[m.group('op')]
1987
083c9df9 1988 if not m:
9ddb6925
S
1989 STR_OPERATORS = {
1990 '=': operator.eq,
10d33b34
YCH
1991 '^=': lambda attr, value: attr.startswith(value),
1992 '$=': lambda attr, value: attr.endswith(value),
1993 '*=': lambda attr, value: value in attr,
1ce9a3cb 1994 '~=': lambda attr, value: value.search(attr) is not None
9ddb6925 1995 }
187986a8 1996 str_operator_rex = re.compile(r'''(?x)\s*
1997 (?P<key>[a-zA-Z0-9._-]+)\s*
1ce9a3cb
LF
1998 (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
1999 (?P<quote>["'])?
2000 (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
2001 (?(quote)(?P=quote))\s*
9ddb6925 2002 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
187986a8 2003 m = str_operator_rex.fullmatch(filter_spec)
9ddb6925 2004 if m:
1ce9a3cb
LF
2005 if m.group('op') == '~=':
2006 comparison_value = re.compile(m.group('value'))
2007 else:
2008 comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
2cc779f4
S
2009 str_op = STR_OPERATORS[m.group('op')]
2010 if m.group('negation'):
e118a879 2011 op = lambda attr, value: not str_op(attr, value)
2cc779f4
S
2012 else:
2013 op = str_op
083c9df9 2014
9ddb6925 2015 if not m:
187986a8 2016 raise SyntaxError('Invalid filter specification %r' % filter_spec)
083c9df9
PH
2017
2018 def _filter(f):
2019 actual_value = f.get(m.group('key'))
2020 if actual_value is None:
2021 return m.group('none_inclusive')
2022 return op(actual_value, comparison_value)
67134eab
JMF
2023 return _filter
2024
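A small sketch (made-up format dicts, calling the internal helper directly purely for illustration) of the bracket-filter syntax handled above, i.e. the "[height<=480]" part of a format selector:

    from yt_dlp import YoutubeDL

    ydl = YoutubeDL()
    flt = ydl._build_format_filter('height<=480')
    print(bool(flt({'format_id': '18', 'height': 360})))   # True
    print(bool(flt({'format_id': '22', 'height': 720})))   # False
    # A format lacking the key is kept only with the '?' form, e.g. 'height<=?480'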
9f1a1c36 2025 def _check_formats(self, formats):
2026 for f in formats:
2027 self.to_screen('[info] Testing format %s' % f['format_id'])
75689fe5 2028 path = self.get_output_path('temp')
2029 if not self._ensure_dir_exists(f'{path}/'):
2030 continue
2031 temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
9f1a1c36 2032 temp_file.close()
2033 try:
2034 success, _ = self.dl(temp_file.name, f, test=True)
8a82af35 2035 except (DownloadError, OSError, ValueError) + network_exceptions:
9f1a1c36 2036 success = False
2037 finally:
2038 if os.path.exists(temp_file.name):
2039 try:
2040 os.remove(temp_file.name)
2041 except OSError:
2042 self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
2043 if success:
2044 yield f
2045 else:
2046 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
2047
0017d9ad 2048 def _default_format_spec(self, info_dict, download=True):
0017d9ad 2049
af0f7428
S
2050 def can_merge():
2051 merger = FFmpegMergerPP(self)
2052 return merger.available and merger.can_merge()
2053
91ebc640 2054 prefer_best = (
b7b04c78 2055 not self.params.get('simulate')
91ebc640 2056 and download
2057 and (
2058 not can_merge()
21633673 2059 or info_dict.get('is_live') and not self.params.get('live_from_start')
bf1824b3 2060 or self.params['outtmpl']['default'] == '-'))
53ed7066 2061 compat = (
2062 prefer_best
2063 or self.params.get('allow_multiple_audio_streams', False)
8a82af35 2064 or 'format-spec' in self.params['compat_opts'])
91ebc640 2065
2066 return (
53ed7066 2067 'best/bestvideo+bestaudio' if prefer_best
2068 else 'bestvideo*+bestaudio/best' if not compat
91ebc640 2069 else 'bestvideo+bestaudio/best')
0017d9ad 2070
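Roughly, the logic above picks 'bestvideo*+bestaudio/best' when merging is possible and output is not stdout, and degrades to a pre-merged 'best' variant otherwise. A quick illustrative probe (the result depends on whether ffmpeg is available):

    from yt_dlp import YoutubeDL

    ydl = YoutubeDL()
    print(ydl._default_format_spec({}, download=True))
    # -> 'bestvideo*+bestaudio/best' when merging is available,
    #    'best/bestvideo+bestaudio' otherwise (e.g. no ffmpeg, or outtmpl == '-')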
67134eab
JMF
2071 def build_format_selector(self, format_spec):
2072 def syntax_error(note, start):
2073 message = (
2074 'Invalid format specification: '
86e5f3ed 2075 '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
67134eab
JMF
2076 return SyntaxError(message)
2077
2078 PICKFIRST = 'PICKFIRST'
2079 MERGE = 'MERGE'
2080 SINGLE = 'SINGLE'
0130afb7 2081 GROUP = 'GROUP'
67134eab
JMF
2082 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
2083
91ebc640 2084 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
2085 'video': self.params.get('allow_multiple_video_streams', False)}
909d24dd 2086
9f1a1c36 2087 check_formats = self.params.get('check_formats') == 'selected'
e8e73840 2088
67134eab
JMF
2089 def _parse_filter(tokens):
2090 filter_parts = []
6f2287cb 2091 for type, string_, start, _, _ in tokens:
2092 if type == tokenize.OP and string_ == ']':
67134eab
JMF
2093 return ''.join(filter_parts)
2094 else:
6f2287cb 2095 filter_parts.append(string_)
67134eab 2096
232541df 2097 def _remove_unused_ops(tokens):
62b58c09
L
2098 # Remove operators that we don't use and join them with the surrounding strings.
2099 # E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
232541df
JMF
2100 ALLOWED_OPS = ('/', '+', ',', '(', ')')
2101 last_string, last_start, last_end, last_line = None, None, None, None
6f2287cb 2102 for type, string_, start, end, line in tokens:
2103 if type == tokenize.OP and string_ == '[':
232541df
JMF
2104 if last_string:
2105 yield tokenize.NAME, last_string, last_start, last_end, last_line
2106 last_string = None
6f2287cb 2107 yield type, string_, start, end, line
232541df 2108 # everything inside brackets will be handled by _parse_filter
6f2287cb 2109 for type, string_, start, end, line in tokens:
2110 yield type, string_, start, end, line
2111 if type == tokenize.OP and string_ == ']':
232541df 2112 break
6f2287cb 2113 elif type == tokenize.OP and string_ in ALLOWED_OPS:
232541df
JMF
2114 if last_string:
2115 yield tokenize.NAME, last_string, last_start, last_end, last_line
2116 last_string = None
6f2287cb 2117 yield type, string_, start, end, line
232541df
JMF
2118 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
2119 if not last_string:
6f2287cb 2120 last_string = string_
232541df
JMF
2121 last_start = start
2122 last_end = end
2123 else:
6f2287cb 2124 last_string += string_
232541df
JMF
2125 if last_string:
2126 yield tokenize.NAME, last_string, last_start, last_end, last_line
2127
cf2ac6df 2128 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
67134eab
JMF
2129 selectors = []
2130 current_selector = None
6f2287cb 2131 for type, string_, start, _, _ in tokens:
67134eab
JMF
2132 # ENCODING is only defined in python 3.x
2133 if type == getattr(tokenize, 'ENCODING', None):
2134 continue
2135 elif type in [tokenize.NAME, tokenize.NUMBER]:
6f2287cb 2136 current_selector = FormatSelector(SINGLE, string_, [])
67134eab 2137 elif type == tokenize.OP:
6f2287cb 2138 if string_ == ')':
cf2ac6df
JMF
2139 if not inside_group:
2140 # ')' will be handled by the parentheses group
2141 tokens.restore_last_token()
67134eab 2142 break
6f2287cb 2143 elif inside_merge and string_ in ['/', ',']:
0130afb7
JMF
2144 tokens.restore_last_token()
2145 break
6f2287cb 2146 elif inside_choice and string_ == ',':
cf2ac6df
JMF
2147 tokens.restore_last_token()
2148 break
6f2287cb 2149 elif string_ == ',':
0a31a350
JMF
2150 if not current_selector:
2151 raise syntax_error('"," must follow a format selector', start)
67134eab
JMF
2152 selectors.append(current_selector)
2153 current_selector = None
6f2287cb 2154 elif string_ == '/':
d96d604e
JMF
2155 if not current_selector:
2156 raise syntax_error('"/" must follow a format selector', start)
67134eab 2157 first_choice = current_selector
cf2ac6df 2158 second_choice = _parse_format_selection(tokens, inside_choice=True)
f5f4a27a 2159 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
6f2287cb 2160 elif string_ == '[':
67134eab
JMF
2161 if not current_selector:
2162 current_selector = FormatSelector(SINGLE, 'best', [])
2163 format_filter = _parse_filter(tokens)
2164 current_selector.filters.append(format_filter)
6f2287cb 2165 elif string_ == '(':
0130afb7
JMF
2166 if current_selector:
2167 raise syntax_error('Unexpected "("', start)
cf2ac6df
JMF
2168 group = _parse_format_selection(tokens, inside_group=True)
2169 current_selector = FormatSelector(GROUP, group, [])
6f2287cb 2170 elif string_ == '+':
d03cfdce 2171 if not current_selector:
2172 raise syntax_error('Unexpected "+"', start)
2173 selector_1 = current_selector
2174 selector_2 = _parse_format_selection(tokens, inside_merge=True)
2175 if not selector_2:
2176 raise syntax_error('Expected a selector', start)
2177 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
67134eab 2178 else:
6f2287cb 2179 raise syntax_error(f'Operator not recognized: "{string_}"', start)
67134eab
JMF
2180 elif type == tokenize.ENDMARKER:
2181 break
2182 if current_selector:
2183 selectors.append(current_selector)
2184 return selectors
2185
f8d4ad9a 2186 def _merge(formats_pair):
2187 format_1, format_2 = formats_pair
2188
2189 formats_info = []
2190 formats_info.extend(format_1.get('requested_formats', (format_1,)))
2191 formats_info.extend(format_2.get('requested_formats', (format_2,)))
2192
2193 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
551f9388 2194 get_no_more = {'video': False, 'audio': False}
f8d4ad9a 2195 for (i, fmt_info) in enumerate(formats_info):
551f9388 2196 if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
2197 formats_info.pop(i)
2198 continue
2199 for aud_vid in ['audio', 'video']:
f8d4ad9a 2200 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
2201 if get_no_more[aud_vid]:
2202 formats_info.pop(i)
f5510afe 2203 break
f8d4ad9a 2204 get_no_more[aud_vid] = True
2205
2206 if len(formats_info) == 1:
2207 return formats_info[0]
2208
2209 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
2210 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
2211
2212 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
2213 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
2214
fc61aff4
LL
2215 output_ext = get_compatible_ext(
2216 vcodecs=[f.get('vcodec') for f in video_fmts],
2217 acodecs=[f.get('acodec') for f in audio_fmts],
2218 vexts=[f['ext'] for f in video_fmts],
2219 aexts=[f['ext'] for f in audio_fmts],
2220 preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
2221 or self.params.get('prefer_free_formats') and ('webm', 'mkv')))
f8d4ad9a 2222
975a0d0d 2223 filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
2224
f8d4ad9a 2225 new_dict = {
2226 'requested_formats': formats_info,
975a0d0d 2227 'format': '+'.join(filtered('format')),
2228 'format_id': '+'.join(filtered('format_id')),
f8d4ad9a 2229 'ext': output_ext,
975a0d0d 2230 'protocol': '+'.join(map(determine_protocol, formats_info)),
093a1710 2231 'language': '+'.join(orderedSet(filtered('language'))) or None,
2232 'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
2233 'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
975a0d0d 2234 'tbr': sum(filtered('tbr', 'vbr', 'abr')),
f8d4ad9a 2235 }
2236
2237 if the_only_video:
2238 new_dict.update({
2239 'width': the_only_video.get('width'),
2240 'height': the_only_video.get('height'),
2241 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
2242 'fps': the_only_video.get('fps'),
49a57e70 2243 'dynamic_range': the_only_video.get('dynamic_range'),
f8d4ad9a 2244 'vcodec': the_only_video.get('vcodec'),
2245 'vbr': the_only_video.get('vbr'),
2246 'stretched_ratio': the_only_video.get('stretched_ratio'),
105bfd90 2247 'aspect_ratio': the_only_video.get('aspect_ratio'),
f8d4ad9a 2248 })
2249
2250 if the_only_audio:
2251 new_dict.update({
2252 'acodec': the_only_audio.get('acodec'),
2253 'abr': the_only_audio.get('abr'),
975a0d0d 2254 'asr': the_only_audio.get('asr'),
b8ed0f15 2255 'audio_channels': the_only_audio.get('audio_channels')
f8d4ad9a 2256 })
2257
2258 return new_dict
2259
e8e73840 2260 def _check_formats(formats):
981052c9 2261 if not check_formats:
2262 yield from formats
b5ac45b1 2263 return
9f1a1c36 2264 yield from self._check_formats(formats)
e8e73840 2265
67134eab 2266 def _build_selector_function(selector):
909d24dd 2267 if isinstance(selector, list): # ,
67134eab
JMF
2268 fs = [_build_selector_function(s) for s in selector]
2269
317f7ab6 2270 def selector_function(ctx):
67134eab 2271 for f in fs:
981052c9 2272 yield from f(ctx)
67134eab 2273 return selector_function
909d24dd 2274
2275 elif selector.type == GROUP: # ()
0130afb7 2276 selector_function = _build_selector_function(selector.selector)
909d24dd 2277
2278 elif selector.type == PICKFIRST: # /
67134eab
JMF
2279 fs = [_build_selector_function(s) for s in selector.selector]
2280
317f7ab6 2281 def selector_function(ctx):
67134eab 2282 for f in fs:
317f7ab6 2283 picked_formats = list(f(ctx))
67134eab
JMF
2284 if picked_formats:
2285 return picked_formats
2286 return []
67134eab 2287
981052c9 2288 elif selector.type == MERGE: # +
2289 selector_1, selector_2 = map(_build_selector_function, selector.selector)
2290
2291 def selector_function(ctx):
adbc4ec4 2292 for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
981052c9 2293 yield _merge(pair)
2294
909d24dd 2295 elif selector.type == SINGLE: # atom
598d185d 2296 format_spec = selector.selector or 'best'
909d24dd 2297
f8d4ad9a 2298 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
909d24dd 2299 if format_spec == 'all':
2300 def selector_function(ctx):
9222c381 2301 yield from _check_formats(ctx['formats'][::-1])
f8d4ad9a 2302 elif format_spec == 'mergeall':
2303 def selector_function(ctx):
316f2650 2304 formats = list(_check_formats(
2305 f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
e01d6aa4 2306 if not formats:
2307 return
921b76ca 2308 merged_format = formats[-1]
2309 for f in formats[-2::-1]:
f8d4ad9a 2310 merged_format = _merge((merged_format, f))
2311 yield merged_format
909d24dd 2312
2313 else:
85e801a9 2314 format_fallback, seperate_fallback, format_reverse, format_idx = False, None, True, 1
eff63539 2315 mobj = re.match(
2316 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
2317 format_spec)
2318 if mobj is not None:
2319 format_idx = int_or_none(mobj.group('n'), default=1)
e8e73840 2320 format_reverse = mobj.group('bw')[0] == 'b'
eff63539 2321 format_type = (mobj.group('type') or [None])[0]
2322 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
2323 format_modified = mobj.group('mod') is not None
909d24dd 2324
2325 format_fallback = not format_type and not format_modified # for b, w
8326b00a 2326 _filter_f = (
eff63539 2327 (lambda f: f.get('%scodec' % format_type) != 'none')
2328 if format_type and format_modified # bv*, ba*, wv*, wa*
2329 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
2330 if format_type # bv, ba, wv, wa
2331 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
2332 if not format_modified # b, w
8326b00a 2333 else lambda f: True) # b*, w*
2334 filter_f = lambda f: _filter_f(f) and (
2335 f.get('vcodec') != 'none' or f.get('acodec') != 'none')
67134eab 2336 else:
48ee10ee 2337 if format_spec in self._format_selection_exts['audio']:
b11c04a8 2338 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
48ee10ee 2339 elif format_spec in self._format_selection_exts['video']:
b11c04a8 2340 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
85e801a9 2341 seperate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
48ee10ee 2342 elif format_spec in self._format_selection_exts['storyboards']:
b11c04a8 2343 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
2344 else:
b5ae35ee 2345 filter_f = lambda f: f.get('format_id') == format_spec # id
909d24dd 2346
2347 def selector_function(ctx):
2348 formats = list(ctx['formats'])
909d24dd 2349 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
85e801a9 2350 if not matches:
2351 if format_fallback and ctx['incomplete_formats']:
2352 # for extractors with incomplete formats (audio only (soundcloud)
2353 # or video only (imgur)) best/worst will fall back to
2354 # best/worst {video,audio}-only format
2355 matches = formats
2356 elif seperate_fallback and not ctx['has_merged_format']:
2357 # for compatibility with youtube-dl when there is no pre-merged format
2358 matches = list(filter(seperate_fallback, formats))
981052c9 2359 matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
2360 try:
e8e73840 2361 yield matches[format_idx - 1]
4abea8ca 2362 except LazyList.IndexError:
981052c9 2363 return
083c9df9 2364
67134eab 2365 filters = [self._build_format_filter(f) for f in selector.filters]
083c9df9 2366
317f7ab6 2367 def final_selector(ctx):
adbc4ec4 2368 ctx_copy = dict(ctx)
67134eab 2369 for _filter in filters:
317f7ab6
S
2370 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
2371 return selector_function(ctx_copy)
67134eab 2372 return final_selector
083c9df9 2373
0f06bcd7 2374 stream = io.BytesIO(format_spec.encode())
0130afb7 2375 try:
f9934b96 2376 tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
0130afb7
JMF
2377 except tokenize.TokenError:
2378 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
2379
86e5f3ed 2380 class TokenIterator:
0130afb7
JMF
2381 def __init__(self, tokens):
2382 self.tokens = tokens
2383 self.counter = 0
2384
2385 def __iter__(self):
2386 return self
2387
2388 def __next__(self):
2389 if self.counter >= len(self.tokens):
2390 raise StopIteration()
2391 value = self.tokens[self.counter]
2392 self.counter += 1
2393 return value
2394
2395 next = __next__
2396
2397 def restore_last_token(self):
2398 self.counter -= 1
2399
2400 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
67134eab 2401 return _build_selector_function(parsed_selector)
a9c58ad9 2402
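A hedged end-to-end sketch of the selector compiler above, using two invented progressive formats; the ctx keys mirror what process_video_result passes in:

    from yt_dlp import YoutubeDL

    ydl = YoutubeDL()
    selector = ydl.build_format_selector('best[height<=480]/best')
    formats = [
        {'format_id': '22', 'ext': 'mp4', 'height': 720, 'vcodec': 'avc1', 'acodec': 'mp4a'},
        {'format_id': '18', 'ext': 'mp4', 'height': 360, 'vcodec': 'avc1', 'acodec': 'mp4a'},
    ]
    ctx = {'formats': formats, 'incomplete_formats': False, 'has_merged_format': True}
    print([f['format_id'] for f in selector(ctx)])  # -> ['18']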
e5660ee6 2403 def _calc_headers(self, info_dict):
8b7539d2 2404 res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})
955c8958 2405 if 'Youtubedl-No-Compression' in res: # deprecated
2406 res.pop('Youtubedl-No-Compression', None)
2407 res['Accept-Encoding'] = 'identity'
b87e01c1 2408 cookies = self.cookiejar.get_cookie_header(info_dict['url'])
e5660ee6
JMF
2409 if cookies:
2410 res['Cookie'] = cookies
2411
0016b84e
S
2412 if 'X-Forwarded-For' not in res:
2413 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
2414 if x_forwarded_for_ip:
2415 res['X-Forwarded-For'] = x_forwarded_for_ip
2416
e5660ee6
JMF
2417 return res
2418
c487cf00 2419 def _calc_cookies(self, url):
b87e01c1 2420 self.deprecation_warning('"YoutubeDL._calc_cookies" is deprecated and may be removed in a future version')
2421 return self.cookiejar.get_cookie_header(url)
e5660ee6 2422
9f1a1c36 2423 def _sort_thumbnails(self, thumbnails):
2424 thumbnails.sort(key=lambda t: (
2425 t.get('preference') if t.get('preference') is not None else -1,
2426 t.get('width') if t.get('width') is not None else -1,
2427 t.get('height') if t.get('height') is not None else -1,
2428 t.get('id') if t.get('id') is not None else '',
2429 t.get('url')))
2430
b0249bca 2431 def _sanitize_thumbnails(self, info_dict):
bc516a3f 2432 thumbnails = info_dict.get('thumbnails')
2433 if thumbnails is None:
2434 thumbnail = info_dict.get('thumbnail')
2435 if thumbnail:
2436 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
9f1a1c36 2437 if not thumbnails:
2438 return
2439
2440 def check_thumbnails(thumbnails):
2441 for t in thumbnails:
2442 self.to_screen(f'[info] Testing thumbnail {t["id"]}')
2443 try:
2444 self.urlopen(HEADRequest(t['url']))
2445 except network_exceptions as err:
2446 self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
2447 continue
2448 yield t
2449
2450 self._sort_thumbnails(thumbnails)
2451 for i, t in enumerate(thumbnails):
2452 if t.get('id') is None:
2453 t['id'] = '%d' % i
2454 if t.get('width') and t.get('height'):
2455 t['resolution'] = '%dx%d' % (t['width'], t['height'])
2456 t['url'] = sanitize_url(t['url'])
2457
2458 if self.params.get('check_formats') is True:
282f5709 2459 info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
9f1a1c36 2460 else:
2461 info_dict['thumbnails'] = thumbnails
bc516a3f 2462
94dc8604 2463 def _fill_common_fields(self, info_dict, final=True):
03f83004 2464 # TODO: move sanitization here
94dc8604 2465 if final:
7aefd19a 2466 title = info_dict['fulltitle'] = info_dict.get('title')
d4736fdb 2467 if not title:
2468 if title == '':
2469 self.write_debug('Extractor gave empty title. Creating a generic title')
2470 else:
2471 self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
1d485a1a 2472 info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'
03f83004
LNO
2473
2474 if info_dict.get('duration') is not None:
2475 info_dict['duration_string'] = formatSeconds(info_dict['duration'])
2476
2477 for ts_key, date_key in (
2478 ('timestamp', 'upload_date'),
2479 ('release_timestamp', 'release_date'),
2480 ('modified_timestamp', 'modified_date'),
2481 ):
2482 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
2483 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2484 # see http://bugs.python.org/issue1646728)
19a03940 2485 with contextlib.suppress(ValueError, OverflowError, OSError):
03f83004
LNO
2486 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
2487 info_dict[date_key] = upload_date.strftime('%Y%m%d')
03f83004
LNO
2488
2489 live_keys = ('is_live', 'was_live')
2490 live_status = info_dict.get('live_status')
2491 if live_status is None:
2492 for key in live_keys:
2493 if info_dict.get(key) is False:
2494 continue
2495 if info_dict.get(key):
2496 live_status = key
2497 break
2498 if all(info_dict.get(key) is False for key in live_keys):
2499 live_status = 'not_live'
2500 if live_status:
2501 info_dict['live_status'] = live_status
2502 for key in live_keys:
2503 if info_dict.get(key) is None:
2504 info_dict[key] = (live_status == key)
a057779d 2505 if live_status == 'post_live':
2506 info_dict['was_live'] = True
03f83004
LNO
2507
2508 # Auto generate title fields corresponding to the *_number fields when missing
2509 # in order to always have clean titles. This is very common for TV series.
2510 for field in ('chapter', 'season', 'episode'):
94dc8604 2511 if final and info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
03f83004
LNO
2512 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
2513
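# Illustrative sketch (not part of the original file) of what _fill_common_fields()
# derives for a minimal, made-up info dict:
#   info = {'id': 'x', 'extractor': 'generic', 'title': 't', 'timestamp': 1600000000,
#           'episode_number': 3, 'is_live': False, 'was_live': False}
#   ydl._fill_common_fields(info)     # 'ydl' is a hypothetical YoutubeDL instance
#   info['upload_date'] == '20200913'   # from the timestamp, in UTC
#   info['live_status'] == 'not_live'   # both live keys are False
#   info['episode'] == 'Episode 3'      # auto-generated from episode_number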
415f8d51 2514 def _raise_pending_errors(self, info):
2515 err = info.pop('__pending_error', None)
2516 if err:
2517 self.report_error(err, tb=False)
2518
784320c9 2519 def sort_formats(self, info_dict):
2520 formats = self._get_formats(info_dict)
784320c9 2521 formats.sort(key=FormatSorter(
c154302c 2522 self, info_dict.get('_format_sort_fields') or []).calculate_preference)
784320c9 2523
dd82ffea
JMF
2524 def process_video_result(self, info_dict, download=True):
2525 assert info_dict.get('_type', 'video') == 'video'
9c906919 2526 self._num_videos += 1
dd82ffea 2527
bec1fad2 2528 if 'id' not in info_dict:
fc08bdd6 2529 raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
2530 elif not info_dict.get('id'):
2531 raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])
455a15e2 2532
c9969434
S
2533 def report_force_conversion(field, field_not, conversion):
2534 self.report_warning(
2535 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2536 % (field, field_not, conversion))
2537
2538 def sanitize_string_field(info, string_field):
2539 field = info.get(string_field)
14f25df2 2540 if field is None or isinstance(field, str):
c9969434
S
2541 return
2542 report_force_conversion(string_field, 'a string', 'string')
14f25df2 2543 info[string_field] = str(field)
c9969434
S
2544
2545 def sanitize_numeric_fields(info):
2546 for numeric_field in self._NUMERIC_FIELDS:
2547 field = info.get(numeric_field)
f9934b96 2548 if field is None or isinstance(field, (int, float)):
c9969434
S
2549 continue
2550 report_force_conversion(numeric_field, 'numeric', 'int')
2551 info[numeric_field] = int_or_none(field)
2552
2553 sanitize_string_field(info_dict, 'id')
2554 sanitize_numeric_fields(info_dict)
3975b4d2 2555 if info_dict.get('section_end') and info_dict.get('section_start') is not None:
2556 info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
4c3f8c3f 2557 if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
50e93e03 2558 self.report_warning('"duration" field is negative, there is an error in extractor')
be6217b2 2559
9eef7c4e 2560 chapters = info_dict.get('chapters') or []
a3976e07 2561 if chapters and chapters[0].get('start_time'):
2562 chapters.insert(0, {'start_time': 0})
2563
9eef7c4e 2564 dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
a3976e07 2565 for idx, (prev, current, next_) in enumerate(zip(
2566 (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
9eef7c4e 2567 if current.get('start_time') is None:
2568 current['start_time'] = prev.get('end_time')
2569 if not current.get('end_time'):
2570 current['end_time'] = next_.get('start_time')
a3976e07 2571 if not current.get('title'):
2572 current['title'] = f'<Untitled Chapter {idx}>'
9eef7c4e 2573
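# Worked example (not part of the original file) of the chapter sanitization above,
# assuming a made-up info dict with duration 300 and a single chapter starting at 60:
#   chapters = [{'start_time': 60, 'title': 'Main'}]
# becomes
#   [{'start_time': 0, 'end_time': 60, 'title': '<Untitled Chapter 1>'},
#    {'start_time': 60, 'end_time': 300, 'title': 'Main'}]
# i.e. a leading chapter is inserted to cover 0-60 and missing boundaries/titles are filled in.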
dd82ffea
JMF
2574 if 'playlist' not in info_dict:
2575 # It isn't part of a playlist
2576 info_dict['playlist'] = None
2577 info_dict['playlist_index'] = None
2578
bc516a3f 2579 self._sanitize_thumbnails(info_dict)
d5519808 2580
536a55da 2581 thumbnail = info_dict.get('thumbnail')
bc516a3f 2582 thumbnails = info_dict.get('thumbnails')
536a55da
S
2583 if thumbnail:
2584 info_dict['thumbnail'] = sanitize_url(thumbnail)
2585 elif thumbnails:
d5519808
PH
2586 info_dict['thumbnail'] = thumbnails[-1]['url']
2587
ae30b840 2588 if info_dict.get('display_id') is None and 'id' in info_dict:
0afef30b
PH
2589 info_dict['display_id'] = info_dict['id']
2590
03f83004 2591 self._fill_common_fields(info_dict)
33d2fc2f 2592
05108a49
S
2593 for cc_kind in ('subtitles', 'automatic_captions'):
2594 cc = info_dict.get(cc_kind)
2595 if cc:
2596 for _, subtitle in cc.items():
2597 for subtitle_format in subtitle:
2598 if subtitle_format.get('url'):
2599 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
2600 if subtitle_format.get('ext') is None:
2601 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
2602
2603 automatic_captions = info_dict.get('automatic_captions')
4bba3716 2604 subtitles = info_dict.get('subtitles')
4bba3716 2605
360e1ca5 2606 info_dict['requested_subtitles'] = self.process_subtitles(
05108a49 2607 info_dict['id'], subtitles, automatic_captions)
a504ced0 2608
aebb4f4b 2609 formats = self._get_formats(info_dict)
dd82ffea 2610
c154302c 2611 # Backward compatibility with InfoExtractor._sort_formats
9ebac355 2612 field_preference = (formats or [{}])[0].pop('__sort_fields', None)
c154302c 2613 if field_preference:
2614 info_dict['_format_sort_fields'] = field_preference
2615
0a5a191a 2616 # or None ensures --clean-infojson removes it
2617 info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
88acdbc2 2618 if not self.params.get('allow_unplayable_formats'):
2619 formats = [f for f in formats if not f.get('has_drm')]
17ffed18 2620
2621 if formats and all(f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
2622 self.report_warning(
2623 f'{"This video is DRM protected and " if info_dict["_has_drm"] else ""}'
2624 'only images are available for download. Use --list-formats to see them'.capitalize())
88acdbc2 2625
319b6059 2626 get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
2627 if not get_from_start:
2628 info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
2629 if info_dict.get('is_live') and formats:
adbc4ec4 2630 formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
319b6059 2631 if get_from_start and not formats:
a44ca5a4 2632 self.raise_no_formats(info_dict, msg=(
2633 '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
2634 'If you want to download from the current time, use --no-live-from-start'))
adbc4ec4 2635
73af5cc8
S
2636 def is_wellformed(f):
2637 url = f.get('url')
a5ac0c47 2638 if not url:
73af5cc8
S
2639 self.report_warning(
2640 '"url" field is missing or empty - skipping format, '
2641 'there is an error in extractor')
a5ac0c47
S
2642 return False
2643 if isinstance(url, bytes):
2644 sanitize_string_field(f, 'url')
2645 return True
73af5cc8
S
2646
2647 # Filter out malformed formats for better extraction robustness
1ac7f461 2648 formats = list(filter(is_wellformed, formats or []))
2649
2650 if not formats:
2651 self.raise_no_formats(info_dict)
73af5cc8 2652
39f32f17 2653 for format in formats:
c9969434
S
2654 sanitize_string_field(format, 'format_id')
2655 sanitize_numeric_fields(format)
dcf77cf1 2656 format['url'] = sanitize_url(format['url'])
39f32f17 2657 if format.get('ext') is None:
2658 format['ext'] = determine_ext(format['url']).lower()
2659 if format.get('protocol') is None:
2660 format['protocol'] = determine_protocol(format)
2661 if format.get('resolution') is None:
2662 format['resolution'] = self.format_resolution(format, default=None)
2663 if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
2664 format['dynamic_range'] = 'SDR'
2665 if format.get('aspect_ratio') is None:
2666 format['aspect_ratio'] = try_call(lambda: round(format['width'] / format['height'], 2))
2667 if (info_dict.get('duration') and format.get('tbr')
2668 and not format.get('filesize') and not format.get('filesize_approx')):
2669 format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
2670 format['http_headers'] = self._calc_headers(collections.ChainMap(format, info_dict))
2671
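# Worked example (not part of the original file) of the filesize_approx estimate above:
# tbr is in Kbit/s and 1024 / 8 == 128 bytes per Kbit, so a 60 s stream at 1000 Kbit/s
# is approximated as 60 * 1000 * 128 = 7_680_000 bytes (~7.3 MiB).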
2672 # This is copied to http_headers by the above _calc_headers and can now be removed
2673 if '__x_forwarded_for_ip' in info_dict:
2674 del info_dict['__x_forwarded_for_ip']
2675
c154302c 2676 self.sort_formats({
2677 'formats': formats,
2678 '_format_sort_fields': info_dict.get('_format_sort_fields')
2679 })
39f32f17 2680
2681 # Sanitize and group by format_id
2682 formats_dict = {}
2683 for i, format in enumerate(formats):
e74e3b63 2684 if not format.get('format_id'):
14f25df2 2685 format['format_id'] = str(i)
e2effb08
S
2686 else:
2687 # Sanitize format_id from characters used in format selector expression
ec85ded8 2688 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
39f32f17 2689 formats_dict.setdefault(format['format_id'], []).append(format)
181c7053
S
2690
2691 # Make sure all formats have unique format_id
03b4de72 2692 common_exts = set(itertools.chain(*self._format_selection_exts.values()))
181c7053 2693 for format_id, ambiguous_formats in formats_dict.items():
48ee10ee 2694 ambiguous_id = len(ambiguous_formats) > 1
2695 for i, format in enumerate(ambiguous_formats):
2696 if ambiguous_id:
181c7053 2697 format['format_id'] = '%s-%d' % (format_id, i)
48ee10ee 2698 # Ensure there is no conflict between id and ext in format selection
2699 # See https://github.com/yt-dlp/yt-dlp/issues/1282
2700 if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
2701 format['format_id'] = 'f%s' % format['format_id']
181c7053 2702
39f32f17 2703 if format.get('format') is None:
2704 format['format'] = '{id} - {res}{note}'.format(
2705 id=format['format_id'],
2706 res=self.format_resolution(format),
2707 note=format_field(format, 'format_note', ' (%s)'),
2708 )
dd82ffea 2709
9f1a1c36 2710 if self.params.get('check_formats') is True:
282f5709 2711 formats = LazyList(self._check_formats(formats[::-1]), reverse=True)
9f1a1c36 2712
88acdbc2 2713 if not formats or formats[0] is not info_dict:
b3d9ef88
JMF
2714 # only set the 'formats' field if the original info_dict lists them
2715 # otherwise we end up with a circular reference, the first (and unique)
f89197d7 2716 # element in the 'formats' field in info_dict is info_dict itself,
dfb1b146 2717 # which can't be exported to json
b3d9ef88 2718 info_dict['formats'] = formats
4ec82a72 2719
2720 info_dict, _ = self.pre_process(info_dict)
2721
6db9c4d5 2722 if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
09b49e1f 2723 return info_dict
2724
2725 self.post_extract(info_dict)
2726 info_dict, _ = self.pre_process(info_dict, 'after_filter')
2727
093a1710 2728 # The pre-processors may have modified the formats
aebb4f4b 2729 formats = self._get_formats(info_dict)
093a1710 2730
e4221b70 2731 list_only = self.params.get('simulate') == 'list_only'
fa9f30b8 2732 interactive_format_selection = not list_only and self.format_selector == '-'
b7b04c78 2733 if self.params.get('list_thumbnails'):
2734 self.list_thumbnails(info_dict)
b7b04c78 2735 if self.params.get('listsubtitles'):
2736 if 'automatic_captions' in info_dict:
2737 self.list_subtitles(
2738 info_dict['id'], automatic_captions, 'automatic captions')
2739 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
fa9f30b8 2740 if self.params.get('listformats') or interactive_format_selection:
b69fd25c 2741 self.list_formats(info_dict)
169dbde9 2742 if list_only:
b7b04c78 2743 # Without this printing, -F --print-json will not work
17060584 2744 self.__forced_printings(info_dict)
c487cf00 2745 return info_dict
bfaae0a7 2746
187986a8 2747 format_selector = self.format_selector
fa9f30b8 2748 while True:
2749 if interactive_format_selection:
372a0f3b
IS
2750 req_format = input(self._format_screen('\nEnter format selector ', self.Styles.EMPHASIS)
2751 + '(Press ENTER for default, or Ctrl+C to quit)'
2752 + self._format_screen(': ', self.Styles.EMPHASIS))
fa9f30b8 2753 try:
372a0f3b 2754 format_selector = self.build_format_selector(req_format) if req_format else None
fa9f30b8 2755 except SyntaxError as err:
2756 self.report_error(err, tb=False, is_error=False)
2757 continue
2758
372a0f3b
IS
2759 if format_selector is None:
2760 req_format = self._default_format_spec(info_dict, download=download)
2761 self.write_debug(f'Default format spec: {req_format}')
2762 format_selector = self.build_format_selector(req_format)
2763
85e801a9 2764 formats_to_download = list(format_selector({
fa9f30b8 2765 'formats': formats,
85e801a9 2766 'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
2767 'incomplete_formats': (
2768 # All formats are video-only or
2769 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
2770 # all formats are audio-only
2771 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
2772 }))
fa9f30b8 2773 if interactive_format_selection and not formats_to_download:
2774 self.report_error('Requested format is not available', tb=False, is_error=False)
2775 continue
2776 break
317f7ab6 2777
dd82ffea 2778 if not formats_to_download:
b7da73eb 2779 if not self.params.get('ignore_no_formats_error'):
c0b6e5c7 2780 raise ExtractorError(
2781 'Requested format is not available. Use --list-formats for a list of available formats',
2782 expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
b62fa6d7 2783 self.report_warning('Requested format is not available')
2784 # Process what we can, even without any available formats.
2785 formats_to_download = [{}]
a13e6848 2786
0500ee3d 2787 requested_ranges = tuple(self.params.get('download_ranges', lambda *_: [{}])(info_dict, self))
5ec1b6b7 2788 best_format, downloaded_formats = formats_to_download[-1], []
b62fa6d7 2789 if download:
0500ee3d 2790 if best_format and requested_ranges:
5ec1b6b7 2791 def to_screen(*msg):
2792 self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')
2793
2794 to_screen(f'Downloading {len(formats_to_download)} format(s):',
2795 (f['format_id'] for f in formats_to_download))
0500ee3d 2796 if requested_ranges != ({}, ):
5ec1b6b7 2797 to_screen(f'Downloading {len(requested_ranges)} time ranges:',
fc2ba496 2798 (f'{c["start_time"]:.1f}-{c["end_time"]:.1f}' for c in requested_ranges))
a13e6848 2799 max_downloads_reached = False
5ec1b6b7 2800
0500ee3d 2801 for fmt, chapter in itertools.product(formats_to_download, requested_ranges):
5ec1b6b7 2802 new_info = self._copy_infodict(info_dict)
b7da73eb 2803 new_info.update(fmt)
3975b4d2 2804 offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
fc2ba496 2805 end_time = offset + min(chapter.get('end_time', duration), duration)
3975b4d2 2806 if chapter or offset:
5ec1b6b7 2807 new_info.update({
3975b4d2 2808 'section_start': offset + chapter.get('start_time', 0),
2576d53a 2809 # duration may not be accurate, so allow deviations of <1 sec
2810 'section_end': end_time if end_time <= offset + duration + 1 else None,
5ec1b6b7 2811 'section_title': chapter.get('title'),
2812 'section_number': chapter.get('index'),
2813 })
2814 downloaded_formats.append(new_info)
a13e6848 2815 try:
2816 self.process_info(new_info)
2817 except MaxDownloadsReached:
2818 max_downloads_reached = True
415f8d51 2819 self._raise_pending_errors(new_info)
f46e2f9d 2820 # Remove copied info
2821 for key, val in tuple(new_info.items()):
2822 if info_dict.get(key) == val:
2823 new_info.pop(key)
a13e6848 2824 if max_downloads_reached:
2825 break
ebed8b37 2826
5ec1b6b7 2827 write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
a13e6848 2828 assert write_archive.issubset({True, False, 'ignore'})
2829 if True in write_archive and False not in write_archive:
2830 self.record_download_archive(info_dict)
be72c624 2831
5ec1b6b7 2832 info_dict['requested_downloads'] = downloaded_formats
ed5835b4 2833 info_dict = self.run_all_pps('after_video', info_dict)
a13e6848 2834 if max_downloads_reached:
2835 raise MaxDownloadsReached()
ebed8b37 2836
49a57e70 2837 # We update the info dict with the selected best quality format (backwards compatibility)
be72c624 2838 info_dict.update(best_format)
dd82ffea
JMF
2839 return info_dict
2840
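# Illustrative usage sketch (not part of the original file): the 'download_ranges'
# parameter consumed above is a callable (info_dict, ydl) -> iterable of sections.
# A minimal, hand-rolled equivalent of what --download-sections sets up could be:
#   ydl = YoutubeDL({'download_ranges': lambda info, ydl: [{'start_time': 0, 'end_time': 30}]})
# which makes process_video_result() download only the first 30 seconds of each
# selected format (partial downloads require ffmpeg, see process_info below).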
98c70d6f 2841 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
a504ced0 2842 """Select the requested subtitles and their format"""
d8a58ddc 2843 available_subs, normal_sub_langs = {}, []
98c70d6f
JMF
2844 if normal_subtitles and self.params.get('writesubtitles'):
2845 available_subs.update(normal_subtitles)
d8a58ddc 2846 normal_sub_langs = tuple(normal_subtitles.keys())
98c70d6f
JMF
2847 if automatic_captions and self.params.get('writeautomaticsub'):
2848 for lang, cap_info in automatic_captions.items():
360e1ca5
JMF
2849 if lang not in available_subs:
2850 available_subs[lang] = cap_info
2851
d2c8aadf 2852 if not available_subs or (
2853 not self.params.get('writesubtitles')
2854 and not self.params.get('writeautomaticsub')):
4d171848 2855 return None
a504ced0 2856
d8a58ddc 2857 all_sub_langs = tuple(available_subs.keys())
a504ced0 2858 if self.params.get('allsubtitles', False):
c32b0aab 2859 requested_langs = all_sub_langs
2860 elif self.params.get('subtitleslangs', False):
5314b521 2861 try:
2862 requested_langs = orderedSet_from_options(
2863 self.params.get('subtitleslangs'), {'all': all_sub_langs}, use_regex=True)
2864 except re.error as e:
2865 raise ValueError(f'Wrong regex for subtitleslangs: {e.pattern}')
a504ced0 2866 else:
376aa24b
SS
2867 requested_langs = LazyList(itertools.chain(
2868 ['en'] if 'en' in normal_sub_langs else [],
2869 filter(lambda f: f.startswith('en'), normal_sub_langs),
2870 ['en'] if 'en' in all_sub_langs else [],
2871 filter(lambda f: f.startswith('en'), all_sub_langs),
2872 normal_sub_langs, all_sub_langs,
2873 ))[:1]
ad3dc496 2874 if requested_langs:
d2c8aadf 2875 self.to_screen(f'[info] {video_id}: Downloading subtitles: {", ".join(requested_langs)}')
a504ced0
JMF
2876
2877 formats_query = self.params.get('subtitlesformat', 'best')
2878 formats_preference = formats_query.split('/') if formats_query else []
2879 subs = {}
2880 for lang in requested_langs:
2881 formats = available_subs.get(lang)
2882 if formats is None:
86e5f3ed 2883 self.report_warning(f'{lang} subtitles not available for {video_id}')
a504ced0 2884 continue
a504ced0
JMF
2885 for ext in formats_preference:
2886 if ext == 'best':
2887 f = formats[-1]
2888 break
2889 matches = list(filter(lambda f: f['ext'] == ext, formats))
2890 if matches:
2891 f = matches[-1]
2892 break
2893 else:
2894 f = formats[-1]
2895 self.report_warning(
2896 'No subtitle format found matching "%s" for language %s, '
2897 'using %s' % (formats_query, lang, f['ext']))
2898 subs[lang] = f
2899 return subs
2900
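# Illustrative sketch (not part of the original file): for a hypothetical instance with
# 'writesubtitles' enabled and no explicit language selection, English is preferred and
# only one language is kept; with the default subtitlesformat 'best', the last listed
# format for that language wins.
#   subs = {'de': [{'ext': 'vtt', 'url': '...'}],
#           'en': [{'ext': 'vtt', 'url': '...'}, {'ext': 'srv1', 'url': '...'}]}
#   ydl.process_subtitles('xyz', subs, None)   # -> {'en': {'ext': 'srv1', 'url': '...'}}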
bb66c247 2901 def _forceprint(self, key, info_dict):
2902 if info_dict is None:
2903 return
2904 info_copy = info_dict.copy()
17060584 2905 info_copy.setdefault('filename', self.prepare_filename(info_dict))
2906 if info_dict.get('requested_formats') is not None:
2907 # For RTMP URLs, also include the playpath
2908 info_copy['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
2909 elif info_dict.get('url'):
2910 info_copy['urls'] = info_dict['url'] + info_dict.get('play_path', '')
bb66c247 2911 info_copy['formats_table'] = self.render_formats_table(info_dict)
2912 info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
2913 info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
2914 info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))
2915
2916 def format_tmpl(tmpl):
48c8424b 2917 mobj = re.fullmatch(r'([\w.:,]|-\d|(?P<dict>{([\w.:,]|-\d)+}))+=?', tmpl)
07a1250e 2918 if not mobj:
2919 return tmpl
48c8424b 2920
2921 fmt = '%({})s'
2922 if tmpl.startswith('{'):
6f2287cb 2923 tmpl, fmt = f'.{tmpl}', '%({})j'
48c8424b 2924 if tmpl.endswith('='):
2925 tmpl, fmt = tmpl[:-1], '{0} = %({0})#j'
2926 return '\n'.join(map(fmt.format, [tmpl] if mobj.group('dict') else tmpl.split(',')))
8130779d 2927
bb66c247 2928 for tmpl in self.params['forceprint'].get(key, []):
2929 self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))
2930
2931 for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
5127e92a 2932 filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
bb66c247 2933 tmpl = format_tmpl(tmpl)
2934 self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
8d93e69d 2935 if self._ensure_dir_exists(filename):
9874e82b 2936 with open(filename, 'a', encoding='utf-8', newline='') as f:
2937 f.write(self.evaluate_outtmpl(tmpl, info_copy) + os.linesep)
ca30f449 2938
17060584 2939 return info_copy
2940
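# Illustrative sketch (not part of the original file) of the --print shorthand
# handled by format_tmpl() above:
#   'title'      -> '%(title)s'
#   'title,id'   -> '%(title)s\n%(id)s'
#   '{title,id}' -> '%(.{title,id})j'            (the two fields as a JSON object)
#   'filename='  -> 'filename = %(filename)#j'
# Anything that does not match the shorthand regex is used as a full output template.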
2941 def __forced_printings(self, info_dict, filename=None, incomplete=True):
bb66c247 2942 if (self.params.get('forcejson')
2943 or self.params['forceprint'].get('video')
2944 or self.params['print_to_file'].get('video')):
2b8a2973 2945 self.post_extract(info_dict)
17060584 2946 if filename:
2947 info_dict['filename'] = filename
b5f61b69 2948 info_copy = self._forceprint('video', info_dict)
2949
2950 def print_field(field, actual_field=None, optional=False):
2951 if actual_field is None:
2952 actual_field = field
2953 if self.params.get(f'force{field}') and (
2954 info_copy.get(field) is not None or (not optional and not incomplete)):
2955 self.to_stdout(info_copy[actual_field])
2956
2957 print_field('title')
2958 print_field('id')
2959 print_field('url', 'urls')
2960 print_field('thumbnail', optional=True)
2961 print_field('description', optional=True)
ad54c913 2962 print_field('filename')
b5f61b69 2963 if self.params.get('forceduration') and info_copy.get('duration') is not None:
2964 self.to_stdout(formatSeconds(info_copy['duration']))
2965 print_field('format')
53c18592 2966
2b8a2973 2967 if self.params.get('forcejson'):
6e84b215 2968 self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
d06daf23 2969
e8e73840 2970 def dl(self, name, info, subtitle=False, test=False):
88acdbc2 2971 if not info.get('url'):
1151c407 2972 self.raise_no_formats(info, True)
e8e73840 2973
2974 if test:
2975 verbose = self.params.get('verbose')
2976 params = {
2977 'test': True,
a169858f 2978 'quiet': self.params.get('quiet') or not verbose,
e8e73840 2979 'verbose': verbose,
2980 'noprogress': not verbose,
2981 'nopart': True,
2982 'skip_unavailable_fragments': False,
2983 'keep_fragments': False,
2984 'overwrites': True,
2985 '_no_ytdl_file': True,
2986 }
2987 else:
2988 params = self.params
96fccc10 2989 fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
e8e73840 2990 if not test:
2991 for ph in self._progress_hooks:
2992 fd.add_progress_hook(ph)
42676437
M
2993 urls = '", "'.join(
2994 (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
2995 for f in info.get('requested_formats', []) or [info])
3a408f9d 2996 self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')
03b4de72 2997
adbc4ec4
THD
2998 # Note: Ideally, info should be deep-copied so that hooks cannot modify it.
2999 # But it may contain objects that are not deep-copyable
3000 new_info = self._copy_infodict(info)
e8e73840 3001 if new_info.get('http_headers') is None:
3002 new_info['http_headers'] = self._calc_headers(new_info)
3003 return fd.download(name, new_info, subtitle)
3004
e04938ab 3005 def existing_file(self, filepaths, *, default_overwrite=True):
3006 existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
3007 if existing_files and not self.params.get('overwrites', default_overwrite):
3008 return existing_files[0]
3009
3010 for file in existing_files:
3011 self.report_file_delete(file)
3012 os.remove(file)
3013 return None
3014
8222d8de 3015 def process_info(self, info_dict):
09b49e1f 3016 """Process a single resolved IE result. (Modifies it in-place)"""
8222d8de
JMF
3017
3018 assert info_dict.get('_type', 'video') == 'video'
f46e2f9d 3019 original_infodict = info_dict
fd288278 3020
4513a41a 3021 if 'format' not in info_dict and 'ext' in info_dict:
8222d8de
JMF
3022 info_dict['format'] = info_dict['ext']
3023
c77495e3 3024 if self._match_entry(info_dict) is not None:
9e907ebd 3025 info_dict['__write_download_archive'] = 'ignore'
8222d8de
JMF
3026 return
3027
09b49e1f 3028 # Does nothing under normal operation - for backward compatibility of process_info
277d6ff5 3029 self.post_extract(info_dict)
119e40ef 3030
3031 def replace_info_dict(new_info):
3032 nonlocal info_dict
3033 if new_info == info_dict:
3034 return
3035 info_dict.clear()
3036 info_dict.update(new_info)
3037
3038 new_info, _ = self.pre_process(info_dict, 'video')
3039 replace_info_dict(new_info)
0c14d66a 3040 self._num_downloads += 1
8222d8de 3041
dcf64d43 3042 # info_dict['_filename'] needs to be set for backward compatibility
de6000d9 3043 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
3044 temp_filename = self.prepare_filename(info_dict, 'temp')
0202b52a 3045 files_to_move = {}
8222d8de
JMF
3046
3047 # Forced printings
4513a41a 3048 self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
8222d8de 3049
ca6d59d2 3050 def check_max_downloads():
3051 if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
3052 raise MaxDownloadsReached()
3053
b7b04c78 3054 if self.params.get('simulate'):
9e907ebd 3055 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
ca6d59d2 3056 check_max_downloads()
8222d8de
JMF
3057 return
3058
de6000d9 3059 if full_filename is None:
8222d8de 3060 return
e92caff5 3061 if not self._ensure_dir_exists(encodeFilename(full_filename)):
0202b52a 3062 return
e92caff5 3063 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
8222d8de
JMF
3064 return
3065
80c03fa9 3066 if self._write_description('video', info_dict,
3067 self.prepare_filename(info_dict, 'description')) is None:
3068 return
3069
3070 sub_files = self._write_subtitles(info_dict, temp_filename)
3071 if sub_files is None:
3072 return
3073 files_to_move.update(dict(sub_files))
3074
3075 thumb_files = self._write_thumbnails(
3076 'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
3077 if thumb_files is None:
3078 return
3079 files_to_move.update(dict(thumb_files))
8222d8de 3080
80c03fa9 3081 infofn = self.prepare_filename(info_dict, 'infojson')
3082 _infojson_written = self._write_info_json('video', info_dict, infofn)
3083 if _infojson_written:
dac5df5a 3084 info_dict['infojson_filename'] = infofn
e75bb0d6 3085 # For backward compatibility, even though it was a private field
80c03fa9 3086 info_dict['__infojson_filename'] = infofn
3087 elif _infojson_written is None:
3088 return
3089
3090 # Note: Annotations are deprecated
3091 annofn = None
1fb07d10 3092 if self.params.get('writeannotations', False):
de6000d9 3093 annofn = self.prepare_filename(info_dict, 'annotation')
80c03fa9 3094 if annofn:
e92caff5 3095 if not self._ensure_dir_exists(encodeFilename(annofn)):
0202b52a 3096 return
0c3d0f51 3097 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
6febd1c1 3098 self.to_screen('[info] Video annotations are already present')
ffddb112
RA
3099 elif not info_dict.get('annotations'):
3100 self.report_warning('There are no annotations to write.')
7b6fefc9
PH
3101 else:
3102 try:
6febd1c1 3103 self.to_screen('[info] Writing video annotations to: ' + annofn)
86e5f3ed 3104 with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
7b6fefc9
PH
3105 annofile.write(info_dict['annotations'])
3106 except (KeyError, TypeError):
6febd1c1 3107 self.report_warning('There are no annotations to write.')
86e5f3ed 3108 except OSError:
6febd1c1 3109 self.report_error('Cannot write annotations file: ' + annofn)
7b6fefc9 3110 return
1fb07d10 3111
732044af 3112 # Write internet shortcut files
08438d2c 3113 def _write_link_file(link_type):
60f3e995 3114 url = try_get(info_dict['webpage_url'], iri_to_uri)
3115 if not url:
3116 self.report_warning(
3117 f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
3118 return True
08438d2c 3119 linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
0e6b018a
Z
3120 if not self._ensure_dir_exists(encodeFilename(linkfn)):
3121 return False
10e3742e 3122 if self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
08438d2c 3123 self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
3124 return True
3125 try:
3126 self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
86e5f3ed 3127 with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
3128 newline='\r\n' if link_type == 'url' else '\n') as linkfile:
60f3e995 3129 template_vars = {'url': url}
08438d2c 3130 if link_type == 'desktop':
3131 template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
3132 linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
86e5f3ed 3133 except OSError:
08438d2c 3134 self.report_error(f'Cannot write internet shortcut {linkfn}')
3135 return False
732044af 3136 return True
3137
08438d2c 3138 write_links = {
3139 'url': self.params.get('writeurllink'),
3140 'webloc': self.params.get('writewebloclink'),
3141 'desktop': self.params.get('writedesktoplink'),
3142 }
3143 if self.params.get('writelink'):
3144 link_type = ('webloc' if sys.platform == 'darwin'
3145 else 'desktop' if sys.platform.startswith('linux')
3146 else 'url')
3147 write_links[link_type] = True
3148
3149 if any(should_write and not _write_link_file(link_type)
3150 for link_type, should_write in write_links.items()):
3151 return
732044af 3152
415f8d51 3153 new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
3154 replace_info_dict(new_info)
56d868db 3155
a13e6848 3156 if self.params.get('skip_download'):
56d868db 3157 info_dict['filepath'] = temp_filename
3158 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3159 info_dict['__files_to_move'] = files_to_move
f46e2f9d 3160 replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
9e907ebd 3161 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
56d868db 3162 else:
3163 # Download
b868936c 3164 info_dict.setdefault('__postprocessors', [])
4340deca 3165 try:
0202b52a 3166
e04938ab 3167 def existing_video_file(*filepaths):
6b591b29 3168 ext = info_dict.get('ext')
e04938ab 3169 converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
3170 file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
3171 default_overwrite=False)
3172 if file:
3173 info_dict['ext'] = os.path.splitext(file)[1][1:]
3174 return file
0202b52a 3175
7b2c3f47 3176 fd, success = None, True
fccf90e7 3177 if info_dict.get('protocol') or info_dict.get('url'):
56ba69e4 3178 fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
71df9b7f 3179 if fd is not FFmpegFD and 'no-direct-merge' not in self.params['compat_opts'] and (
56ba69e4 3180 info_dict.get('section_start') or info_dict.get('section_end')):
7b2c3f47 3181 msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
56ba69e4 3182 else 'You have requested downloading the video partially, but ffmpeg is not installed')
3183 self.report_error(f'{msg}. Aborting')
5ec1b6b7 3184 return
5ec1b6b7 3185
4340deca 3186 if info_dict.get('requested_formats') is not None:
0202b52a 3187 old_ext = info_dict['ext']
4e3b637d 3188 if self.params.get('merge_output_format') is None:
4e3b637d 3189 if (info_dict['ext'] == 'webm'
3190 and info_dict.get('thumbnails')
3191 # check with type instead of pp_key, __name__, or isinstance
3192 # since we don't want any custom PPs to trigger this
c487cf00 3193 and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])): # noqa: E721
4e3b637d 3194 info_dict['ext'] = 'mkv'
3195 self.report_warning(
3196 'webm doesn\'t support embedding a thumbnail, mkv will be used')
124bc071 3197 new_ext = info_dict['ext']
0202b52a 3198
124bc071 3199 def correct_ext(filename, ext=new_ext):
96fccc10 3200 if filename == '-':
3201 return filename
0202b52a 3202 filename_real_ext = os.path.splitext(filename)[1][1:]
3203 filename_wo_ext = (
3204 os.path.splitext(filename)[0]
124bc071 3205 if filename_real_ext in (old_ext, new_ext)
0202b52a 3206 else filename)
86e5f3ed 3207 return f'{filename_wo_ext}.{ext}'
0202b52a 3208
38c6902b 3209 # Ensure filename always has a correct extension for successful merge
0202b52a 3210 full_filename = correct_ext(full_filename)
3211 temp_filename = correct_ext(temp_filename)
e04938ab 3212 dl_filename = existing_video_file(full_filename, temp_filename)
ad54c913 3213
1ea24129 3214 info_dict['__real_download'] = False
18e674b4 3215
7b2c3f47 3216 merger = FFmpegMergerPP(self)
adbc4ec4 3217 downloaded = []
dbf5416a 3218 if dl_filename is not None:
6c7274ec 3219 self.report_file_already_downloaded(dl_filename)
adbc4ec4 3220 elif fd:
ad54c913 3221 for f in info_dict['requested_formats'] if fd != FFmpegFD else []:
adbc4ec4
THD
3222 f['filepath'] = fname = prepend_extension(
3223 correct_ext(temp_filename, info_dict['ext']),
3224 'f%s' % f['format_id'], info_dict['ext'])
3225 downloaded.append(fname)
ad54c913 3226 info_dict['url'] = '\n'.join(f['url'] for f in info_dict['requested_formats'])
dbf5416a 3227 success, real_download = self.dl(temp_filename, info_dict)
3228 info_dict['__real_download'] = real_download
18e674b4 3229 else:
18e674b4 3230 if self.params.get('allow_unplayable_formats'):
3231 self.report_warning(
3232 'You have requested merging of multiple formats '
3233 'while also allowing unplayable formats to be downloaded. '
3234 'The formats won\'t be merged to prevent data corruption.')
3235 elif not merger.available:
e8969bda 3236 msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
3237 if not self.params.get('ignoreerrors'):
3238 self.report_error(f'{msg}. Aborting due to --abort-on-error')
3239 return
3240 self.report_warning(f'{msg}. The formats won\'t be merged')
18e674b4 3241
96fccc10 3242 if temp_filename == '-':
adbc4ec4 3243 reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
96fccc10 3244 else 'but the formats are incompatible for simultaneous download' if merger.available
3245 else 'but ffmpeg is not installed')
3246 self.report_warning(
3247 f'You have requested downloading multiple formats to stdout {reason}. '
3248 'The formats will be streamed one after the other')
3249 fname = temp_filename
ad54c913 3250 for f in info_dict['requested_formats']:
dbf5416a 3251 new_info = dict(info_dict)
3252 del new_info['requested_formats']
3253 new_info.update(f)
96fccc10 3254 if temp_filename != '-':
124bc071 3255 fname = prepend_extension(
3256 correct_ext(temp_filename, new_info['ext']),
3257 'f%s' % f['format_id'], new_info['ext'])
96fccc10 3258 if not self._ensure_dir_exists(fname):
3259 return
a21e0ab1 3260 f['filepath'] = fname
96fccc10 3261 downloaded.append(fname)
dbf5416a 3262 partial_success, real_download = self.dl(fname, new_info)
3263 info_dict['__real_download'] = info_dict['__real_download'] or real_download
3264 success = success and partial_success
adbc4ec4
THD
3265
3266 if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
3267 info_dict['__postprocessors'].append(merger)
3268 info_dict['__files_to_merge'] = downloaded
3269 # Even if there were no downloads, it is being merged only now
3270 info_dict['__real_download'] = True
3271 else:
3272 for file in downloaded:
3273 files_to_move[file] = None
4340deca
P
3274 else:
3275 # Just a single file
e04938ab 3276 dl_filename = existing_video_file(full_filename, temp_filename)
6c7274ec 3277 if dl_filename is None or dl_filename == temp_filename:
3278 # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
3279 # So we should try to resume the download
e8e73840 3280 success, real_download = self.dl(temp_filename, info_dict)
0202b52a 3281 info_dict['__real_download'] = real_download
6c7274ec 3282 else:
3283 self.report_file_already_downloaded(dl_filename)
0202b52a 3284
0202b52a 3285 dl_filename = dl_filename or temp_filename
c571435f 3286 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
0202b52a 3287
3158150c 3288 except network_exceptions as err:
7960b056 3289 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
4340deca 3290 return
86e5f3ed 3291 except OSError as err:
4340deca
P
3292 raise UnavailableVideoError(err)
3293 except (ContentTooShortError, ) as err:
86e5f3ed 3294 self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
4340deca 3295 return
8222d8de 3296
415f8d51 3297 self._raise_pending_errors(info_dict)
de6000d9 3298 if success and full_filename != '-':
f17f8651 3299
fd7cfb64 3300 def fixup():
3301 do_fixup = True
3302 fixup_policy = self.params.get('fixup')
3303 vid = info_dict['id']
3304
3305 if fixup_policy in ('ignore', 'never'):
3306 return
3307 elif fixup_policy == 'warn':
3fe75fdc 3308 do_fixup = 'warn'
f89b3e2d 3309 elif fixup_policy != 'force':
3310 assert fixup_policy in ('detect_or_warn', None)
3311 if not info_dict.get('__real_download'):
3312 do_fixup = False
fd7cfb64 3313
3314 def ffmpeg_fixup(cndn, msg, cls):
3fe75fdc 3315 if not (do_fixup and cndn):
fd7cfb64 3316 return
3fe75fdc 3317 elif do_fixup == 'warn':
fd7cfb64 3318 self.report_warning(f'{vid}: {msg}')
3319 return
3320 pp = cls(self)
3321 if pp.available:
3322 info_dict['__postprocessors'].append(pp)
3323 else:
3324 self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
3325
3326 stretched_ratio = info_dict.get('stretched_ratio')
ca9def71
LNO
3327 ffmpeg_fixup(stretched_ratio not in (1, None),
3328 f'Non-uniform pixel ratio {stretched_ratio}',
3329 FFmpegFixupStretchedPP)
fd7cfb64 3330
993191c0 3331 downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
24146491 3332 downloader = downloader.FD_NAME if downloader else None
adbc4ec4 3333
ca9def71
LNO
3334 ext = info_dict.get('ext')
3335 postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
3336 isinstance(pp, FFmpegVideoConvertorPP)
3337 and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
3338 ) for pp in self._pps['post_process'])
3339
3340 if not postprocessed_by_ffmpeg:
3341 ffmpeg_fixup(ext == 'm4a' and info_dict.get('container') == 'm4a_dash',
f2df4071 3342 'writing DASH m4a. Only some players support this container',
3343 FFmpegFixupM4aPP)
24146491 3344 ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
494f5230 3345 or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
adbc4ec4
THD
3346 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
3347 FFmpegFixupM3u8PP)
26010b5c 3348 ffmpeg_fixup(info_dict.get('is_live') and downloader == 'dashsegments',
adbc4ec4
THD
3349 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)
3350
24146491 3351 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
3352 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)
fd7cfb64 3353
3354 fixup()
8222d8de 3355 try:
f46e2f9d 3356 replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
af819c21 3357 except PostProcessingError as err:
3358 self.report_error('Postprocessing: %s' % str(err))
8222d8de 3359 return
ab8e5e51
AM
3360 try:
3361 for ph in self._post_hooks:
23c1a667 3362 ph(info_dict['filepath'])
ab8e5e51
AM
3363 except Exception as err:
3364 self.report_error('post hooks: %s' % str(err))
3365 return
9e907ebd 3366 info_dict['__write_download_archive'] = True
2d30509f 3367
c487cf00 3368 assert info_dict is original_infodict # Make sure the info_dict was modified in-place
a13e6848 3369 if self.params.get('force_write_download_archive'):
9e907ebd 3370 info_dict['__write_download_archive'] = True
ca6d59d2 3371 check_max_downloads()
8222d8de 3372
aa9369a2 3373 def __download_wrapper(self, func):
3374 @functools.wraps(func)
3375 def wrapper(*args, **kwargs):
3376 try:
3377 res = func(*args, **kwargs)
3378 except UnavailableVideoError as e:
3379 self.report_error(e)
b222c271 3380 except DownloadCancelled as e:
3381 self.to_screen(f'[info] {e}')
3382 if not self.params.get('break_per_url'):
3383 raise
fd404bec 3384 self._num_downloads = 0
aa9369a2 3385 else:
3386 if self.params.get('dump_single_json', False):
3387 self.post_extract(res)
3388 self.to_stdout(json.dumps(self.sanitize_info(res)))
3389 return wrapper
3390
8222d8de
JMF
3391 def download(self, url_list):
3392 """Download a given list of URLs."""
aa9369a2 3393 url_list = variadic(url_list) # Passing a single URL is a common mistake
bf1824b3 3394 outtmpl = self.params['outtmpl']['default']
3089bc74
S
3395 if (len(url_list) > 1
3396 and outtmpl != '-'
3397 and '%' not in outtmpl
3398 and self.params.get('max_downloads') != 1):
acd69589 3399 raise SameFileError(outtmpl)
8222d8de
JMF
3400
3401 for url in url_list:
aa9369a2 3402 self.__download_wrapper(self.extract_info)(
3403 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
8222d8de
JMF
3404
3405 return self._download_retcode
3406
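# Illustrative usage sketch (not part of the original file; URL and outtmpl are made up):
#   from yt_dlp import YoutubeDL
#   with YoutubeDL({'outtmpl': '%(title)s.%(ext)s'}) as ydl:
#       retcode = ydl.download(['https://example.com/watch?v=xyz'])
# A single URL string is also accepted, since the argument is normalized via variadic().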
1dcc4c0c 3407 def download_with_info_file(self, info_filename):
31bd3925
JMF
3408 with contextlib.closing(fileinput.FileInput(
3409 [info_filename], mode='r',
3410 openhook=fileinput.hook_encoded('utf-8'))) as f:
3411 # FileInput doesn't have a read method, we can't call json.load
ab1de9cb 3412 infos = [self.sanitize_info(info, self.params.get('clean_infojson', True))
3413 for info in variadic(json.loads('\n'.join(f)))]
3414 for info in infos:
3415 try:
3416 self.__download_wrapper(self.process_ie_result)(info, download=True)
3417 except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
3418 if not isinstance(e, EntryNotInPlaylist):
3419 self.to_stderr('\r')
3420 webpage_url = info.get('webpage_url')
3421 if webpage_url is None:
3422 raise
aa9369a2 3423 self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
ab1de9cb 3424 self.download([webpage_url])
d4943898 3425 return self._download_retcode
1dcc4c0c 3426
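# Illustrative usage sketch (not part of the original file): this method backs
# --load-info-json; re-downloading from a previously written .info.json looks like
#   with YoutubeDL() as ydl:
#       ydl.download_with_info_file('video.info.json')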
cb202fd2 3427 @staticmethod
8012d892 3428 def sanitize_info(info_dict, remove_private_keys=False):
3429 ''' Sanitize the infodict for converting to json '''
3ad56b42 3430 if info_dict is None:
3431 return info_dict
6e84b215 3432 info_dict.setdefault('epoch', int(time.time()))
6a5a30f9 3433 info_dict.setdefault('_type', 'video')
b5e7a2e6 3434 info_dict.setdefault('_version', {
3435 'version': __version__,
3436 'current_git_head': current_git_head(),
3437 'release_git_head': RELEASE_GIT_HEAD,
3438 'repository': REPOSITORY,
3439 })
09b49e1f 3440
8012d892 3441 if remove_private_keys:
0a5a191a 3442 reject = lambda k, v: v is None or k.startswith('__') or k in {
f46e2f9d 3443 'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
6f2287cb 3444 'entries', 'filepath', '_filename', 'filename', 'infojson_filename', 'original_url',
3445 'playlist_autonumber', '_format_sort_fields',
6e84b215 3446 }
ae8f99e6 3447 else:
09b49e1f 3448 reject = lambda k, v: False
adbc4ec4
THD
3449
3450 def filter_fn(obj):
3451 if isinstance(obj, dict):
3452 return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
3453 elif isinstance(obj, (list, tuple, set, LazyList)):
3454 return list(map(filter_fn, obj))
3455 elif obj is None or isinstance(obj, (str, int, float, bool)):
3456 return obj
3457 else:
3458 return repr(obj)
3459
5226731e 3460 return filter_fn(info_dict)
cb202fd2 3461
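# Illustrative sketch (not part of the original file):
#   info = {'id': 'x', 'title': 't', '_filename': 'x.mp4', '__files_to_move': {}, 'license': None}
#   YoutubeDL.sanitize_info(info, remove_private_keys=True)
# drops the None value, the '__'-prefixed key and '_filename', adds the 'epoch',
# '_type' and '_version' defaults, and repr()s anything not JSON-serializable.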
8012d892 3462 @staticmethod
3463 def filter_requested_info(info_dict, actually_filter=True):
3464 ''' Alias of sanitize_info for backward compatibility '''
3465 return YoutubeDL.sanitize_info(info_dict, actually_filter)
3466
43d7f5a5 3467 def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
3468 for filename in set(filter(None, files_to_delete)):
3469 if msg:
3470 self.to_screen(msg % filename)
3471 try:
3472 os.remove(filename)
3473 except OSError:
3474 self.report_warning(f'Unable to delete file {filename}')
3475 if filename in info.get('__files_to_move', []): # NB: Delete even if None
3476 del info['__files_to_move'][filename]
3477
ed5835b4 3478 @staticmethod
3479 def post_extract(info_dict):
3480 def actual_post_extract(info_dict):
3481 if info_dict.get('_type') in ('playlist', 'multi_video'):
3482 for video_dict in info_dict.get('entries', {}):
3483 actual_post_extract(video_dict or {})
3484 return
3485
09b49e1f 3486 post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
3487 info_dict.update(post_extractor())
ed5835b4 3488
3489 actual_post_extract(info_dict or {})
3490
dcf64d43 3491 def run_pp(self, pp, infodict):
5bfa4862 3492 files_to_delete = []
dcf64d43 3493 if '__files_to_move' not in infodict:
3494 infodict['__files_to_move'] = {}
b1940459 3495 try:
3496 files_to_delete, infodict = pp.run(infodict)
3497 except PostProcessingError as e:
3498 # Must be True and not 'only_download'
3499 if self.params.get('ignoreerrors') is True:
3500 self.report_error(e)
3501 return infodict
3502 raise
3503
5bfa4862 3504 if not files_to_delete:
dcf64d43 3505 return infodict
5bfa4862 3506 if self.params.get('keepvideo', False):
3507 for f in files_to_delete:
dcf64d43 3508 infodict['__files_to_move'].setdefault(f, '')
5bfa4862 3509 else:
43d7f5a5 3510 self._delete_downloaded_files(
3511 *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
dcf64d43 3512 return infodict
5bfa4862 3513
6f2287cb 3514 def run_all_pps(self, key, info, *, additional_pps=None):
17ba4343 3515 if key != 'video':
3516 self._forceprint(key, info)
3517 for pp in (additional_pps or []) + self._pps[key]:
3518 info = self.run_pp(pp, info)
ed5835b4 3519 return info
277d6ff5 3520
56d868db 3521 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
5bfa4862 3522 info = dict(ie_info)
56d868db 3523 info['__files_to_move'] = files_to_move or {}
415f8d51 3524 try:
3525 info = self.run_all_pps(key, info)
3526 except PostProcessingError as err:
3527 msg = f'Preprocessing: {err}'
3528 info.setdefault('__pending_error', msg)
3529 self.report_error(msg, is_error=False)
56d868db 3530 return info, info.pop('__files_to_move', None)
5bfa4862 3531
f46e2f9d 3532 def post_process(self, filename, info, files_to_move=None):
8222d8de 3533 """Run all the postprocessors on the given file."""
8222d8de 3534 info['filepath'] = filename
dcf64d43 3535 info['__files_to_move'] = files_to_move or {}
ed5835b4 3536 info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
dcf64d43 3537 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
3538 del info['__files_to_move']
ed5835b4 3539 return self.run_all_pps('after_move', info)
c1c9a79c 3540
5db07df6 3541 def _make_archive_id(self, info_dict):
e9fef7ee
S
3542 video_id = info_dict.get('id')
3543 if not video_id:
3544 return
5db07df6
PH
3545 # Future-proof against any change in case
3546 # and for backwards compatibility with prior versions
e9fef7ee 3547 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
7012b23c 3548 if extractor is None:
1211bb6d
S
3549 url = str_or_none(info_dict.get('url'))
3550 if not url:
3551 return
e9fef7ee 3552 # Try to find matching extractor for the URL and take its ie_key
8b7491c8 3553 for ie_key, ie in self._ies.items():
1211bb6d 3554 if ie.suitable(url):
8b7491c8 3555 extractor = ie_key
e9fef7ee
S
3556 break
3557 else:
3558 return
0647d925 3559 return make_archive_id(extractor, video_id)
5db07df6
PH
3560
3561 def in_download_archive(self, info_dict):
ae103564 3562 if not self.archive:
5db07df6
PH
3563 return False
3564
1e8fe57e 3565 vid_ids = [self._make_archive_id(info_dict)]
c200096c 3566 vid_ids.extend(info_dict.get('_old_archive_ids') or [])
1e8fe57e 3567 return any(id_ in self.archive for id_ in vid_ids)
c1c9a79c
PH
3568
3569 def record_download_archive(self, info_dict):
3570 fn = self.params.get('download_archive')
3571 if fn is None:
3572 return
5db07df6
PH
3573 vid_id = self._make_archive_id(info_dict)
3574 assert vid_id
ae103564 3575
a13e6848 3576 self.write_debug(f'Adding to archive: {vid_id}')
9c935fbc 3577 if is_path_like(fn):
ae103564 3578 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
3579 archive_file.write(vid_id + '\n')
a45e8619 3580 self.archive.add(vid_id)
dd82ffea 3581
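# Illustrative sketch (not part of the original file): archive entries are lines of
# '<lowercased extractor key> <video id>', e.g. a download_archive file containing
#   youtube dQw4w9WgXcQ
# makes in_download_archive() return True for that video, and record_download_archive()
# appends such a line (and adds it to self.archive) after a successful download.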
8c51aa65 3582 @staticmethod
8abeeb94 3583 def format_resolution(format, default='unknown'):
9359f3d4 3584 if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
fb04e403 3585 return 'audio only'
f49d89ee
PH
3586 if format.get('resolution') is not None:
3587 return format['resolution']
35615307 3588 if format.get('width') and format.get('height'):
ff51ed58 3589 return '%dx%d' % (format['width'], format['height'])
35615307 3590 elif format.get('height'):
ff51ed58 3591 return '%sp' % format['height']
35615307 3592 elif format.get('width'):
ff51ed58 3593 return '%dx?' % format['width']
3594 return default
8c51aa65 3595
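# Illustrative sketch (not part of the original file):
#   YoutubeDL.format_resolution({'vcodec': 'none', 'acodec': 'mp4a'})  -> 'audio only'
#   YoutubeDL.format_resolution({'width': 1920, 'height': 1080})       -> '1920x1080'
#   YoutubeDL.format_resolution({'height': 720})                       -> '720p'
#   YoutubeDL.format_resolution({'width': 640})                        -> '640x?'
#   YoutubeDL.format_resolution({})                                    -> 'unknown'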
8130779d 3596 def _list_format_headers(self, *headers):
3597 if self.params.get('listformats_table', True) is not False:
591bb9d3 3598 return [self._format_out(header, self.Styles.HEADERS) for header in headers]
8130779d 3599 return headers
3600
c57f7757
PH
3601 def _format_note(self, fdict):
3602 res = ''
3603 if fdict.get('ext') in ['f4f', 'f4m']:
f304da8a 3604 res += '(unsupported)'
32f90364
PH
3605 if fdict.get('language'):
3606 if res:
3607 res += ' '
f304da8a 3608 res += '[%s]' % fdict['language']
c57f7757 3609 if fdict.get('format_note') is not None:
f304da8a 3610 if res:
3611 res += ' '
3612 res += fdict['format_note']
c57f7757 3613 if fdict.get('tbr') is not None:
f304da8a 3614 if res:
3615 res += ', '
3616 res += '%4dk' % fdict['tbr']
c57f7757
PH
3617 if fdict.get('container') is not None:
3618 if res:
3619 res += ', '
3620 res += '%s container' % fdict['container']
3089bc74
S
3621 if (fdict.get('vcodec') is not None
3622 and fdict.get('vcodec') != 'none'):
c57f7757
PH
3623 if res:
3624 res += ', '
3625 res += fdict['vcodec']
91c7271a 3626 if fdict.get('vbr') is not None:
c57f7757
PH
3627 res += '@'
3628 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
3629 res += 'video@'
3630 if fdict.get('vbr') is not None:
3631 res += '%4dk' % fdict['vbr']
fbb21cf5 3632 if fdict.get('fps') is not None:
5d583bdf
S
3633 if res:
3634 res += ', '
3635 res += '%sfps' % fdict['fps']
c57f7757
PH
3636 if fdict.get('acodec') is not None:
3637 if res:
3638 res += ', '
3639 if fdict['acodec'] == 'none':
3640 res += 'video only'
3641 else:
3642 res += '%-5s' % fdict['acodec']
3643 elif fdict.get('abr') is not None:
3644 if res:
3645 res += ', '
3646 res += 'audio'
3647 if fdict.get('abr') is not None:
3648 res += '@%3dk' % fdict['abr']
3649 if fdict.get('asr') is not None:
3650 res += ' (%5dHz)' % fdict['asr']
3651 if fdict.get('filesize') is not None:
3652 if res:
3653 res += ', '
3654 res += format_bytes(fdict['filesize'])
9732d77e
PH
3655 elif fdict.get('filesize_approx') is not None:
3656 if res:
3657 res += ', '
3658 res += '~' + format_bytes(fdict['filesize_approx'])
c57f7757 3659 return res
91c7271a 3660
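# Illustrative sketch (not part of the original file): for a made-up format dict
#   {'tbr': 1500, 'vcodec': 'avc1.640028', 'vbr': 1400,
#    'acodec': 'mp4a.40.2', 'abr': 128, 'filesize': 10_000_000}
# _format_note() yields roughly '1500k, avc1.640028@1400k, mp4a.40.2@128k, 9.54MiB'.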
aebb4f4b 3661 def _get_formats(self, info_dict):
3662 if info_dict.get('formats') is None:
3663 if info_dict.get('url') and info_dict.get('_type', 'video') == 'video':
3664 return [info_dict]
3665 return []
3666 return info_dict['formats']
b69fd25c 3667
aebb4f4b 3668 def render_formats_table(self, info_dict):
3669 formats = self._get_formats(info_dict)
3670 if not formats:
3671 return
8130779d 3672 if self.params.get('listformats_table', True) is False:
76d321f6 3673 table = [
3674 [
3675 format_field(f, 'format_id'),
3676 format_field(f, 'ext'),
3677 self.format_resolution(f),
8130779d 3678 self._format_note(f)
d5d1df8a 3679 ] for f in formats if (f.get('preference') or 0) >= -1000]
8130779d 3680 return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
3681
d816f61f 3682 def simplified_codec(f, field):
3683 assert field in ('acodec', 'vcodec')
3684 codec = f.get(field, 'unknown')
f5ea4748 3685 if not codec:
3686 return 'unknown'
3687 elif codec != 'none':
d816f61f 3688 return '.'.join(codec.split('.')[:4])
3689
3690 if field == 'vcodec' and f.get('acodec') == 'none':
3691 return 'images'
3692 elif field == 'acodec' and f.get('vcodec') == 'none':
3693 return ''
3694 return self._format_out('audio only' if field == 'vcodec' else 'video only',
3695 self.Styles.SUPPRESS)
3696
591bb9d3 3697 delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
8130779d 3698 table = [
3699 [
591bb9d3 3700 self._format_out(format_field(f, 'format_id'), self.Styles.ID),
8130779d 3701 format_field(f, 'ext'),
3702 format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
563e0bf8 3703 format_field(f, 'fps', '\t%d', func=round),
8130779d 3704 format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
b8ed0f15 3705 format_field(f, 'audio_channels', '\t%s'),
8130779d 3706 delim,
3707 format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
563e0bf8 3708 format_field(f, 'tbr', '\t%dk', func=round),
8130779d 3709 shorten_protocol_name(f.get('protocol', '')),
3710 delim,
d816f61f 3711 simplified_codec(f, 'vcodec'),
563e0bf8 3712 format_field(f, 'vbr', '\t%dk', func=round),
d816f61f 3713 simplified_codec(f, 'acodec'),
563e0bf8 3714 format_field(f, 'abr', '\t%dk', func=round),
ae61d108 3715 format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
8130779d 3716 join_nonempty(
591bb9d3 3717 self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
a5387729 3718 self._format_out('DRM', 'light red') if f.get('has_drm') else None,
8130779d 3719 format_field(f, 'language', '[%s]'),
3720 join_nonempty(format_field(f, 'format_note'),
3721 format_field(f, 'container', ignore=(None, f.get('ext'))),
3722 delim=', '),
3723 delim=' '),
3724 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3725 header_line = self._list_format_headers(
b8ed0f15 3726 'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', 'CH', delim, '\tFILESIZE', '\tTBR', 'PROTO',
8130779d 3727 delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')
3728
3729 return render_table(
3730 header_line, table, hide_empty=True,
591bb9d3 3731 delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
8130779d 3732
3733 def render_thumbnails_table(self, info_dict):
88f23a18 3734 thumbnails = list(info_dict.get('thumbnails') or [])
cfb56d1a 3735 if not thumbnails:
8130779d 3736 return None
3737 return render_table(
ec11a9f4 3738 self._list_format_headers('ID', 'Width', 'Height', 'URL'),
177662e0 3739 [[t.get('id'), t.get('width') or 'unknown', t.get('height') or 'unknown', t['url']] for t in thumbnails])
2412044c 3740
8130779d 3741 def render_subtitles_table(self, video_id, subtitles):
2412044c 3742 def _row(lang, formats):
49c258e1 3743 exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
2412044c 3744 if len(set(names)) == 1:
7aee40c1 3745 names = [] if names[0] == 'unknown' else names[:1]
2412044c 3746 return [lang, ', '.join(names), ', '.join(exts)]
3747
8130779d 3748 if not subtitles:
3749 return None
3750 return render_table(
ec11a9f4 3751 self._list_format_headers('Language', 'Name', 'Formats'),
2412044c 3752 [_row(lang, formats) for lang, formats in subtitles.items()],
8130779d 3753 hide_empty=True)
3754
3755 def __list_table(self, video_id, name, func, *args):
3756 table = func(*args)
3757 if not table:
3758 self.to_screen(f'{video_id} has no {name}')
3759 return
3760 self.to_screen(f'[info] Available {name} for {video_id}:')
3761 self.to_stdout(table)
3762
3763 def list_formats(self, info_dict):
3764 self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)
3765
3766 def list_thumbnails(self, info_dict):
3767 self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)
3768
3769 def list_subtitles(self, video_id, subtitles, name='subtitles'):
3770 self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
a504ced0 3771
dca08720
PH
3772 def urlopen(self, req):
3773 """ Start an HTTP download """
f9934b96 3774 if isinstance(req, str):
67dda517 3775 req = sanitized_Request(req)
19a41fc6 3776 return self._opener.open(req, timeout=self._socket_timeout)
dca08720
PH
3777
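urlopen accepts either a prepared request or a bare URL string, which (as above) is wrapped in sanitized_Request; a usage sketch, assuming the opener has been set up as part of normal initialisation (the URL is a placeholder):

from yt_dlp import YoutubeDL

ydl = YoutubeDL({'socket_timeout': 10})  # becomes self._socket_timeout in _setup_opener
with ydl.urlopen('https://example.com/robots.txt') as resp:  # string is wrapped in sanitized_Request
    body = resp.read()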
3778 def print_debug_header(self):
3779 if not self.params.get('verbose'):
3780 return
49a57e70 3781
a057779d 3782 from . import _IN_CLI # Must be a delayed import
3783
560738f3 3784 # These imports can be slow, so import them only as needed
3785 from .extractor.extractors import _LAZY_LOADER
e756f45b
M
3786 from .extractor.extractors import (
3787 _PLUGIN_CLASSES as plugin_ies,
3788 _PLUGIN_OVERRIDES as plugin_ie_overrides
3789 )
560738f3 3790
49a57e70 3791 def get_encoding(stream):
2a938746 3792 ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
8417f26b
SS
3793 additional_info = []
3794 if os.environ.get('TERM', '').lower() == 'dumb':
3795 additional_info.append('dumb')
49a57e70 3796 if not supports_terminal_sequences(stream):
53973b4d 3797 from .utils import WINDOWS_VT_MODE # Must be imported locally
8417f26b
SS
3798 additional_info.append('No VT' if WINDOWS_VT_MODE is False else 'No ANSI')
3799 if additional_info:
3800 ret = f'{ret} ({",".join(additional_info)})'
49a57e70 3801 return ret
3802
591bb9d3 3803 encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
49a57e70 3804 locale.getpreferredencoding(),
3805 sys.getfilesystemencoding(),
591bb9d3 3806 self.get_encoding(),
3807 ', '.join(
64fa820c 3808 f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
591bb9d3 3809 if stream is not None and key != 'console')
3810 )
883d4b1e 3811
3812 logger = self.params.get('logger')
3813 if logger:
3814 write_debug = lambda msg: logger.debug(f'[debug] {msg}')
3815 write_debug(encoding_str)
3816 else:
96565c7e 3817 write_string(f'[debug] {encoding_str}\n', encoding=None)
49a57e70 3818 write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')
734f90bb 3819
4c88ff87 3820 source = detect_variant()
70b23409 3821 if VARIANT not in (None, 'pip'):
3822 source += '*'
a5387729 3823 klass = type(self)
36eaf303 3824 write_debug(join_nonempty(
b5e7a2e6 3825 f'{"yt-dlp" if REPOSITORY == "yt-dlp/yt-dlp" else REPOSITORY} version',
392389b7 3826 f'{CHANNEL}@{__version__}',
29cb20bd 3827 f'[{RELEASE_GIT_HEAD[:9]}]' if RELEASE_GIT_HEAD else '',
36eaf303 3828 '' if source == 'unknown' else f'({source})',
a5387729 3829 '' if _IN_CLI else 'API' if klass == YoutubeDL else f'API:{self.__module__}.{klass.__qualname__}',
36eaf303 3830 delim=' '))
497074f0 3831
3832 if not _IN_CLI:
3833 write_debug(f'params: {self.params}')
3834
6e21fdd2 3835 if not _LAZY_LOADER:
3836 if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
49a57e70 3837 write_debug('Lazy loading extractors is forcibly disabled')
6e21fdd2 3838 else:
49a57e70 3839 write_debug('Lazy loading extractors is disabled')
8a82af35 3840 if self.params['compat_opts']:
3841 write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
36eaf303 3842
b5e7a2e6 3843 if current_git_head():
3844 write_debug(f'Git HEAD: {current_git_head()}')
b1f94422 3845 write_debug(system_identifier())
d28b5171 3846
8913ef74 3847 exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
3848 ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
3849 if ffmpeg_features:
19a03940 3850 exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))
8913ef74 3851
4c83c967 3852 exe_versions['rtmpdump'] = rtmpdump_version()
feee8d32 3853 exe_versions['phantomjs'] = PhantomJSwrapper._version()
d28b5171 3854 exe_str = ', '.join(
2831b468 3855 f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
3856 ) or 'none'
49a57e70 3857 write_debug('exe versions: %s' % exe_str)
dca08720 3858
1d485a1a 3859 from .compat.compat_utils import get_package_info
9b8ee23b 3860 from .dependencies import available_dependencies
3861
3862 write_debug('Optional libraries: %s' % (', '.join(sorted({
1d485a1a 3863 join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
9b8ee23b 3864 })) or 'none'))
2831b468 3865
97ec5bc5 3866 self._setup_opener()
dca08720
PH
3867 proxy_map = {}
3868 for handler in self._opener.handlers:
3869 if hasattr(handler, 'proxies'):
3870 proxy_map.update(handler.proxies)
49a57e70 3871 write_debug(f'Proxy map: {proxy_map}')
dca08720 3872
e756f45b
M
3873 for plugin_type, plugins in {'Extractor': plugin_ies, 'Post-Processor': plugin_pps}.items():
3874 display_list = ['%s%s' % (
8e40b9d1 3875 klass.__name__, '' if klass.__name__ == name else f' as {name}')
e756f45b
M
3876 for name, klass in plugins.items()]
3877 if plugin_type == 'Extractor':
3878 display_list.extend(f'{plugins[-1].IE_NAME.partition("+")[2]} ({parent.__name__})'
3879 for parent, plugins in plugin_ie_overrides.items())
3880 if not display_list:
3881 continue
3882 write_debug(f'{plugin_type} Plugins: {", ".join(sorted(display_list))}')
3883
8e40b9d1
M
3884 plugin_dirs = plugin_directories()
3885 if plugin_dirs:
3886 write_debug(f'Plugin directories: {plugin_dirs}')
3887
49a57e70 3888 # Not implemented
3889 if False and self.params.get('call_home'):
0f06bcd7 3890 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
49a57e70 3891 write_debug('Public IP address: %s' % ipaddr)
58b1f00d 3892 latest_version = self.urlopen(
0f06bcd7 3893 'https://yt-dl.org/latest/version').read().decode()
58b1f00d
PH
3894 if version_tuple(latest_version) > version_tuple(__version__):
3895 self.report_warning(
3896 'You are using an outdated version (newest version: %s)! '
3897 'See https://yt-dl.org/update if you need help updating.' %
3898 latest_version)
3899
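print_debug_header is a no-op unless the 'verbose' parameter is set; a small sketch of triggering it directly (parameters are illustrative):

from yt_dlp import YoutubeDL

ydl = YoutubeDL({'verbose': True})  # without 'verbose' the method returns immediately
ydl.print_debug_header()            # emits the [debug] lines: version, encodings, exe versions, proxy map, plugins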
e344693b 3900 def _setup_opener(self):
97ec5bc5 3901 if hasattr(self, '_opener'):
3902 return
6ad14cab 3903 timeout_val = self.params.get('socket_timeout')
17bddf3e 3904 self._socket_timeout = 20 if timeout_val is None else float(timeout_val)
6ad14cab 3905
982ee69a 3906 opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
dca08720
PH
3907 opts_cookiefile = self.params.get('cookiefile')
3908 opts_proxy = self.params.get('proxy')
3909
982ee69a 3910 self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
dca08720 3911
6a3f4c3f 3912 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
dca08720
PH
3913 if opts_proxy is not None:
3914 if opts_proxy == '':
3915 proxies = {}
3916 else:
3917 proxies = {'http': opts_proxy, 'https': opts_proxy}
3918 else:
ac668111 3919 proxies = urllib.request.getproxies()
067aa17e 3920 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
dca08720
PH
3921 if 'http' in proxies and 'https' not in proxies:
3922 proxies['https'] = proxies['http']
91410c9b 3923 proxy_handler = PerRequestProxyHandler(proxies)
a0ddb8a2
PH
3924
3925 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
be4a824d
PH
3926 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3927 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
fca6dba8 3928 redirect_handler = YoutubeDLRedirectHandler()
f9934b96 3929 data_handler = urllib.request.DataHandler()
6240b0a2
JMF
3930
3931 # When passing our own FileHandler instance, build_opener won't add the
3932 # default FileHandler, allowing us to disable the file protocol, which
3933 # can be used for malicious purposes (see
067aa17e 3934 # https://github.com/ytdl-org/youtube-dl/issues/8227)
ac668111 3935 file_handler = urllib.request.FileHandler()
6240b0a2 3936
8300774c
M
3937 if not self.params.get('enable_file_urls'):
3938 def file_open(*args, **kwargs):
3939 raise urllib.error.URLError(
3940 'file:// URLs are explicitly disabled in yt-dlp for security reasons. '
3941 'Use --enable-file-urls to enable at your own risk.')
3942 file_handler.file_open = file_open
6240b0a2 3943
ac668111 3944 opener = urllib.request.build_opener(
fca6dba8 3945 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
2461f79d 3946
dca08720
PH
3947 # Delete the default user-agent header, which would otherwise apply in
3948 # cases where our custom HTTP handler doesn't come into play
067aa17e 3949 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
dca08720
PH
3950 opener.addheaders = []
3951 self._opener = opener
62fec3b2
PH
3952
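A sketch of the parameters consumed by _setup_opener above; the addresses and filenames are placeholders:

from yt_dlp import YoutubeDL

ydl = YoutubeDL({
    'proxy': 'http://127.0.0.1:3128',  # applied to both http and https; '' disables proxies,
                                       # unset falls back to urllib.request.getproxies()
    'cookiefile': 'cookies.txt',       # passed to load_cookies() together with 'cookiesfrombrowser'
    'socket_timeout': 15,              # default is 20 seconds when unset
    'enable_file_urls': False,         # file:// stays disabled unless explicitly enabled
})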
3953 def encode(self, s):
3954 if isinstance(s, bytes):
3955 return s # Already encoded
3956
3957 try:
3958 return s.encode(self.get_encoding())
3959 except UnicodeEncodeError as err:
3960 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3961 raise
3962
3963 def get_encoding(self):
3964 encoding = self.params.get('encoding')
3965 if encoding is None:
3966 encoding = preferredencoding()
3967 return encoding
ec82d85a 3968
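get_encoding prefers an explicit 'encoding' parameter and otherwise falls back to preferredencoding(); encode applies it, re-raising UnicodeEncodeError with a hint about --encoding. A tiny sketch:

from yt_dlp import YoutubeDL

ydl = YoutubeDL({'encoding': 'utf-8'})
assert ydl.get_encoding() == 'utf-8'
ydl.encode('straße')  # b'stra\xc3\x9fe'; an unencodable string raises UnicodeEncodeError with the --encoding hint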
e08a85d8 3969 def _write_info_json(self, label, ie_result, infofn, overwrite=None):
cb96c5be 3970 ''' Write infojson and return True = written, 'exists' = already exists, False = skipped, None = error '''
e08a85d8 3971 if overwrite is None:
3972 overwrite = self.params.get('overwrites', True)
80c03fa9 3973 if not self.params.get('writeinfojson'):
3974 return False
3975 elif not infofn:
3976 self.write_debug(f'Skipping writing {label} infojson')
3977 return False
3978 elif not self._ensure_dir_exists(infofn):
3979 return None
e08a85d8 3980 elif not overwrite and os.path.exists(infofn):
80c03fa9 3981 self.to_screen(f'[info] {label.title()} metadata is already present')
cb96c5be 3982 return 'exists'
3983
3984 self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
3985 try:
3986 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
3987 return True
86e5f3ed 3988 except OSError:
cb96c5be 3989 self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
3990 return None
80c03fa9 3991
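_write_info_json is an internal helper, but its four-way return value is what callers branch on; a hedged sketch (filenames and metadata are placeholders):

from yt_dlp import YoutubeDL

ydl = YoutubeDL({'writeinfojson': True, 'clean_infojson': True})
result = ydl._write_info_json('video', {'id': 'example', 'title': 'Example'}, 'example.info.json')
if result in (True, 'exists'):
    pass  # metadata is on disk (freshly written, or already present with overwrites disabled)
elif result is False:
    pass  # skipped: writeinfojson unset or no filename given
else:
    pass  # None: an error occurred and has already been reported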
3992 def _write_description(self, label, ie_result, descfn):
3993 ''' Write description and return True = written, False = skipped, None = error '''
3994 if not self.params.get('writedescription'):
3995 return False
3996 elif not descfn:
3997 self.write_debug(f'Skipping writing {label} description')
3998 return False
3999 elif not self._ensure_dir_exists(descfn):
4000 return None
4001 elif not self.params.get('overwrites', True) and os.path.exists(descfn):
4002 self.to_screen(f'[info] {label.title()} description is already present')
4003 elif ie_result.get('description') is None:
88fb9425 4004 self.to_screen(f'[info] There\'s no {label} description to write')
80c03fa9 4005 return False
4006 else:
4007 try:
4008 self.to_screen(f'[info] Writing {label} description to: {descfn}')
86e5f3ed 4009 with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
80c03fa9 4010 descfile.write(ie_result['description'])
86e5f3ed 4011 except OSError:
80c03fa9 4012 self.report_error(f'Cannot write {label} description file {descfn}')
4013 return None
4014 return True
4015
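_write_description follows the same convention; an illustrative call on the internal helper (the filename is a placeholder):

from yt_dlp import YoutubeDL

ydl = YoutubeDL({'writedescription': True})
ok = ydl._write_description('video', {'description': 'An example description'}, 'example.description')
# True = written, False = skipped (option unset or no description), None = write error (already reported)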
4016 def _write_subtitles(self, info_dict, filename):
4017 ''' Write subtitles to file and return a list of (sub_filename, final_sub_filename); or None on error '''
4018 ret = []
4019 subtitles = info_dict.get('requested_subtitles')
88fb9425 4020 if not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
80c03fa9 4021 # Subtitle download errors are already handled in the relevant IE,
4022 # so this silently continues when used with an IE that does not support subtitles
4023 return ret
88fb9425 4024 elif not subtitles:
c8bc203f 4025 self.to_screen('[info] There are no subtitles for the requested languages')
88fb9425 4026 return ret
80c03fa9 4027 sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
4028 if not sub_filename_base:
4029 self.to_screen('[info] Skipping writing video subtitles')
4030 return ret
88fb9425 4031
80c03fa9 4032 for sub_lang, sub_info in subtitles.items():
4033 sub_format = sub_info['ext']
4034 sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
4035 sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
e04938ab 4036 existing_sub = self.existing_file((sub_filename_final, sub_filename))
4037 if existing_sub:
80c03fa9 4038 self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
e04938ab 4039 sub_info['filepath'] = existing_sub
4040 ret.append((existing_sub, sub_filename_final))
80c03fa9 4041 continue
4042
4043 self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
4044 if sub_info.get('data') is not None:
4045 try:
4046 # Use newline='' to prevent conversion of newline characters
4047 # See https://github.com/ytdl-org/youtube-dl/issues/10268
86e5f3ed 4048 with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
80c03fa9 4049 subfile.write(sub_info['data'])
4050 sub_info['filepath'] = sub_filename
4051 ret.append((sub_filename, sub_filename_final))
4052 continue
86e5f3ed 4053 except OSError:
80c03fa9 4054 self.report_error(f'Cannot write video subtitles file {sub_filename}')
4055 return None
4056
4057 try:
4058 sub_copy = sub_info.copy()
4059 sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
4060 self.dl(sub_filename, sub_copy, subtitle=True)
4061 sub_info['filepath'] = sub_filename
4062 ret.append((sub_filename, sub_filename_final))
6020e05d 4063 except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
c70c418d 4064 msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
6020e05d 4065 if self.params.get('ignoreerrors') is not True: # False or 'only_download'
c70c418d 4066 if not self.params.get('ignoreerrors'):
4067 self.report_error(msg)
4068 raise DownloadError(msg)
4069 self.report_warning(msg)
519804a9 4070 return ret
80c03fa9 4071
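_write_subtitles returns pairs of (filename actually written, final filename it should end up as); a rough, illustrative sketch using inline subtitle data so nothing is downloaded:

from yt_dlp import YoutubeDL

info = {
    'id': 'example', 'ext': 'mp4',
    'requested_subtitles': {'en': {'ext': 'vtt', 'data': 'WEBVTT\n\n00:00.000 --> 00:01.000\nhi\n'}},
}
ydl = YoutubeDL({'writesubtitles': True, 'outtmpl': '%(id)s.%(ext)s'})
ret = ydl._write_subtitles(info, 'example.mp4')  # e.g. [('example.en.vtt', 'example.en.vtt')]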
4072 def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
4073 ''' Write thumbnails to file and return a list of (thumb_filename, final_thumb_filename) '''
6c4fd172 4074 write_all = self.params.get('write_all_thumbnails', False)
80c03fa9 4075 thumbnails, ret = [], []
6c4fd172 4076 if write_all or self.params.get('writethumbnail', False):
0202b52a 4077 thumbnails = info_dict.get('thumbnails') or []
88fb9425 4078 if not thumbnails:
c8bc203f 4079 self.to_screen(f'[info] There are no {label} thumbnails to download')
88fb9425 4080 return ret
6c4fd172 4081 multiple = write_all and len(thumbnails) > 1
ec82d85a 4082
80c03fa9 4083 if thumb_filename_base is None:
4084 thumb_filename_base = filename
4085 if thumbnails and not thumb_filename_base:
4086 self.write_debug(f'Skipping writing {label} thumbnail')
4087 return ret
4088
dd0228ce 4089 for idx, t in list(enumerate(thumbnails))[::-1]:
80c03fa9 4090 thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
aa9369a2 4091 thumb_display_id = f'{label} thumbnail {t["id"]}'
80c03fa9 4092 thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
4093 thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
ec82d85a 4094
e04938ab 4095 existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
4096 if existing_thumb:
aa9369a2 4097 self.to_screen('[info] %s is already present' % (
4098 thumb_display_id if multiple else f'{label} thumbnail').capitalize())
e04938ab 4099 t['filepath'] = existing_thumb
4100 ret.append((existing_thumb, thumb_filename_final))
ec82d85a 4101 else:
80c03fa9 4102 self.to_screen(f'[info] Downloading {thumb_display_id} ...')
ec82d85a 4103 try:
297e9952 4104 uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
80c03fa9 4105 self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
d3d89c32 4106 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
ec82d85a 4107 shutil.copyfileobj(uf, thumbf)
80c03fa9 4108 ret.append((thumb_filename, thumb_filename_final))
885cc0b7 4109 t['filepath'] = thumb_filename
3158150c 4110 except network_exceptions as err:
ad54c913 4111 if isinstance(err, urllib.error.HTTPError) and err.code == 404:
4112 self.to_screen(f'[info] {thumb_display_id.title()} does not exist')
4113 else:
4114 self.report_warning(f'Unable to download {thumb_display_id}: {err}')
dd0228ce 4115 thumbnails.pop(idx)
6c4fd172 4116 if ret and not write_all:
4117 break
0202b52a 4118 return ret
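Thumbnail writing is gated by 'writethumbnail' / 'write_all_thumbnails'; the loop above walks the thumbnail list from the end and stops at the first successful download unless write_all is set. A hedged end-to-end sketch via the public API (the URL is a placeholder):

from yt_dlp import YoutubeDL

with YoutubeDL({'skip_download': True, 'writethumbnail': True}) as ydl:
    ydl.download(['https://example.com/watch?v=placeholder'])  # fetches metadata and writes the thumbnail only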