]> jfr.im git - yt-dlp.git/blame - yt_dlp/YoutubeDL.py
[docs] Consistent use of `e.g.` (#4643)
[yt-dlp.git] / yt_dlp / YoutubeDL.py
CommitLineData
26e63931 1import collections
31bd3925 2import contextlib
9d2ecdbc 3import datetime
c1c9a79c 4import errno
31bd3925 5import fileinput
b5ae35ee 6import functools
8222d8de 7import io
b82f815f 8import itertools
8694c600 9import json
62fec3b2 10import locale
083c9df9 11import operator
8222d8de 12import os
f8271158 13import random
8222d8de
JMF
14import re
15import shutil
dca08720 16import subprocess
8222d8de 17import sys
21cd8fae 18import tempfile
8222d8de 19import time
67134eab 20import tokenize
8222d8de 21import traceback
524e2e4f 22import unicodedata
f9934b96 23import urllib.request
961ea474
S
24from string import ascii_letters
25
f8271158 26from .cache import Cache
14f25df2 27from .compat import compat_os_name, compat_shlex_quote
982ee69a 28from .cookies import load_cookies
f8271158 29from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
30from .downloader.rtmp import rtmpdump_version
f8271158 31from .extractor import gen_extractor_classes, get_info_extractor
32from .extractor.openload import PhantomJSwrapper
33from .minicurses import format_text
34from .postprocessor import _PLUGIN_CLASSES as plugin_postprocessors
35from .postprocessor import (
36 EmbedThumbnailPP,
37 FFmpegFixupDuplicateMoovPP,
38 FFmpegFixupDurationPP,
39 FFmpegFixupM3u8PP,
40 FFmpegFixupM4aPP,
41 FFmpegFixupStretchedPP,
42 FFmpegFixupTimestampPP,
43 FFmpegMergerPP,
44 FFmpegPostProcessor,
ca9def71 45 FFmpegVideoConvertorPP,
f8271158 46 MoveFilesAfterDownloadPP,
47 get_postprocessor,
48)
ca9def71 49from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
f8271158 50from .update import detect_variant
8c25f81b 51from .utils import (
f8271158 52 DEFAULT_OUTTMPL,
7b2c3f47 53 IDENTITY,
f8271158 54 LINK_TEMPLATES,
8dc59305 55 MEDIA_EXTENSIONS,
f8271158 56 NO_DEFAULT,
1d485a1a 57 NUMBER_RE,
f8271158 58 OUTTMPL_TYPES,
59 POSTPROCESS_WHEN,
60 STR_FORMAT_RE_TMPL,
61 STR_FORMAT_TYPES,
62 ContentTooShortError,
63 DateRange,
64 DownloadCancelled,
65 DownloadError,
66 EntryNotInPlaylist,
67 ExistingVideoReached,
68 ExtractorError,
69 GeoRestrictedError,
70 HEADRequest,
f8271158 71 ISO3166Utils,
72 LazyList,
73 MaxDownloadsReached,
19a03940 74 Namespace,
f8271158 75 PagedList,
76 PerRequestProxyHandler,
7e88d7d7 77 PlaylistEntries,
f8271158 78 Popen,
79 PostProcessingError,
80 ReExtractInfo,
81 RejectedVideoReached,
82 SameFileError,
83 UnavailableVideoError,
693f0600 84 UserNotLive,
f8271158 85 YoutubeDLCookieProcessor,
86 YoutubeDLHandler,
87 YoutubeDLRedirectHandler,
eedb7ba5
S
88 age_restricted,
89 args_to_str,
cb794ee0 90 bug_reports_message,
ce02ed60 91 date_from_str,
ce02ed60 92 determine_ext,
b5559424 93 determine_protocol,
c0384f22 94 encode_compat_str,
ce02ed60 95 encodeFilename,
a06916d9 96 error_to_compat_str,
47cdc68e 97 escapeHTML,
590bc6f6 98 expand_path,
90137ca4 99 filter_dict,
e29663c6 100 float_or_none,
02dbf93f 101 format_bytes,
e0fd9573 102 format_decimal_suffix,
f8271158 103 format_field,
525ef922 104 formatSeconds,
fc61aff4 105 get_compatible_ext,
0bb322b9 106 get_domain,
c9969434 107 int_or_none,
732044af 108 iri_to_uri,
34921b43 109 join_nonempty,
ce02ed60 110 locked_file,
0647d925 111 make_archive_id,
0202b52a 112 make_dir,
dca08720 113 make_HTTPS_handler,
8b7539d2 114 merge_headers,
3158150c 115 network_exceptions,
ec11a9f4 116 number_of_digits,
cd6fc19e 117 orderedSet,
083c9df9 118 parse_filesize,
ce02ed60 119 preferredencoding,
eedb7ba5 120 prepend_extension,
51fb4995 121 register_socks_protocols,
3efb96a6 122 remove_terminal_sequences,
cfb56d1a 123 render_table,
eedb7ba5 124 replace_extension,
ce02ed60 125 sanitize_filename,
1bb5c511 126 sanitize_path,
dcf77cf1 127 sanitize_url,
67dda517 128 sanitized_Request,
e5660ee6 129 std_headers,
1211bb6d 130 str_or_none,
e29663c6 131 strftime_or_none,
ce02ed60 132 subtitles_filename,
819e0531 133 supports_terminal_sequences,
b1f94422 134 system_identifier,
f2ebc5c7 135 timetuple_from_msec,
732044af 136 to_high_limit_path,
324ad820 137 traverse_obj,
fc61aff4 138 try_call,
6033d980 139 try_get,
29eb5174 140 url_basename,
7d1eb38a 141 variadic,
58b1f00d 142 version_tuple,
53973b4d 143 windows_enable_vt_mode,
ce02ed60
PH
144 write_json_file,
145 write_string,
4f026faf 146)
70b23409 147from .version import RELEASE_GIT_HEAD, VARIANT, __version__
8222d8de 148
e9c0cdd3
YCH
149if compat_os_name == 'nt':
150 import ctypes
151
2459b6e1 152
86e5f3ed 153class YoutubeDL:
8222d8de
JMF
154 """YoutubeDL class.
155
156 YoutubeDL objects are the ones responsible of downloading the
157 actual video file and writing it to disk if the user has requested
158 it, among some other tasks. In most cases there should be one per
159 program. As, given a video URL, the downloader doesn't know how to
160 extract all the needed information, task that InfoExtractors do, it
161 has to pass the URL to one of them.
162
163 For this, YoutubeDL objects have a method that allows
164 InfoExtractors to be registered in a given order. When it is passed
165 a URL, the YoutubeDL object handles it to the first InfoExtractor it
166 finds that reports being able to handle it. The InfoExtractor extracts
167 all the information about the video or videos the URL refers to, and
168 YoutubeDL process the extracted information, possibly using a File
169 Downloader to download the video.
170
171 YoutubeDL objects accept a lot of parameters. In order not to saturate
172 the object constructor with arguments, it receives a dictionary of
173 options instead. These options are available through the params
174 attribute for the InfoExtractors to use. The YoutubeDL also
175 registers itself as the downloader in charge for the InfoExtractors
176 that are added to it, so this is a "mutual registration".
177
178 Available options:
179
180 username: Username for authentication purposes.
181 password: Password for authentication purposes.
180940e0 182 videopassword: Password for accessing a video.
1da50aa3
S
183 ap_mso: Adobe Pass multiple-system operator identifier.
184 ap_username: Multiple-system operator account username.
185 ap_password: Multiple-system operator account password.
8222d8de
JMF
186 usenetrc: Use netrc for authentication instead.
187 verbose: Print additional info to stdout.
188 quiet: Do not print messages to stdout.
ad8915b7 189 no_warnings: Do not print out anything for warnings.
bb66c247 190 forceprint: A dict with keys WHEN mapped to a list of templates to
191 print to stdout. The allowed keys are video or any of the
192 items in utils.POSTPROCESS_WHEN.
ca30f449 193 For compatibility, a single list is also accepted
bb66c247 194 print_to_file: A dict with keys WHEN (same as forceprint) mapped to
195 a list of tuples with (template, filename)
8694c600 196 forcejson: Force printing info_dict as JSON.
63e0be34
PH
197 dump_single_json: Force printing the info_dict of the whole playlist
198 (or video) as a single JSON line.
c25228e5 199 force_write_download_archive: Force writing download archive regardless
200 of 'skip_download' or 'simulate'.
b7b04c78 201 simulate: Do not download the video files. If unset (or None),
202 simulate only if listsubtitles, listformats or list_thumbnails is used
eb8a4433 203 format: Video format code. see "FORMAT SELECTION" for more details.
093a1710 204 You can also pass a function. The function takes 'ctx' as
205 argument and returns the formats to download.
206 See "build_format_selector" for an implementation
63ad4d43 207 allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
b7da73eb 208 ignore_no_formats_error: Ignore "No video formats" error. Usefull for
209 extracting metadata even if the video is not actually
210 available for download (experimental)
0930b11f 211 format_sort: A list of fields by which to sort the video formats.
212 See "Sorting Formats" for more details.
c25228e5 213 format_sort_force: Force the given format_sort. see "Sorting Formats"
214 for more details.
08d30158 215 prefer_free_formats: Whether to prefer video formats with free containers
216 over non-free ones of same quality.
c25228e5 217 allow_multiple_video_streams: Allow multiple video streams to be merged
218 into a single file
219 allow_multiple_audio_streams: Allow multiple audio streams to be merged
220 into a single file
0ba692ac 221 check_formats Whether to test if the formats are downloadable.
9f1a1c36 222 Can be True (check all), False (check none),
223 'selected' (check selected formats),
0ba692ac 224 or None (check only if requested by extractor)
4524baf0 225 paths: Dictionary of output paths. The allowed keys are 'home'
226 'temp' and the keys of OUTTMPL_TYPES (in utils.py)
de6000d9 227 outtmpl: Dictionary of templates for output names. Allowed keys
4524baf0 228 are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
34488702 229 For compatibility with youtube-dl, a single string can also be used
a820dc72
RA
230 outtmpl_na_placeholder: Placeholder for unavailable meta fields.
231 restrictfilenames: Do not allow "&" and spaces in file names
232 trim_file_name: Limit length of filename (extension excluded)
4524baf0 233 windowsfilenames: Force the filenames to be windows compatible
b1940459 234 ignoreerrors: Do not stop on download/postprocessing errors.
235 Can be 'only_download' to ignore only download errors.
236 Default is 'only_download' for CLI, but False for API
26e2805c 237 skip_playlist_after_errors: Number of allowed failures until the rest of
238 the playlist is skipped
d22dec74 239 force_generic_extractor: Force downloader to use the generic extractor
0c3d0f51 240 overwrites: Overwrite all video and metadata files if True,
241 overwrite only non-video files if None
242 and don't overwrite any file if False
34488702 243 For compatibility with youtube-dl,
244 "nooverwrites" may also be used instead
c14e88f0 245 playlist_items: Specific indices of playlist to download.
75822ca7 246 playlistrandom: Download playlist items in random order.
7e9a6125 247 lazy_playlist: Process playlist entries as they are received.
8222d8de
JMF
248 matchtitle: Download only matching titles.
249 rejecttitle: Reject downloads for matching titles.
8bf9319e 250 logger: Log messages to a logging.Logger instance.
8222d8de 251 logtostderr: Log messages to stderr instead of stdout.
819e0531 252 consoletitle: Display progress in console window's titlebar.
8222d8de
JMF
253 writedescription: Write the video description to a .description file
254 writeinfojson: Write the video description to a .info.json file
75d43ca0 255 clean_infojson: Remove private fields from the infojson
34488702 256 getcomments: Extract video comments. This will not be written to disk
06167fbb 257 unless writeinfojson is also given
1fb07d10 258 writeannotations: Write the video annotations to a .annotations.xml file
8222d8de 259 writethumbnail: Write the thumbnail image to a file
c25228e5 260 allow_playlist_files: Whether to write playlists' description, infojson etc
261 also to disk when using the 'write*' options
ec82d85a 262 write_all_thumbnails: Write all thumbnail formats to files
732044af 263 writelink: Write an internet shortcut file, depending on the
264 current platform (.url/.webloc/.desktop)
265 writeurllink: Write a Windows internet shortcut file (.url)
266 writewebloclink: Write a macOS internet shortcut file (.webloc)
267 writedesktoplink: Write a Linux internet shortcut file (.desktop)
8222d8de 268 writesubtitles: Write the video subtitles to a file
741dd8ea 269 writeautomaticsub: Write the automatically generated subtitles to a file
8222d8de 270 listsubtitles: Lists all available subtitles for the video
a504ced0 271 subtitlesformat: The format code for subtitles
c32b0aab 272 subtitleslangs: List of languages of the subtitles to download (can be regex).
273 The list may contain "all" to refer to all the available
274 subtitles. The language can be prefixed with a "-" to
62b58c09 275 exclude it from the requested languages, e.g. ['all', '-live_chat']
8222d8de
JMF
276 keepvideo: Keep the video file after post-processing
277 daterange: A DateRange object, download only if the upload_date is in the range.
278 skip_download: Skip the actual download of the video file
c35f9e72 279 cachedir: Location of the cache files in the filesystem.
a0e07d31 280 False to disable filesystem cache.
47192f92 281 noplaylist: Download single video instead of a playlist if in doubt.
8dbe9899
PH
282 age_limit: An integer representing the user's age in years.
283 Unsuitable videos for the given age are skipped.
5fe18bdb
PH
284 min_views: An integer representing the minimum view count the video
285 must have in order to not be skipped.
286 Videos without view count information are always
287 downloaded. None for no limit.
288 max_views: An integer representing the maximum view count.
289 Videos that are more popular than that are not
290 downloaded.
291 Videos without view count information are always
292 downloaded. None for no limit.
293 download_archive: File name of a file where all downloads are recorded.
c1c9a79c
PH
294 Videos already present in the file are not downloaded
295 again.
8a51f564 296 break_on_existing: Stop the download process after attempting to download a
297 file that is in the archive.
298 break_on_reject: Stop the download process when encountering a video that
299 has been filtered out.
b222c271 300 break_per_url: Whether break_on_reject and break_on_existing
301 should act on each input URL as opposed to for the entire queue
d76fa1f3 302 cookiefile: File name or text stream from where cookies should be read and dumped to
f59f5ef8
MB
303 cookiesfrombrowser: A tuple containing the name of the browser, the profile
304 name/pathfrom where cookies are loaded, and the name of the
62b58c09 305 keyring, e.g. ('chrome', ) or ('vivaldi', 'default', 'BASICTEXT')
f81c62a6 306 legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
307 support RFC 5746 secure renegotiation
f59f5ef8 308 nocheckcertificate: Do not verify SSL certificates
bb58c9ed 309 client_certificate: Path to client certificate file in PEM format. May include the private key
310 client_certificate_key: Path to private key file for client certificate
311 client_certificate_password: Password for client certificate private key, if encrypted.
312 If not provided and the key is encrypted, yt-dlp will ask interactively
7e8c0af0 313 prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
c6e07cf1 314 (Only supported by some extractors)
8b7539d2 315 http_headers: A dictionary of custom headers to be used for all requests
a1ee09e8 316 proxy: URL of the proxy server to use
38cce791 317 geo_verification_proxy: URL of the proxy to use for IP address verification
504f20dd 318 on geo-restricted sites.
e344693b 319 socket_timeout: Time to wait for unresponsive hosts, in seconds
0783b09b
PH
320 bidi_workaround: Work around buggy terminals without bidirectional text
321 support, using fridibi
a0ddb8a2 322 debug_printtraffic:Print out sent and received HTTP traffic
04b4d394
PH
323 default_search: Prepend this string if an input url is not valid.
324 'auto' for elaborate guessing
62fec3b2 325 encoding: Use this encoding instead of the system-specified.
134c913c 326 extract_flat: Whether to resolve and process url_results further
327 * False: Always process (default)
328 * True: Never process
329 * 'in_playlist': Do not process inside playlist/multi_video
330 * 'discard': Always process, but don't return the result
331 from inside playlist/multi_video
332 * 'discard_in_playlist': Same as "discard", but only for
333 playlists (not multi_video)
f2ebc5c7 334 wait_for_video: If given, wait for scheduled streams to become available.
335 The value should be a tuple containing the range
336 (min_secs, max_secs) to wait between retries
4f026faf 337 postprocessors: A list of dictionaries, each with an entry
71b640cc 338 * key: The name of the postprocessor. See
7a5c1cfe 339 yt_dlp/postprocessor/__init__.py for a list.
bb66c247 340 * when: When to run the postprocessor. Allowed values are
341 the entries of utils.POSTPROCESS_WHEN
56d868db 342 Assumed to be 'post_process' if not given
71b640cc
PH
343 progress_hooks: A list of functions that get called on download
344 progress, with a dictionary with the entries
5cda4eda 345 * status: One of "downloading", "error", or "finished".
ee69b99a 346 Check this first and ignore unknown values.
3ba7740d 347 * info_dict: The extracted info_dict
71b640cc 348
5cda4eda 349 If status is one of "downloading", or "finished", the
ee69b99a
PH
350 following properties may also be present:
351 * filename: The final filename (always present)
5cda4eda 352 * tmpfilename: The filename we're currently writing to
71b640cc
PH
353 * downloaded_bytes: Bytes on disk
354 * total_bytes: Size of the whole file, None if unknown
5cda4eda
PH
355 * total_bytes_estimate: Guess of the eventual file size,
356 None if unavailable.
357 * elapsed: The number of seconds since download started.
71b640cc
PH
358 * eta: The estimated time in seconds, None if unknown
359 * speed: The download speed in bytes/second, None if
360 unknown
5cda4eda
PH
361 * fragment_index: The counter of the currently
362 downloaded video fragment.
363 * fragment_count: The number of fragments (= individual
364 files that will be merged)
71b640cc
PH
365
366 Progress hooks are guaranteed to be called at least once
367 (with status "finished") if the download is successful.
819e0531 368 postprocessor_hooks: A list of functions that get called on postprocessing
369 progress, with a dictionary with the entries
370 * status: One of "started", "processing", or "finished".
371 Check this first and ignore unknown values.
372 * postprocessor: Name of the postprocessor
373 * info_dict: The extracted info_dict
374
375 Progress hooks are guaranteed to be called at least twice
376 (with status "started" and "finished") if the processing is successful.
fc61aff4 377 merge_output_format: "/" separated list of extensions to use when merging formats.
6b591b29 378 final_ext: Expected final extension; used to detect when the file was
59a7a13e 379 already downloaded and converted
6271f1ca
PH
380 fixup: Automatically correct known faults of the file.
381 One of:
382 - "never": do nothing
383 - "warn": only emit a warning
384 - "detect_or_warn": check whether we can do anything
62cd676c 385 about it, warn otherwise (default)
504f20dd 386 source_address: Client-side IP address to bind to.
1cf376f5 387 sleep_interval_requests: Number of seconds to sleep between requests
388 during extraction
7aa589a5
S
389 sleep_interval: Number of seconds to sleep before each download when
390 used alone or a lower bound of a range for randomized
391 sleep before each download (minimum possible number
392 of seconds to sleep) when used along with
393 max_sleep_interval.
394 max_sleep_interval:Upper bound of a range for randomized sleep before each
395 download (maximum possible number of seconds to sleep).
396 Must only be used along with sleep_interval.
397 Actual sleep time will be a random float from range
398 [sleep_interval; max_sleep_interval].
1cf376f5 399 sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
cfb56d1a
PH
400 listformats: Print an overview of available video formats and exit.
401 list_thumbnails: Print a table of all thumbnails and exit.
0a41f331 402 match_filter: A function that gets called for every video with the signature
403 (info_dict, *, incomplete: bool) -> Optional[str]
404 For backward compatibility with youtube-dl, the signature
405 (info_dict) -> Optional[str] is also allowed.
406 - If it returns a message, the video is ignored.
407 - If it returns None, the video is downloaded.
408 - If it returns utils.NO_DEFAULT, the user is interactively
409 asked whether to download the video.
347de493 410 match_filter_func in utils.py is one example for this.
7e5db8c9 411 no_color: Do not emit color codes in output.
0a840f58 412 geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
504f20dd 413 HTTP header
0a840f58 414 geo_bypass_country:
773f291d
S
415 Two-letter ISO 3166-2 country code that will be used for
416 explicit geographic restriction bypassing via faking
504f20dd 417 X-Forwarded-For HTTP header
5f95927a
S
418 geo_bypass_ip_block:
419 IP range in CIDR notation that will be used similarly to
504f20dd 420 geo_bypass_country
52a8a1e1 421 external_downloader: A dictionary of protocol keys and the executable of the
422 external downloader to use for it. The allowed protocols
423 are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
424 Set the value to 'native' to use the native downloader
53ed7066 425 compat_opts: Compatibility options. See "Differences in default behavior".
3acf6d38 426 The following options do not work when used through the API:
b5ae35ee 427 filename, abort-on-error, multistreams, no-live-chat, format-sort
dac5df5a 428 no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
e4f02757 429 Refer __init__.py for their implementation
819e0531 430 progress_template: Dictionary of templates for progress outputs.
431 Allowed keys are 'download', 'postprocess',
432 'download-title' (console title) and 'postprocess-title'.
433 The template is mapped on a dictionary with keys 'progress' and 'info'
23326151 434 retry_sleep_functions: Dictionary of functions that takes the number of attempts
435 as argument and returns the time to sleep in seconds.
436 Allowed keys are 'http', 'fragment', 'file_access'
0f446365
SW
437 download_ranges: A callback function that gets called for every video with
438 the signature (info_dict, ydl) -> Iterable[Section].
439 Only the returned sections will be downloaded.
440 Each Section is a dict with the following keys:
5ec1b6b7 441 * start_time: Start time of the section in seconds
442 * end_time: End time of the section in seconds
443 * title: Section title (Optional)
444 * index: Section number (Optional)
0f446365 445 force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
a7dc6a89 446 noprogress: Do not print the progress bar
fe7e0c98 447
8222d8de 448 The following parameters are not used by YoutubeDL itself, they are used by
7a5c1cfe 449 the downloader (see yt_dlp/downloader/common.py):
51d9739f 450 nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
205a0654 451 max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
a7dc6a89 452 continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
59a7a13e 453 external_downloader_args, concurrent_fragment_downloads.
76b1bd67
JMF
454
455 The following options are used by the post processors:
c0b7d117
S
456 ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
457 to the binary or its containing directory.
43820c03 458 postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
34488702 459 and a list of additional command-line arguments for the
460 postprocessor/executable. The dict can also have "PP+EXE" keys
461 which are used when the given exe is used by the given PP.
462 Use 'default' as the name for arguments to passed to all PP
463 For compatibility with youtube-dl, a single list of args
464 can also be used
e409895f 465
466 The following options are used by the extractors:
62bff2c1 467 extractor_retries: Number of times to retry for known errors
468 dynamic_mpd: Whether to process dynamic DASH manifests (default: True)
e409895f 469 hls_split_discontinuity: Split HLS playlists to different formats at
62bff2c1 470 discontinuities such as ad breaks (default: False)
5d3a0e79 471 extractor_args: A dictionary of arguments to be passed to the extractors.
472 See "EXTRACTOR ARGUMENTS" for details.
62b58c09 473 E.g. {'youtube': {'skip': ['dash', 'hls']}}
88f23a18 474 mark_watched: Mark videos watched (even with --simulate). Only for YouTube
1890fc63 475
476 The following options are deprecated and may be removed in the future:
477
7e9a6125 478 playliststart: - Use playlist_items
479 Playlist item to start at.
480 playlistend: - Use playlist_items
481 Playlist item to end at.
482 playlistreverse: - Use playlist_items
483 Download playlist items in reverse order.
1890fc63 484 forceurl: - Use forceprint
485 Force printing final URL.
486 forcetitle: - Use forceprint
487 Force printing title.
488 forceid: - Use forceprint
489 Force printing ID.
490 forcethumbnail: - Use forceprint
491 Force printing thumbnail URL.
492 forcedescription: - Use forceprint
493 Force printing description.
494 forcefilename: - Use forceprint
495 Force printing final filename.
496 forceduration: - Use forceprint
497 Force printing duration.
498 allsubtitles: - Use subtitleslangs = ['all']
499 Downloads all the subtitles of the video
500 (requires writesubtitles or writeautomaticsub)
501 include_ads: - Doesn't work
502 Download ads as well
503 call_home: - Not implemented
504 Boolean, true iff we are allowed to contact the
505 yt-dlp servers for debugging.
506 post_hooks: - Register a custom postprocessor
507 A list of functions that get called as the final step
508 for each video file, after all postprocessors have been
509 called. The filename will be passed as the only argument.
510 hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
511 Use the native HLS downloader instead of ffmpeg/avconv
512 if True, otherwise use ffmpeg/avconv if False, otherwise
513 use downloader suggested by extractor if None.
514 prefer_ffmpeg: - avconv support is deprecated
515 If False, use avconv instead of ffmpeg if both are available,
516 otherwise prefer ffmpeg.
517 youtube_include_dash_manifest: - Use extractor_args
5d3a0e79 518 If True (default), DASH manifests and related
62bff2c1 519 data will be downloaded and processed by extractor.
520 You can reduce network I/O by disabling it if you don't
521 care about DASH. (only for youtube)
1890fc63 522 youtube_include_hls_manifest: - Use extractor_args
5d3a0e79 523 If True (default), HLS manifests and related
62bff2c1 524 data will be downloaded and processed by extractor.
525 You can reduce network I/O by disabling it if you don't
526 care about HLS. (only for youtube)
8222d8de
JMF
527 """
528
86e5f3ed 529 _NUMERIC_FIELDS = {
b8ed0f15 530 'width', 'height', 'asr', 'audio_channels', 'fps',
531 'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
e6f21b3d 532 'timestamp', 'release_timestamp',
c9969434
S
533 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
534 'average_rating', 'comment_count', 'age_limit',
535 'start_time', 'end_time',
536 'chapter_number', 'season_number', 'episode_number',
537 'track_number', 'disc_number', 'release_year',
86e5f3ed 538 }
c9969434 539
6db9c4d5 540 _format_fields = {
541 # NB: Keep in sync with the docstring of extractor/common.py
a44ca5a4 542 'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
b8ed0f15 543 'width', 'height', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
6db9c4d5 544 'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx',
545 'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
546 'preference', 'language', 'language_preference', 'quality', 'source_preference',
547 'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'downloader_options',
548 'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
549 }
48ee10ee 550 _format_selection_exts = {
8dc59305 551 'audio': set(MEDIA_EXTENSIONS.common_audio),
552 'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
553 'storyboards': set(MEDIA_EXTENSIONS.storyboards),
48ee10ee 554 }
555
3511266b 556 def __init__(self, params=None, auto_init=True):
883d4b1e 557 """Create a FileDownloader object with the given options.
558 @param auto_init Whether to load the default extractors and print header (if verbose).
49a57e70 559 Set to 'no_verbose_header' to not print the header
883d4b1e 560 """
e9f9a10f
JMF
561 if params is None:
562 params = {}
592b7485 563 self.params = params
8b7491c8 564 self._ies = {}
56c73665 565 self._ies_instances = {}
1e43a6f7 566 self._pps = {k: [] for k in POSTPROCESS_WHEN}
b35496d8 567 self._printed_messages = set()
1cf376f5 568 self._first_webpage_request = True
ab8e5e51 569 self._post_hooks = []
933605d7 570 self._progress_hooks = []
819e0531 571 self._postprocessor_hooks = []
8222d8de
JMF
572 self._download_retcode = 0
573 self._num_downloads = 0
9c906919 574 self._num_videos = 0
592b7485 575 self._playlist_level = 0
576 self._playlist_urls = set()
a0e07d31 577 self.cache = Cache(self)
34308b30 578
819e0531 579 windows_enable_vt_mode()
591bb9d3 580 stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
581 self._out_files = Namespace(
582 out=stdout,
583 error=sys.stderr,
584 screen=sys.stderr if self.params.get('quiet') else stdout,
585 console=None if compat_os_name == 'nt' else next(
cf4f42cb 586 filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
591bb9d3 587 )
588 self._allow_colors = Namespace(**{
589 type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
64fa820c 590 for type_, stream in self._out_files.items_ if type_ != 'console'
591bb9d3 591 })
819e0531 592
6929b41a 593 # The code is left like this to be reused for future deprecations
594 MIN_SUPPORTED, MIN_RECOMMENDED = (3, 7), (3, 7)
eff42759 595 current_version = sys.version_info[:2]
596 if current_version < MIN_RECOMMENDED:
9d339c41 597 msg = ('Support for Python version %d.%d has been deprecated. '
24093d52 598 'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details.'
c6e07cf1 599 '\n You will no longer receive updates on this version')
eff42759 600 if current_version < MIN_SUPPORTED:
601 msg = 'Python version %d.%d is no longer supported'
602 self.deprecation_warning(
603 f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))
a61f4b28 604
88acdbc2 605 if self.params.get('allow_unplayable_formats'):
606 self.report_warning(
ec11a9f4 607 f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
819e0531 608 'This is a developer option intended for debugging. \n'
609 ' If you experience any issues while using this option, '
ec11a9f4 610 f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')
88acdbc2 611
be5df5ee
S
612 def check_deprecated(param, option, suggestion):
613 if self.params.get(param) is not None:
86e5f3ed 614 self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
be5df5ee
S
615 return True
616 return False
617
618 if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
38cce791
YCH
619 if self.params.get('geo_verification_proxy') is None:
620 self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']
621
0d1bb027 622 check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
623 check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
53ed7066 624 check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')
0d1bb027 625
49a57e70 626 for msg in self.params.get('_warnings', []):
0d1bb027 627 self.report_warning(msg)
ee8dd27a 628 for msg in self.params.get('_deprecation_warnings', []):
629 self.deprecation_warning(msg)
0d1bb027 630
8a82af35 631 self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
8a82af35 632 if 'list-formats' in self.params['compat_opts']:
ec11a9f4 633 self.params['listformats_table'] = False
634
b5ae35ee 635 if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
b868936c 636 # nooverwrites was unnecessarily changed to overwrites
637 # in 0c3d0f51778b153f65c21906031c2e091fcfb641
638 # This ensures compatibility with both keys
639 self.params['overwrites'] = not self.params['nooverwrites']
b5ae35ee 640 elif self.params.get('overwrites') is None:
641 self.params.pop('overwrites', None)
b868936c 642 else:
643 self.params['nooverwrites'] = not self.params['overwrites']
b9d973be 644
455a15e2 645 self.params.setdefault('forceprint', {})
646 self.params.setdefault('print_to_file', {})
bb66c247 647
648 # Compatibility with older syntax
ca30f449 649 if not isinstance(params['forceprint'], dict):
455a15e2 650 self.params['forceprint'] = {'video': params['forceprint']}
ca30f449 651
455a15e2 652 if self.params.get('bidi_workaround', False):
1c088fa8
PH
653 try:
654 import pty
655 master, slave = pty.openpty()
ac668111 656 width = shutil.get_terminal_size().columns
591bb9d3 657 width_args = [] if width is None else ['-w', str(width)]
658 sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
5d681e96 659 try:
d3c93ec2 660 self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
5d681e96 661 except OSError:
d3c93ec2 662 self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
5d681e96 663 self._output_channel = os.fdopen(master, 'rb')
1c088fa8 664 except OSError as ose:
66e7ace1 665 if ose.errno == errno.ENOENT:
49a57e70 666 self.report_warning(
667 'Could not find fribidi executable, ignoring --bidi-workaround. '
668 'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
1c088fa8
PH
669 else:
670 raise
0783b09b 671
97ec5bc5 672 if auto_init:
673 if auto_init != 'no_verbose_header':
674 self.print_debug_header()
675 self.add_default_info_extractors()
676
3089bc74
S
677 if (sys.platform != 'win32'
678 and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
455a15e2 679 and not self.params.get('restrictfilenames', False)):
e9137224 680 # Unicode filesystem API will throw errors (#1474, #13027)
34308b30 681 self.report_warning(
6febd1c1 682 'Assuming --restrict-filenames since file system encoding '
1b725173 683 'cannot encode all characters. '
6febd1c1 684 'Set the LC_ALL environment variable to fix this.')
4a98cdbf 685 self.params['restrictfilenames'] = True
34308b30 686
bf1824b3 687 self._parse_outtmpl()
486dd09e 688
187986a8 689 # Creating format selector here allows us to catch syntax errors before the extraction
690 self.format_selector = (
fa9f30b8 691 self.params.get('format') if self.params.get('format') in (None, '-')
093a1710 692 else self.params['format'] if callable(self.params['format'])
187986a8 693 else self.build_format_selector(self.params['format']))
694
8b7539d2 695 # Set http_headers defaults according to std_headers
696 self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))
697
013b50b7 698 hooks = {
699 'post_hooks': self.add_post_hook,
700 'progress_hooks': self.add_progress_hook,
701 'postprocessor_hooks': self.add_postprocessor_hook,
702 }
703 for opt, fn in hooks.items():
704 for ph in self.params.get(opt, []):
705 fn(ph)
71b640cc 706
5bfc8bee 707 for pp_def_raw in self.params.get('postprocessors', []):
708 pp_def = dict(pp_def_raw)
709 when = pp_def.pop('when', 'post_process')
710 self.add_post_processor(
f9934b96 711 get_postprocessor(pp_def.pop('key'))(self, **pp_def),
5bfc8bee 712 when=when)
713
97ec5bc5 714 self._setup_opener()
51fb4995
YCH
715 register_socks_protocols()
716
ed39cac5 717 def preload_download_archive(fn):
718 """Preload the archive, if any is specified"""
719 if fn is None:
720 return False
49a57e70 721 self.write_debug(f'Loading archive file {fn!r}')
ed39cac5 722 try:
723 with locked_file(fn, 'r', encoding='utf-8') as archive_file:
724 for line in archive_file:
725 self.archive.add(line.strip())
86e5f3ed 726 except OSError as ioe:
ed39cac5 727 if ioe.errno != errno.ENOENT:
728 raise
729 return False
730 return True
731
732 self.archive = set()
733 preload_download_archive(self.params.get('download_archive'))
734
7d4111ed
PH
735 def warn_if_short_id(self, argv):
736 # short YouTube ID starting with dash?
737 idxs = [
738 i for i, a in enumerate(argv)
739 if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
740 if idxs:
741 correct_argv = (
7a5c1cfe 742 ['yt-dlp']
3089bc74
S
743 + [a for i, a in enumerate(argv) if i not in idxs]
744 + ['--'] + [argv[i] for i in idxs]
7d4111ed
PH
745 )
746 self.report_warning(
747 'Long argument string detected. '
49a57e70 748 'Use -- to separate parameters and URLs, like this:\n%s' %
7d4111ed
PH
749 args_to_str(correct_argv))
750
8222d8de
JMF
751 def add_info_extractor(self, ie):
752 """Add an InfoExtractor object to the end of the list."""
8b7491c8 753 ie_key = ie.ie_key()
754 self._ies[ie_key] = ie
e52d7f85 755 if not isinstance(ie, type):
8b7491c8 756 self._ies_instances[ie_key] = ie
e52d7f85 757 ie.set_downloader(self)
8222d8de 758
8b7491c8 759 def _get_info_extractor_class(self, ie_key):
760 ie = self._ies.get(ie_key)
761 if ie is None:
762 ie = get_info_extractor(ie_key)
763 self.add_info_extractor(ie)
764 return ie
765
56c73665
JMF
766 def get_info_extractor(self, ie_key):
767 """
768 Get an instance of an IE with name ie_key, it will try to get one from
769 the _ies list, if there's no instance it will create a new one and add
770 it to the extractor list.
771 """
772 ie = self._ies_instances.get(ie_key)
773 if ie is None:
774 ie = get_info_extractor(ie_key)()
775 self.add_info_extractor(ie)
776 return ie
777
023fa8c4
JMF
778 def add_default_info_extractors(self):
779 """
780 Add the InfoExtractors returned by gen_extractors to the end of the list
781 """
e52d7f85 782 for ie in gen_extractor_classes():
023fa8c4
JMF
783 self.add_info_extractor(ie)
784
56d868db 785 def add_post_processor(self, pp, when='post_process'):
8222d8de 786 """Add a PostProcessor object to the end of the chain."""
8aa0e7cd 787 assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
5bfa4862 788 self._pps[when].append(pp)
8222d8de
JMF
789 pp.set_downloader(self)
790
ab8e5e51
AM
791 def add_post_hook(self, ph):
792 """Add the post hook"""
793 self._post_hooks.append(ph)
794
933605d7 795 def add_progress_hook(self, ph):
819e0531 796 """Add the download progress hook"""
933605d7 797 self._progress_hooks.append(ph)
8ab470f1 798
819e0531 799 def add_postprocessor_hook(self, ph):
800 """Add the postprocessing progress hook"""
801 self._postprocessor_hooks.append(ph)
5bfc8bee 802 for pps in self._pps.values():
803 for pp in pps:
804 pp.add_progress_hook(ph)
819e0531 805
1c088fa8 806 def _bidi_workaround(self, message):
5d681e96 807 if not hasattr(self, '_output_channel'):
1c088fa8
PH
808 return message
809
5d681e96 810 assert hasattr(self, '_output_process')
14f25df2 811 assert isinstance(message, str)
6febd1c1 812 line_count = message.count('\n') + 1
0f06bcd7 813 self._output_process.stdin.write((message + '\n').encode())
5d681e96 814 self._output_process.stdin.flush()
0f06bcd7 815 res = ''.join(self._output_channel.readline().decode()
9e1a5b84 816 for _ in range(line_count))
6febd1c1 817 return res[:-len('\n')]
1c088fa8 818
b35496d8 819 def _write_string(self, message, out=None, only_once=False):
820 if only_once:
821 if message in self._printed_messages:
822 return
823 self._printed_messages.add(message)
824 write_string(message, out=out, encoding=self.params.get('encoding'))
734f90bb 825
cf4f42cb 826 def to_stdout(self, message, skip_eol=False, quiet=None):
0760b0a7 827 """Print message to stdout"""
cf4f42cb 828 if quiet is not None:
ae6a1b95 829 self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. Use "YoutubeDL.to_screen" instead')
8a82af35 830 if skip_eol is not False:
831 self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. Use "YoutubeDL.to_screen" instead')
0bf9dc1e 832 self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)
cf4f42cb 833
834 def to_screen(self, message, skip_eol=False, quiet=None):
835 """Print message to screen if not in quiet mode"""
8bf9319e 836 if self.params.get('logger'):
43afe285 837 self.params['logger'].debug(message)
cf4f42cb 838 return
839 if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
840 return
841 self._write_string(
842 '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
591bb9d3 843 self._out_files.screen)
8222d8de 844
b35496d8 845 def to_stderr(self, message, only_once=False):
0760b0a7 846 """Print message to stderr"""
14f25df2 847 assert isinstance(message, str)
8bf9319e 848 if self.params.get('logger'):
43afe285
IB
849 self.params['logger'].error(message)
850 else:
5792c950 851 self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)
cf4f42cb 852
853 def _send_console_code(self, code):
591bb9d3 854 if compat_os_name == 'nt' or not self._out_files.console:
cf4f42cb 855 return
591bb9d3 856 self._write_string(code, self._out_files.console)
8222d8de 857
1e5b9a95
PH
858 def to_console_title(self, message):
859 if not self.params.get('consoletitle', False):
860 return
3efb96a6 861 message = remove_terminal_sequences(message)
4bede0d8
C
862 if compat_os_name == 'nt':
863 if ctypes.windll.kernel32.GetConsoleWindow():
864 # c_wchar_p() might not be necessary if `message` is
865 # already of type unicode()
866 ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
cf4f42cb 867 else:
868 self._send_console_code(f'\033]0;{message}\007')
1e5b9a95 869
bdde425c 870 def save_console_title(self):
cf4f42cb 871 if not self.params.get('consoletitle') or self.params.get('simulate'):
bdde425c 872 return
592b7485 873 self._send_console_code('\033[22;0t') # Save the title on stack
bdde425c
PH
874
875 def restore_console_title(self):
cf4f42cb 876 if not self.params.get('consoletitle') or self.params.get('simulate'):
bdde425c 877 return
592b7485 878 self._send_console_code('\033[23;0t') # Restore the title from stack
bdde425c
PH
879
880 def __enter__(self):
881 self.save_console_title()
882 return self
883
884 def __exit__(self, *args):
885 self.restore_console_title()
f89197d7 886
dca08720 887 if self.params.get('cookiefile') is not None:
1bab3437 888 self.cookiejar.save(ignore_discard=True, ignore_expires=True)
bdde425c 889
fa9f30b8 890 def trouble(self, message=None, tb=None, is_error=True):
8222d8de
JMF
891 """Determine action to take when a download problem appears.
892
893 Depending on if the downloader has been configured to ignore
894 download errors or not, this method may throw an exception or
895 not when errors are found, after printing the message.
896
fa9f30b8 897 @param tb If given, is additional traceback information
898 @param is_error Whether to raise error according to ignorerrors
8222d8de
JMF
899 """
900 if message is not None:
901 self.to_stderr(message)
902 if self.params.get('verbose'):
903 if tb is None:
904 if sys.exc_info()[0]: # if .trouble has been called from an except block
6febd1c1 905 tb = ''
8222d8de 906 if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
6febd1c1 907 tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
c0384f22 908 tb += encode_compat_str(traceback.format_exc())
8222d8de
JMF
909 else:
910 tb_data = traceback.format_list(traceback.extract_stack())
6febd1c1 911 tb = ''.join(tb_data)
c19bc311 912 if tb:
913 self.to_stderr(tb)
fa9f30b8 914 if not is_error:
915 return
b1940459 916 if not self.params.get('ignoreerrors'):
8222d8de
JMF
917 if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
918 exc_info = sys.exc_info()[1].exc_info
919 else:
920 exc_info = sys.exc_info()
921 raise DownloadError(message, exc_info)
922 self._download_retcode = 1
923
19a03940 924 Styles = Namespace(
925 HEADERS='yellow',
926 EMPHASIS='light blue',
492272fe 927 FILENAME='green',
19a03940 928 ID='green',
929 DELIM='blue',
930 ERROR='red',
931 WARNING='yellow',
932 SUPPRESS='light black',
933 )
ec11a9f4 934
7578d77d 935 def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
e5a998f3 936 text = str(text)
ec11a9f4 937 if test_encoding:
938 original_text = text
5c104538 939 # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
940 encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
ec11a9f4 941 text = text.encode(encoding, 'ignore').decode(encoding)
942 if fallback is not None and text != original_text:
943 text = fallback
7578d77d 944 return format_text(text, f) if allow_colors else text if fallback is None else fallback
ec11a9f4 945
591bb9d3 946 def _format_out(self, *args, **kwargs):
947 return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)
948
ec11a9f4 949 def _format_screen(self, *args, **kwargs):
591bb9d3 950 return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)
ec11a9f4 951
952 def _format_err(self, *args, **kwargs):
591bb9d3 953 return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)
819e0531 954
c84aeac6 955 def report_warning(self, message, only_once=False):
8222d8de
JMF
956 '''
957 Print the message to stderr, it will be prefixed with 'WARNING:'
958 If stderr is a tty file the 'WARNING:' will be colored
959 '''
6d07ce01
JMF
960 if self.params.get('logger') is not None:
961 self.params['logger'].warning(message)
8222d8de 962 else:
ad8915b7
PH
963 if self.params.get('no_warnings'):
964 return
ec11a9f4 965 self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)
8222d8de 966
ee8dd27a 967 def deprecation_warning(self, message):
968 if self.params.get('logger') is not None:
a44ca5a4 969 self.params['logger'].warning(f'DeprecationWarning: {message}')
ee8dd27a 970 else:
971 self.to_stderr(f'{self._format_err("DeprecationWarning:", self.Styles.ERROR)} {message}', True)
972
fa9f30b8 973 def report_error(self, message, *args, **kwargs):
8222d8de
JMF
974 '''
975 Do the same as trouble, but prefixes the message with 'ERROR:', colored
976 in red if stderr is a tty file.
977 '''
fa9f30b8 978 self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)
8222d8de 979
b35496d8 980 def write_debug(self, message, only_once=False):
0760b0a7 981 '''Log debug message or Print message to stderr'''
982 if not self.params.get('verbose', False):
983 return
8a82af35 984 message = f'[debug] {message}'
0760b0a7 985 if self.params.get('logger'):
986 self.params['logger'].debug(message)
987 else:
b35496d8 988 self.to_stderr(message, only_once)
0760b0a7 989
8222d8de
JMF
990 def report_file_already_downloaded(self, file_name):
991 """Report file has already been fully downloaded."""
992 try:
6febd1c1 993 self.to_screen('[download] %s has already been downloaded' % file_name)
ce02ed60 994 except UnicodeEncodeError:
6febd1c1 995 self.to_screen('[download] The file has already been downloaded')
8222d8de 996
0c3d0f51 997 def report_file_delete(self, file_name):
998 """Report that existing file will be deleted."""
999 try:
c25228e5 1000 self.to_screen('Deleting existing file %s' % file_name)
0c3d0f51 1001 except UnicodeEncodeError:
c25228e5 1002 self.to_screen('Deleting existing file')
0c3d0f51 1003
319b6059 1004 def raise_no_formats(self, info, forced=False, *, msg=None):
0a5a191a 1005 has_drm = info.get('_has_drm')
319b6059 1006 ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
1007 msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
1008 if forced or not ignored:
1151c407 1009 raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
319b6059 1010 expected=has_drm or ignored or expected)
88acdbc2 1011 else:
1012 self.report_warning(msg)
1013
de6000d9 1014 def parse_outtmpl(self):
bf1824b3 1015 self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
1016 self._parse_outtmpl()
1017 return self.params['outtmpl']
1018
1019 def _parse_outtmpl(self):
7b2c3f47 1020 sanitize = IDENTITY
bf1824b3 1021 if self.params.get('restrictfilenames'): # Remove spaces in the default template
71ce444a 1022 sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')
bf1824b3 1023
1024 outtmpl = self.params.setdefault('outtmpl', {})
1025 if not isinstance(outtmpl, dict):
1026 self.params['outtmpl'] = outtmpl = {'default': outtmpl}
1027 outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})
de6000d9 1028
21cd8fae 1029 def get_output_path(self, dir_type='', filename=None):
1030 paths = self.params.get('paths', {})
1031 assert isinstance(paths, dict)
1032 path = os.path.join(
1033 expand_path(paths.get('home', '').strip()),
1034 expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
1035 filename or '')
21cd8fae 1036 return sanitize_path(path, force=self.params.get('windowsfilenames'))
1037
76a264ac 1038 @staticmethod
901130bb 1039 def _outtmpl_expandpath(outtmpl):
1040 # expand_path translates '%%' into '%' and '$$' into '$'
1041 # correspondingly that is not what we want since we need to keep
1042 # '%%' intact for template dict substitution step. Working around
1043 # with boundary-alike separator hack.
1044 sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
86e5f3ed 1045 outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')
901130bb 1046
1047 # outtmpl should be expand_path'ed before template dict substitution
1048 # because meta fields may contain env variables we don't want to
62b58c09 1049 # be expanded. E.g. for outtmpl "%(title)s.%(ext)s" and
901130bb 1050 # title "Hello $PATH", we don't want `$PATH` to be expanded.
1051 return expand_path(outtmpl).replace(sep, '')
1052
1053 @staticmethod
1054 def escape_outtmpl(outtmpl):
1055 ''' Escape any remaining strings like %s, %abc% etc. '''
1056 return re.sub(
1057 STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
1058 lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
1059 outtmpl)
1060
1061 @classmethod
1062 def validate_outtmpl(cls, outtmpl):
76a264ac 1063 ''' @return None or Exception object '''
7d1eb38a 1064 outtmpl = re.sub(
47cdc68e 1065 STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
7d1eb38a 1066 lambda mobj: f'{mobj.group(0)[:-1]}s',
1067 cls._outtmpl_expandpath(outtmpl))
76a264ac 1068 try:
7d1eb38a 1069 cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
76a264ac 1070 return None
1071 except ValueError as err:
1072 return err
1073
03b4de72 1074 @staticmethod
1075 def _copy_infodict(info_dict):
1076 info_dict = dict(info_dict)
09b49e1f 1077 info_dict.pop('__postprocessors', None)
415f8d51 1078 info_dict.pop('__pending_error', None)
03b4de72 1079 return info_dict
1080
e0fd9573 1081 def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
1082 """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
1083 @param sanitize Whether to sanitize the output as a filename.
1084 For backward compatibility, a function can also be passed
1085 """
1086
6e84b215 1087 info_dict.setdefault('epoch', int(time.time())) # keep epoch consistent once set
143db31d 1088
03b4de72 1089 info_dict = self._copy_infodict(info_dict)
752cda38 1090 info_dict['duration_string'] = ( # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
53c18592 1091 formatSeconds(info_dict['duration'], '-' if sanitize else ':')
143db31d 1092 if info_dict.get('duration', None) is not None
1093 else None)
1d485a1a 1094 info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
9c906919 1095 info_dict['video_autonumber'] = self._num_videos
752cda38 1096 if info_dict.get('resolution') is None:
1097 info_dict['resolution'] = self.format_resolution(info_dict, default=None)
143db31d 1098
e6f21b3d 1099 # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
143db31d 1100 # of %(field)s to %(field)0Nd for backward compatibility
1101 field_size_compat_map = {
0a5a191a 1102 'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
ec11a9f4 1103 'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
752cda38 1104 'autonumber': self.params.get('autonumber_size') or 5,
143db31d 1105 }
752cda38 1106
385a27fa 1107 TMPL_DICT = {}
47cdc68e 1108 EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
385a27fa 1109 MATH_FUNCTIONS = {
1110 '+': float.__add__,
1111 '-': float.__sub__,
1112 }
e625be0d 1113 # Field is of the form key1.key2...
1114 # where keys (except first) can be string, int or slice
2b8a2973 1115 FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
1d485a1a 1116 MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
385a27fa 1117 MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
1d485a1a 1118 INTERNAL_FORMAT_RE = re.compile(rf'''(?x)
e625be0d 1119 (?P<negate>-)?
1d485a1a 1120 (?P<fields>{FIELD_RE})
1121 (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
e625be0d 1122 (?:>(?P<strf_format>.+?))?
34baa9fd 1123 (?P<remaining>
1124 (?P<alternate>(?<!\\),[^|&)]+)?
1125 (?:&(?P<replacement>.*?))?
1126 (?:\|(?P<default>.*?))?
1d485a1a 1127 )$''')
752cda38 1128
2b8a2973 1129 def _traverse_infodict(k):
1130 k = k.split('.')
1131 if k[0] == '':
1132 k.pop(0)
1133 return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)
76a264ac 1134
752cda38 1135 def get_value(mdict):
1136 # Object traversal
2b8a2973 1137 value = _traverse_infodict(mdict['fields'])
752cda38 1138 # Negative
1139 if mdict['negate']:
1140 value = float_or_none(value)
1141 if value is not None:
1142 value *= -1
1143 # Do maths
385a27fa 1144 offset_key = mdict['maths']
1145 if offset_key:
752cda38 1146 value = float_or_none(value)
1147 operator = None
385a27fa 1148 while offset_key:
1149 item = re.match(
1150 MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
1151 offset_key).group(0)
1152 offset_key = offset_key[len(item):]
1153 if operator is None:
752cda38 1154 operator = MATH_FUNCTIONS[item]
385a27fa 1155 continue
1156 item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
1157 offset = float_or_none(item)
1158 if offset is None:
2b8a2973 1159 offset = float_or_none(_traverse_infodict(item))
385a27fa 1160 try:
1161 value = operator(value, multiplier * offset)
1162 except (TypeError, ZeroDivisionError):
1163 return None
1164 operator = None
752cda38 1165 # Datetime formatting
1166 if mdict['strf_format']:
7c37ff97 1167 value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))
752cda38 1168
a6bcaf71 1169 # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
1170 if sanitize and value == '':
1171 value = None
752cda38 1172 return value
1173
b868936c 1174 na = self.params.get('outtmpl_na_placeholder', 'NA')
1175
e0fd9573 1176 def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
5c3895ff 1177 return sanitize_filename(str(value), restricted=restricted, is_id=(
1178 bool(re.search(r'(^|[_.])id(\.|$)', key))
8a82af35 1179 if 'filename-sanitization' in self.params['compat_opts']
5c3895ff 1180 else NO_DEFAULT))
e0fd9573 1181
1182 sanitizer = sanitize if callable(sanitize) else filename_sanitizer
1183 sanitize = bool(sanitize)
1184
6e84b215 1185 def _dumpjson_default(obj):
1186 if isinstance(obj, (set, LazyList)):
1187 return list(obj)
adbc4ec4 1188 return repr(obj)
6e84b215 1189
752cda38 1190 def create_key(outer_mobj):
1191 if not outer_mobj.group('has_key'):
b836dc94 1192 return outer_mobj.group(0)
752cda38 1193 key = outer_mobj.group('key')
752cda38 1194 mobj = re.match(INTERNAL_FORMAT_RE, key)
e0fd9573 1195 initial_field = mobj.group('fields') if mobj else ''
e978789f 1196 value, replacement, default = None, None, na
7c37ff97 1197 while mobj:
e625be0d 1198 mobj = mobj.groupdict()
7c37ff97 1199 default = mobj['default'] if mobj['default'] is not None else default
752cda38 1200 value = get_value(mobj)
e978789f 1201 replacement = mobj['replacement']
7c37ff97 1202 if value is None and mobj['alternate']:
34baa9fd 1203 mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
7c37ff97 1204 else:
1205 break
752cda38 1206
b868936c 1207 fmt = outer_mobj.group('format')
752cda38 1208 if fmt == 's' and value is not None and key in field_size_compat_map.keys():
86e5f3ed 1209 fmt = f'0{field_size_compat_map[key]:d}d'
752cda38 1210
e978789f 1211 value = default if value is None else value if replacement is None else replacement
752cda38 1212
4476d2c7 1213 flags = outer_mobj.group('conversion') or ''
7d1eb38a 1214 str_fmt = f'{fmt[:-1]}s'
524e2e4f 1215 if fmt[-1] == 'l': # list
4476d2c7 1216 delim = '\n' if '#' in flags else ', '
9e907ebd 1217 value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
524e2e4f 1218 elif fmt[-1] == 'j': # json
4476d2c7 1219 value, fmt = json.dumps(value, default=_dumpjson_default, indent=4 if '#' in flags else None), str_fmt
47cdc68e 1220 elif fmt[-1] == 'h': # html
1221 value, fmt = escapeHTML(value), str_fmt
524e2e4f 1222 elif fmt[-1] == 'q': # quoted
4476d2c7 1223 value = map(str, variadic(value) if '#' in flags else [value])
1224 value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
524e2e4f 1225 elif fmt[-1] == 'B': # bytes
0f06bcd7 1226 value = f'%{str_fmt}'.encode() % str(value).encode()
f5aa5cfb 1227 value, fmt = value.decode('utf-8', 'ignore'), 's'
524e2e4f 1228 elif fmt[-1] == 'U': # unicode normalized
524e2e4f 1229 value, fmt = unicodedata.normalize(
1230 # "+" = compatibility equivalence, "#" = NFD
4476d2c7 1231 'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
524e2e4f 1232 value), str_fmt
e0fd9573 1233 elif fmt[-1] == 'D': # decimal suffix
abbeeebc 1234 num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
1235 value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
1236 factor=1024 if '#' in flags else 1000)
37893bb0 1237 elif fmt[-1] == 'S': # filename sanitization
e0fd9573 1238 value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
7d1eb38a 1239 elif fmt[-1] == 'c':
524e2e4f 1240 if value:
1241 value = str(value)[0]
76a264ac 1242 else:
524e2e4f 1243 fmt = str_fmt
76a264ac 1244 elif fmt[-1] not in 'rs': # numeric
a439a3a4 1245 value = float_or_none(value)
752cda38 1246 if value is None:
1247 value, fmt = default, 's'
901130bb 1248
752cda38 1249 if sanitize:
1250 if fmt[-1] == 'r':
1251 # If value is an object, sanitize might convert it to a string
1252 # So we convert it to repr first
7d1eb38a 1253 value, fmt = repr(value), str_fmt
639f1cea 1254 if fmt[-1] in 'csr':
e0fd9573 1255 value = sanitizer(initial_field, value)
901130bb 1256
b868936c 1257 key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
385a27fa 1258 TMPL_DICT[key] = value
b868936c 1259 return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))
752cda38 1260
385a27fa 1261 return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT
143db31d 1262
819e0531 1263 def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
1264 outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
1265 return self.escape_outtmpl(outtmpl) % info_dict
1266
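The conversion suffixes handled by create_key above (l, j, h, q, B, U, D, S, c) can be exercised directly through evaluate_outtmpl. A minimal sketch with a made-up info dict; the exact sanitized output depends on the yt-dlp version and options:

import yt_dlp

ydl = yt_dlp.YoutubeDL()
info = {'id': 'xyz', 'title': 'A/B test', 'tags': ['news', 'python'], 'view_count': 10_000_000}
print(ydl.evaluate_outtmpl('%(title)S [%(id)s]', info))    # S sanitizes the field for use in filenames
print(ydl.evaluate_outtmpl('%(tags)l', info))              # l joins a list: "news, python"
print(ydl.evaluate_outtmpl('%(tags)#j', info))             # j dumps JSON; the '#' flag pretty-prints it
print(ydl.evaluate_outtmpl('%(view_count)D views', info))  # D adds a decimal suffix: "10M views"
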
5127e92a 1267 def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
1268 assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
1269 if outtmpl is None:
bf1824b3 1270 outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
8222d8de 1271 try:
5127e92a 1272 outtmpl = self._outtmpl_expandpath(outtmpl)
e0fd9573 1273 filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
6a0546e3 1274 if not filename:
1275 return None
15da37c7 1276
5127e92a 1277 if tmpl_type in ('', 'temp'):
6a0546e3 1278 final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
1279 if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
1280 filename = replace_extension(filename, ext, final_ext)
5127e92a 1281 elif tmpl_type:
6a0546e3 1282 force_ext = OUTTMPL_TYPES[tmpl_type]
1283 if force_ext:
1284 filename = replace_extension(filename, force_ext, info_dict.get('ext'))
de6000d9 1285
bdc3fd2f
U
1286 # https://github.com/blackjack4494/youtube-dlc/issues/85
1287 trim_file_name = self.params.get('trim_file_name', False)
1288 if trim_file_name:
5c22c63d 1289 no_ext, *ext = filename.rsplit('.', 2)
1290 filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')
bdc3fd2f 1291
0202b52a 1292 return filename
8222d8de 1293 except ValueError as err:
6febd1c1 1294 self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
8222d8de
JMF
1295 return None
1296
5127e92a 1297 def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
1298 """Generate the output filename"""
1299 if outtmpl:
1300 assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
1301 dir_type = None
1302 filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
80c03fa9 1303 if not filename and dir_type not in ('', 'temp'):
1304 return ''
de6000d9 1305
c84aeac6 1306 if warn:
21cd8fae 1307 if not self.params.get('paths'):
de6000d9 1308 pass
1309 elif filename == '-':
c84aeac6 1310 self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
de6000d9 1311 elif os.path.isabs(filename):
c84aeac6 1312 self.report_warning('--paths is ignored since an absolute path is given in the output template', only_once=True)
de6000d9 1313 if filename == '-' or not filename:
1314 return filename
1315
21cd8fae 1316 return self.get_output_path(dir_type, filename)
0202b52a 1317
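To see _prepare_filename/prepare_filename at work from embedding code: 'outtmpl' may be given as a dict keyed by template type and 'paths' as a dict keyed by path type, mirroring -o/-P TYPE:VALUE on the command line. A rough sketch with hypothetical paths; the final string is whatever get_output_path joins:

import yt_dlp

ydl = yt_dlp.YoutubeDL({
    'outtmpl': {'default': '%(title)s [%(id)s].%(ext)s', 'thumbnail': '%(id)s'},
    'paths': {'home': '/tmp/videos', 'temp': '/tmp/parts'},
})
info = {'id': 'xyz', 'title': 'demo', 'ext': 'mp4'}
print(ydl.prepare_filename(info))                        # default template, placed under the 'home' path
print(ydl.prepare_filename(info, dir_type='thumbnail'))  # per-type template for thumbnails
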
120fe513 1318 def _match_entry(self, info_dict, incomplete=False, silent=False):
ecdec191 1319 """ Returns None if the file should be downloaded """
8222d8de 1320
3bec830a 1321 video_title = info_dict.get('title', info_dict.get('id', 'entry'))
c77495e3 1322
8b0d7497 1323 def check_filter():
8b0d7497 1324 if 'title' in info_dict:
1325 # This can happen when we're just evaluating the playlist
1326 title = info_dict['title']
1327 matchtitle = self.params.get('matchtitle', False)
1328 if matchtitle:
1329 if not re.search(matchtitle, title, re.IGNORECASE):
1330 return '"' + title + '" title did not match pattern "' + matchtitle + '"'
1331 rejecttitle = self.params.get('rejecttitle', False)
1332 if rejecttitle:
1333 if re.search(rejecttitle, title, re.IGNORECASE):
1334 return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
1335 date = info_dict.get('upload_date')
1336 if date is not None:
1337 dateRange = self.params.get('daterange', DateRange())
1338 if date not in dateRange:
86e5f3ed 1339 return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
8b0d7497 1340 view_count = info_dict.get('view_count')
1341 if view_count is not None:
1342 min_views = self.params.get('min_views')
1343 if min_views is not None and view_count < min_views:
1344 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
1345 max_views = self.params.get('max_views')
1346 if max_views is not None and view_count > max_views:
1347 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
1348 if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
1349 return 'Skipping "%s" because it is age restricted' % video_title
8b0d7497 1350
8f18aca8 1351 match_filter = self.params.get('match_filter')
1352 if match_filter is not None:
1353 try:
1354 ret = match_filter(info_dict, incomplete=incomplete)
1355 except TypeError:
1356 # For backward compatibility
1357 ret = None if incomplete else match_filter(info_dict)
492272fe 1358 if ret is NO_DEFAULT:
1359 while True:
1360 filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
1361 reply = input(self._format_screen(
1362 f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
1363 if reply in {'y', ''}:
1364 return None
1365 elif reply == 'n':
1366 return f'Skipping {video_title}'
492272fe 1367 elif ret is not None:
8f18aca8 1368 return ret
8b0d7497 1369 return None
1370
c77495e3 1371 if self.in_download_archive(info_dict):
1372 reason = '%s has already been recorded in the archive' % video_title
1373 break_opt, break_err = 'break_on_existing', ExistingVideoReached
1374 else:
1375 reason = check_filter()
1376 break_opt, break_err = 'break_on_reject', RejectedVideoReached
8b0d7497 1377 if reason is not None:
120fe513 1378 if not silent:
1379 self.to_screen('[download] ' + reason)
c77495e3 1380 if self.params.get(break_opt, False):
1381 raise break_err()
8b0d7497 1382 return reason
fe7e0c98 1383
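_match_entry above consults the 'match_filter' callable, which may return None (download), a string (reason to skip), or NO_DEFAULT (prompt interactively). A hedged sketch of supplying one, either hand-written or via the match_filter_func helper that backs --match-filter:

import yt_dlp
from yt_dlp.utils import match_filter_func

def longer_than_a_minute(info, *, incomplete=False):
    duration = info.get('duration')
    if incomplete or duration is None:
        return None                    # metadata not available (yet) - do not reject
    if duration < 60:
        return 'Skipping short video'  # the returned string is printed and the entry is skipped
    return None                        # None lets the entry through

ydl = yt_dlp.YoutubeDL({'match_filter': longer_than_a_minute})
# roughly equivalent: yt_dlp.YoutubeDL({'match_filter': match_filter_func('duration >= 60')})
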
b6c45014
JMF
1384 @staticmethod
1385 def add_extra_info(info_dict, extra_info):
1386 '''Set the keys from extra_info in info dict if they are missing'''
1387 for key, value in extra_info.items():
1388 info_dict.setdefault(key, value)
1389
409e1828 1390 def extract_info(self, url, download=True, ie_key=None, extra_info=None,
61aa5ba3 1391 process=True, force_generic_extractor=False):
41d1cca3 1392 """
1393 Return a list with a dictionary for each video extracted.
1394
1395 Arguments:
1396 url -- URL to extract
1397
1398 Keyword arguments:
1399 download -- whether to download videos during extraction
1400 ie_key -- extractor key hint
1401 extra_info -- dictionary containing the extra values to add to each result
1402 process -- whether to resolve all unresolved references (URLs, playlist items),
1403 must be True for download to work.
1404 force_generic_extractor -- force using the generic extractor
1405 """
fe7e0c98 1406
409e1828 1407 if extra_info is None:
1408 extra_info = {}
1409
61aa5ba3 1410 if not ie_key and force_generic_extractor:
d22dec74
S
1411 ie_key = 'Generic'
1412
8222d8de 1413 if ie_key:
8b7491c8 1414 ies = {ie_key: self._get_info_extractor_class(ie_key)}
8222d8de
JMF
1415 else:
1416 ies = self._ies
1417
8b7491c8 1418 for ie_key, ie in ies.items():
8222d8de
JMF
1419 if not ie.suitable(url):
1420 continue
1421
1422 if not ie.working():
6febd1c1
PH
1423 self.report_warning('The program functionality for this site has been marked as broken, '
1424 'and will probably not work.')
8222d8de 1425
1151c407 1426 temp_id = ie.get_temp_id(url)
a0566bbf 1427 if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
5e5be0c0 1428 self.to_screen(f'[{ie_key}] {temp_id}: has already been recorded in the archive')
1429 if self.params.get('break_on_existing', False):
1430 raise ExistingVideoReached()
a0566bbf 1431 break
8b7491c8 1432 return self.__extract_info(url, self.get_info_extractor(ie_key), download, extra_info, process)
a0566bbf 1433 else:
1434 self.report_error('no suitable InfoExtractor for URL %s' % url)
1435
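A minimal embedding sketch of the entry point defined above; any supported URL works, the one below is only a placeholder:

import yt_dlp

with yt_dlp.YoutubeDL({'quiet': True}) as ydl:
    info = ydl.extract_info('https://www.youtube.com/watch?v=EXAMPLE', download=False)
    print(info['extractor'], info['id'], info.get('title'))
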
7e88d7d7 1436 def _handle_extraction_exceptions(func):
b5ae35ee 1437 @functools.wraps(func)
a0566bbf 1438 def wrapper(self, *args, **kwargs):
6da22e7d 1439 while True:
1440 try:
1441 return func(self, *args, **kwargs)
1442 except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
8222d8de 1443 raise
6da22e7d 1444 except ReExtractInfo as e:
1445 if e.expected:
1446 self.to_screen(f'{e}; Re-extracting data')
1447 else:
1448 self.to_stderr('\r')
1449 self.report_warning(f'{e}; Re-extracting data')
1450 continue
1451 except GeoRestrictedError as e:
1452 msg = e.msg
1453 if e.countries:
1454 msg += '\nThis video is available in %s.' % ', '.join(
1455 map(ISO3166Utils.short2full, e.countries))
1456 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around this.'
1457 self.report_error(msg)
1458 except ExtractorError as e: # An error we somewhat expected
1459 self.report_error(str(e), e.format_traceback())
1460 except Exception as e:
1461 if self.params.get('ignoreerrors'):
1462 self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
1463 else:
1464 raise
1465 break
a0566bbf 1466 return wrapper
1467
693f0600 1468 def _wait_for_video(self, ie_result={}):
f2ebc5c7 1469 if (not self.params.get('wait_for_video')
1470 or ie_result.get('_type', 'video') != 'video'
1471 or ie_result.get('formats') or ie_result.get('url')):
1472 return
1473
1474 format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
1475 last_msg = ''
1476
1477 def progress(msg):
1478 nonlocal last_msg
a7dc6a89 1479 full_msg = f'{msg}\n'
1480 if not self.params.get('noprogress'):
1481 full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
1482 elif last_msg:
1483 return
1484 self.to_screen(full_msg, skip_eol=True)
f2ebc5c7 1485 last_msg = msg
1486
1487 min_wait, max_wait = self.params.get('wait_for_video')
1488 diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
1489 if diff is None and ie_result.get('live_status') == 'is_upcoming':
16c620bc 1490 diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
f2ebc5c7 1491 self.report_warning('Release time of video is not known')
693f0600 1492 elif ie_result and (diff or 0) <= 0:
f2ebc5c7 1493 self.report_warning('Video should already be available according to extracted info')
38d79fd1 1494 diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
f2ebc5c7 1495 self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')
1496
1497 wait_till = time.time() + diff
1498 try:
1499 while True:
1500 diff = wait_till - time.time()
1501 if diff <= 0:
1502 progress('')
1503 raise ReExtractInfo('[wait] Wait period ended', expected=True)
1504 progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
1505 time.sleep(1)
1506 except KeyboardInterrupt:
1507 progress('')
1508 raise ReExtractInfo('[wait] Interrupted by user', expected=True)
1509 except BaseException as e:
1510 if not isinstance(e, ReExtractInfo):
1511 self.to_screen('')
1512 raise
1513
7e88d7d7 1514 @_handle_extraction_exceptions
58f197b7 1515 def __extract_info(self, url, ie, download, extra_info, process):
693f0600 1516 try:
1517 ie_result = ie.extract(url)
1518 except UserNotLive as e:
1519 if process:
1520 if self.params.get('wait_for_video'):
1521 self.report_warning(e)
1522 self._wait_for_video()
1523 raise
a0566bbf 1524 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
cb794ee0 1525 self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
a0566bbf 1526 return
1527 if isinstance(ie_result, list):
1528 # Backwards compatibility: old IE result format
1529 ie_result = {
1530 '_type': 'compat_list',
1531 'entries': ie_result,
1532 }
e37d0efb 1533 if extra_info.get('original_url'):
1534 ie_result.setdefault('original_url', extra_info['original_url'])
a0566bbf 1535 self.add_default_extra_info(ie_result, ie, url)
1536 if process:
f2ebc5c7 1537 self._wait_for_video(ie_result)
a0566bbf 1538 return self.process_ie_result(ie_result, download, extra_info)
8222d8de 1539 else:
a0566bbf 1540 return ie_result
fe7e0c98 1541
ea38e55f 1542 def add_default_extra_info(self, ie_result, ie, url):
6033d980 1543 if url is not None:
1544 self.add_extra_info(ie_result, {
1545 'webpage_url': url,
1546 'original_url': url,
57ebfca3 1547 })
1548 webpage_url = ie_result.get('webpage_url')
1549 if webpage_url:
1550 self.add_extra_info(ie_result, {
1551 'webpage_url_basename': url_basename(webpage_url),
1552 'webpage_url_domain': get_domain(webpage_url),
6033d980 1553 })
1554 if ie is not None:
1555 self.add_extra_info(ie_result, {
1556 'extractor': ie.IE_NAME,
1557 'extractor_key': ie.ie_key(),
1558 })
ea38e55f 1559
58adec46 1560 def process_ie_result(self, ie_result, download=True, extra_info=None):
8222d8de
JMF
1561 """
1562 Take the result of the ie (may be modified) and resolve all unresolved
1563 references (URLs, playlist items).
1564
1565 It will also download the videos if 'download' is True.
1566 Returns the resolved ie_result.
1567 """
58adec46 1568 if extra_info is None:
1569 extra_info = {}
e8ee972c
PH
1570 result_type = ie_result.get('_type', 'video')
1571
057a5206 1572 if result_type in ('url', 'url_transparent'):
8f97a15d 1573 ie_result['url'] = sanitize_url(
1574 ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
e37d0efb 1575 if ie_result.get('original_url'):
1576 extra_info.setdefault('original_url', ie_result['original_url'])
1577
057a5206 1578 extract_flat = self.params.get('extract_flat', False)
3089bc74
S
1579 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1580 or extract_flat is True):
ecb54191 1581 info_copy = ie_result.copy()
6033d980 1582 ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
360167b9 1583 if ie and not ie_result.get('id'):
4614bc22 1584 info_copy['id'] = ie.get_temp_id(ie_result['url'])
6033d980 1585 self.add_default_extra_info(info_copy, ie, ie_result['url'])
4614bc22 1586 self.add_extra_info(info_copy, extra_info)
b5475f11 1587 info_copy, _ = self.pre_process(info_copy)
ecb54191 1588 self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
415f8d51 1589 self._raise_pending_errors(info_copy)
4614bc22 1590 if self.params.get('force_write_download_archive', False):
1591 self.record_download_archive(info_copy)
e8ee972c
PH
1592 return ie_result
1593
8222d8de 1594 if result_type == 'video':
b6c45014 1595 self.add_extra_info(ie_result, extra_info)
9c2b75b5 1596 ie_result = self.process_video_result(ie_result, download=download)
415f8d51 1597 self._raise_pending_errors(ie_result)
28b0eb0f 1598 additional_urls = (ie_result or {}).get('additional_urls')
9c2b75b5 1599 if additional_urls:
e9f4ccd1 1600 # TODO: Improve MetadataParserPP to allow setting a list
14f25df2 1601 if isinstance(additional_urls, str):
9c2b75b5 1602 additional_urls = [additional_urls]
1603 self.to_screen(
1604 '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
1605 self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
1606 ie_result['additional_entries'] = [
1607 self.extract_info(
b69fd25c 1608 url, download, extra_info=extra_info,
9c2b75b5 1609 force_generic_extractor=self.params.get('force_generic_extractor'))
1610 for url in additional_urls
1611 ]
1612 return ie_result
8222d8de
JMF
1613 elif result_type == 'url':
1614 # We have to add extra_info to the results because it may be
1615 # contained in a playlist
07cce701 1616 return self.extract_info(
1617 ie_result['url'], download,
1618 ie_key=ie_result.get('ie_key'),
1619 extra_info=extra_info)
7fc3fa05
PH
1620 elif result_type == 'url_transparent':
1621 # Use the information from the embedding page
1622 info = self.extract_info(
1623 ie_result['url'], ie_key=ie_result.get('ie_key'),
1624 extra_info=extra_info, download=False, process=False)
1625
1640eb09
S
1626 # extract_info may return None when ignoreerrors is enabled and
1627 # extraction failed with an error, don't crash and return early
1628 # in this case
1629 if not info:
1630 return info
1631
3975b4d2 1632 exempted_fields = {'_type', 'url', 'ie_key'}
1633 if not ie_result.get('section_end') and ie_result.get('section_start') is None:
1634 # For video clips, the id etc of the clip extractor should be used
1635 exempted_fields |= {'id', 'extractor', 'extractor_key'}
1636
412c617d 1637 new_result = info.copy()
3975b4d2 1638 new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))
7fc3fa05 1639
0563f7ac
S
1640 # Extracted info may not be a video result (i.e.
1641 # info.get('_type', 'video') != video) but rather an url or
1642 # url_transparent. In such cases outer metadata (from ie_result)
1643 # should be propagated to inner one (info). For this to happen
1644 # _type of info should be overridden with url_transparent. This
067aa17e 1645 # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
0563f7ac
S
1646 if new_result.get('_type') == 'url':
1647 new_result['_type'] = 'url_transparent'
7fc3fa05
PH
1648
1649 return self.process_ie_result(
1650 new_result, download=download, extra_info=extra_info)
40fcba5e 1651 elif result_type in ('playlist', 'multi_video'):
30a074c2 1652 # Protect from infinite recursion due to recursively nested playlists
1653 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
1654 webpage_url = ie_result['webpage_url']
1655 if webpage_url in self._playlist_urls:
7e85e872 1656 self.to_screen(
30a074c2 1657 '[download] Skipping already downloaded playlist: %s'
1658 % (ie_result.get('title') or ie_result.get('id')))
1659 return
7e85e872 1660
30a074c2 1661 self._playlist_level += 1
1662 self._playlist_urls.add(webpage_url)
03f83004 1663 self._fill_common_fields(ie_result, False)
bc516a3f 1664 self._sanitize_thumbnails(ie_result)
30a074c2 1665 try:
1666 return self.__process_playlist(ie_result, download)
1667 finally:
1668 self._playlist_level -= 1
1669 if not self._playlist_level:
1670 self._playlist_urls.clear()
8222d8de 1671 elif result_type == 'compat_list':
c9bf4114
PH
1672 self.report_warning(
1673 'Extractor %s returned a compat_list result. '
1674 'It needs to be updated.' % ie_result.get('extractor'))
5f6a1245 1675
8222d8de 1676 def _fixup(r):
b868936c 1677 self.add_extra_info(r, {
1678 'extractor': ie_result['extractor'],
1679 'webpage_url': ie_result['webpage_url'],
1680 'webpage_url_basename': url_basename(ie_result['webpage_url']),
0bb322b9 1681 'webpage_url_domain': get_domain(ie_result['webpage_url']),
b868936c 1682 'extractor_key': ie_result['extractor_key'],
1683 })
8222d8de
JMF
1684 return r
1685 ie_result['entries'] = [
b6c45014 1686 self.process_ie_result(_fixup(r), download, extra_info)
8222d8de
JMF
1687 for r in ie_result['entries']
1688 ]
1689 return ie_result
1690 else:
1691 raise Exception('Invalid result type: %s' % result_type)
1692
e92caff5 1693 def _ensure_dir_exists(self, path):
1694 return make_dir(path, self.report_error)
1695
3b603dbd 1696 @staticmethod
3bec830a 1697 def _playlist_infodict(ie_result, strict=False, **kwargs):
1698 info = {
1699 'playlist_count': ie_result.get('playlist_count'),
3b603dbd 1700 'playlist': ie_result.get('title') or ie_result.get('id'),
1701 'playlist_id': ie_result.get('id'),
1702 'playlist_title': ie_result.get('title'),
1703 'playlist_uploader': ie_result.get('uploader'),
1704 'playlist_uploader_id': ie_result.get('uploader_id'),
3b603dbd 1705 **kwargs,
1706 }
3bec830a 1707 if strict:
1708 return info
1709 return {
1710 **info,
1711 'playlist_index': 0,
1712 '__last_playlist_index': max(ie_result['requested_entries'] or (0, 0)),
1713 'extractor': ie_result['extractor'],
1714 'webpage_url': ie_result['webpage_url'],
1715 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1716 'webpage_url_domain': get_domain(ie_result['webpage_url']),
1717 'extractor_key': ie_result['extractor_key'],
1718 }
3b603dbd 1719
30a074c2 1720 def __process_playlist(self, ie_result, download):
7e88d7d7 1721 """Process each entry in the playlist"""
f5ea4748 1722 assert ie_result['_type'] in ('playlist', 'multi_video')
1723
3bec830a 1724 common_info = self._playlist_infodict(ie_result, strict=True)
3955b207 1725 title = common_info.get('playlist') or '<Untitled>'
3bec830a 1726 if self._match_entry(common_info, incomplete=True) is not None:
1727 return
c6e07cf1 1728 self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')
f0d785d3 1729
7e88d7d7 1730 all_entries = PlaylistEntries(self, ie_result)
7e9a6125 1731 entries = orderedSet(all_entries.get_requested_items(), lazy=True)
1732
1733 lazy = self.params.get('lazy_playlist')
1734 if lazy:
1735 resolved_entries, n_entries = [], 'N/A'
1736 ie_result['requested_entries'], ie_result['entries'] = None, None
1737 else:
1738 entries = resolved_entries = list(entries)
1739 n_entries = len(resolved_entries)
1740 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
1741 if not ie_result.get('playlist_count'):
1742 # Better to do this after potentially exhausting entries
1743 ie_result['playlist_count'] = all_entries.get_full_count()
498f5606 1744
0647d925 1745 extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
1746 ie_copy = collections.ChainMap(ie_result, extra)
3bec830a 1747
e08a85d8 1748 _infojson_written = False
0bfc53d0 1749 write_playlist_files = self.params.get('allow_playlist_files', True)
1750 if write_playlist_files and self.params.get('list_thumbnails'):
1751 self.list_thumbnails(ie_result)
1752 if write_playlist_files and not self.params.get('simulate'):
e08a85d8 1753 _infojson_written = self._write_info_json(
1754 'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
1755 if _infojson_written is None:
80c03fa9 1756 return
1757 if self._write_description('playlist', ie_result,
1758 self.prepare_filename(ie_copy, 'pl_description')) is None:
1759 return
681de68e 1760 # TODO: This should be passed to ThumbnailsConvertor if necessary
3bec830a 1761 self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))
30a074c2 1762
7e9a6125 1763 if lazy:
1764 if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
1765 self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
1766 elif self.params.get('playlistreverse'):
1767 entries.reverse()
1768 elif self.params.get('playlistrandom'):
30a074c2 1769 random.shuffle(entries)
1770
7e88d7d7 1771 self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} videos'
1772 f'{format_field(ie_result, "playlist_count", " of %s")}')
30a074c2 1773
134c913c 1774 keep_resolved_entries = self.params.get('extract_flat') != 'discard'
1775 if self.params.get('extract_flat') == 'discard_in_playlist':
1776 keep_resolved_entries = ie_result['_type'] != 'playlist'
1777 if keep_resolved_entries:
1778 self.write_debug('The information of all playlist entries will be held in memory')
1779
26e2805c 1780 failures = 0
1781 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
7e9a6125 1782 for i, (playlist_index, entry) in enumerate(entries):
1783 if lazy:
1784 resolved_entries.append((playlist_index, entry))
3bec830a 1785 if not entry:
7e88d7d7 1786 continue
1787
7e88d7d7 1788 entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
7e9a6125 1789 if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
1790 playlist_index = ie_result['requested_entries'][i]
1791
0647d925 1792 entry_copy = collections.ChainMap(entry, {
3bec830a 1793 **common_info,
3955b207 1794 'n_entries': int_or_none(n_entries),
71729754 1795 'playlist_index': playlist_index,
7e9a6125 1796 'playlist_autonumber': i + 1,
0647d925 1797 })
3bec830a 1798
0647d925 1799 if self._match_entry(entry_copy, incomplete=True) is not None:
f0ad6f8c 1800 # For compatibility with youtube-dl. See https://github.com/yt-dlp/yt-dlp/issues/4369
1801 resolved_entries[i] = (playlist_index, NO_DEFAULT)
3bec830a 1802 continue
1803
1804 self.to_screen('[download] Downloading video %s of %s' % (
1805 self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
1806
a6ca61d4 1807 extra.update({
1808 'playlist_index': playlist_index,
1809 'playlist_autonumber': i + 1,
1810 })
3bec830a 1811 entry_result = self.__process_iterable_entry(entry, download, extra)
26e2805c 1812 if not entry_result:
1813 failures += 1
1814 if failures >= max_failures:
1815 self.report_error(
7e88d7d7 1816 f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
26e2805c 1817 break
134c913c 1818 if keep_resolved_entries:
1819 resolved_entries[i] = (playlist_index, entry_result)
7e88d7d7 1820
1821 # Update with processed data
f0ad6f8c 1822 ie_result['requested_entries'] = [i for i, e in resolved_entries if e is not NO_DEFAULT]
1823 ie_result['entries'] = [e for _, e in resolved_entries if e is not NO_DEFAULT]
e08a85d8 1824
1825 # Write the updated info to json
cb96c5be 1826 if _infojson_written is True and self._write_info_json(
e08a85d8 1827 'updated playlist', ie_result,
1828 self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
1829 return
ca30f449 1830
ed5835b4 1831 ie_result = self.run_all_pps('playlist', ie_result)
7e88d7d7 1832 self.to_screen(f'[download] Finished downloading playlist: {title}')
30a074c2 1833 return ie_result
1834
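The playlist handling above is steered by a few params; a sketch with a placeholder URL and hypothetical values:

import yt_dlp

ydl = yt_dlp.YoutubeDL({
    'playlist_items': '1-3,7',       # only these indices are requested from PlaylistEntries
    'lazy_playlist': False,          # True processes entries as received (and disables reverse/random, see above)
    'extract_flat': 'in_playlist',   # keep playlist entries unresolved instead of re-extracting each one
})
info = ydl.extract_info('https://www.youtube.com/playlist?list=EXAMPLE', download=False)
print(info.get('playlist_count'), len(info.get('entries') or []))
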
7e88d7d7 1835 @_handle_extraction_exceptions
a0566bbf 1836 def __process_iterable_entry(self, entry, download, extra_info):
1837 return self.process_ie_result(
1838 entry, download=download, extra_info=extra_info)
1839
67134eab
JMF
1840 def _build_format_filter(self, filter_spec):
1841 " Returns a function to filter the formats according to the filter_spec "
083c9df9
PH
1842
1843 OPERATORS = {
1844 '<': operator.lt,
1845 '<=': operator.le,
1846 '>': operator.gt,
1847 '>=': operator.ge,
1848 '=': operator.eq,
1849 '!=': operator.ne,
1850 }
67134eab 1851 operator_rex = re.compile(r'''(?x)\s*
187986a8 1852 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
1853 (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1854 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
083c9df9 1855 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
187986a8 1856 m = operator_rex.fullmatch(filter_spec)
9ddb6925
S
1857 if m:
1858 try:
1859 comparison_value = int(m.group('value'))
1860 except ValueError:
1861 comparison_value = parse_filesize(m.group('value'))
1862 if comparison_value is None:
1863 comparison_value = parse_filesize(m.group('value') + 'B')
1864 if comparison_value is None:
1865 raise ValueError(
1866 'Invalid value %r in format specification %r' % (
67134eab 1867 m.group('value'), filter_spec))
9ddb6925
S
1868 op = OPERATORS[m.group('op')]
1869
083c9df9 1870 if not m:
9ddb6925
S
1871 STR_OPERATORS = {
1872 '=': operator.eq,
10d33b34
YCH
1873 '^=': lambda attr, value: attr.startswith(value),
1874 '$=': lambda attr, value: attr.endswith(value),
1875 '*=': lambda attr, value: value in attr,
1ce9a3cb 1876 '~=': lambda attr, value: value.search(attr) is not None
9ddb6925 1877 }
187986a8 1878 str_operator_rex = re.compile(r'''(?x)\s*
1879 (?P<key>[a-zA-Z0-9._-]+)\s*
1ce9a3cb
LF
1880 (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
1881 (?P<quote>["'])?
1882 (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
1883 (?(quote)(?P=quote))\s*
9ddb6925 1884 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
187986a8 1885 m = str_operator_rex.fullmatch(filter_spec)
9ddb6925 1886 if m:
1ce9a3cb
LF
1887 if m.group('op') == '~=':
1888 comparison_value = re.compile(m.group('value'))
1889 else:
1890 comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
2cc779f4
S
1891 str_op = STR_OPERATORS[m.group('op')]
1892 if m.group('negation'):
e118a879 1893 op = lambda attr, value: not str_op(attr, value)
2cc779f4
S
1894 else:
1895 op = str_op
083c9df9 1896
9ddb6925 1897 if not m:
187986a8 1898 raise SyntaxError('Invalid filter specification %r' % filter_spec)
083c9df9
PH
1899
1900 def _filter(f):
1901 actual_value = f.get(m.group('key'))
1902 if actual_value is None:
1903 return m.group('none_inclusive')
1904 return op(actual_value, comparison_value)
67134eab
JMF
1905 return _filter
1906
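The specs parsed by _build_format_filter are the bracketed parts of a format selector (e.g. the [height<=720] in "best[height<=720]"). A quick illustrative check against fake format dicts; the helper is internal and is called here only to make the semantics concrete:

import yt_dlp

ydl = yt_dlp.YoutubeDL()
under_100m = ydl._build_format_filter('filesize<100M')
print(under_100m({'filesize': 50 * 1024 * 1024}))  # True
print(under_100m({'filesize': None}))              # falsy - such formats are dropped unless '?' is appended
starts_mp4 = ydl._build_format_filter('ext^=mp4')  # string operator: ext starts with "mp4"
print(starts_mp4({'ext': 'mp4'}))                  # True
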
9f1a1c36 1907 def _check_formats(self, formats):
1908 for f in formats:
1909 self.to_screen('[info] Testing format %s' % f['format_id'])
75689fe5 1910 path = self.get_output_path('temp')
1911 if not self._ensure_dir_exists(f'{path}/'):
1912 continue
1913 temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
9f1a1c36 1914 temp_file.close()
1915 try:
1916 success, _ = self.dl(temp_file.name, f, test=True)
8a82af35 1917 except (DownloadError, OSError, ValueError) + network_exceptions:
9f1a1c36 1918 success = False
1919 finally:
1920 if os.path.exists(temp_file.name):
1921 try:
1922 os.remove(temp_file.name)
1923 except OSError:
1924 self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
1925 if success:
1926 yield f
1927 else:
1928 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
1929
0017d9ad 1930 def _default_format_spec(self, info_dict, download=True):
0017d9ad 1931
af0f7428
S
1932 def can_merge():
1933 merger = FFmpegMergerPP(self)
1934 return merger.available and merger.can_merge()
1935
91ebc640 1936 prefer_best = (
b7b04c78 1937 not self.params.get('simulate')
91ebc640 1938 and download
1939 and (
1940 not can_merge()
21633673 1941 or info_dict.get('is_live') and not self.params.get('live_from_start')
bf1824b3 1942 or self.params['outtmpl']['default'] == '-'))
53ed7066 1943 compat = (
1944 prefer_best
1945 or self.params.get('allow_multiple_audio_streams', False)
8a82af35 1946 or 'format-spec' in self.params['compat_opts'])
91ebc640 1947
1948 return (
53ed7066 1949 'best/bestvideo+bestaudio' if prefer_best
1950 else 'bestvideo*+bestaudio/best' if not compat
91ebc640 1951 else 'bestvideo+bestaudio/best')
0017d9ad 1952
67134eab
JMF
1953 def build_format_selector(self, format_spec):
1954 def syntax_error(note, start):
1955 message = (
1956 'Invalid format specification: '
86e5f3ed 1957 '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
67134eab
JMF
1958 return SyntaxError(message)
1959
1960 PICKFIRST = 'PICKFIRST'
1961 MERGE = 'MERGE'
1962 SINGLE = 'SINGLE'
0130afb7 1963 GROUP = 'GROUP'
67134eab
JMF
1964 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
1965
91ebc640 1966 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
1967 'video': self.params.get('allow_multiple_video_streams', False)}
909d24dd 1968
9f1a1c36 1969 check_formats = self.params.get('check_formats') == 'selected'
e8e73840 1970
67134eab
JMF
1971 def _parse_filter(tokens):
1972 filter_parts = []
1973 for type, string, start, _, _ in tokens:
1974 if type == tokenize.OP and string == ']':
1975 return ''.join(filter_parts)
1976 else:
1977 filter_parts.append(string)
1978
232541df 1979 def _remove_unused_ops(tokens):
62b58c09
L
1980 # Remove operators that we don't use and join them with the surrounding strings.
1981 # E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
232541df
JMF
1982 ALLOWED_OPS = ('/', '+', ',', '(', ')')
1983 last_string, last_start, last_end, last_line = None, None, None, None
1984 for type, string, start, end, line in tokens:
1985 if type == tokenize.OP and string == '[':
1986 if last_string:
1987 yield tokenize.NAME, last_string, last_start, last_end, last_line
1988 last_string = None
1989 yield type, string, start, end, line
1990 # everything inside brackets will be handled by _parse_filter
1991 for type, string, start, end, line in tokens:
1992 yield type, string, start, end, line
1993 if type == tokenize.OP and string == ']':
1994 break
1995 elif type == tokenize.OP and string in ALLOWED_OPS:
1996 if last_string:
1997 yield tokenize.NAME, last_string, last_start, last_end, last_line
1998 last_string = None
1999 yield type, string, start, end, line
2000 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
2001 if not last_string:
2002 last_string = string
2003 last_start = start
2004 last_end = end
2005 else:
2006 last_string += string
2007 if last_string:
2008 yield tokenize.NAME, last_string, last_start, last_end, last_line
2009
cf2ac6df 2010 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
67134eab
JMF
2011 selectors = []
2012 current_selector = None
2013 for type, string, start, _, _ in tokens:
2014 # ENCODING is only defined in python 3.x
2015 if type == getattr(tokenize, 'ENCODING', None):
2016 continue
2017 elif type in [tokenize.NAME, tokenize.NUMBER]:
2018 current_selector = FormatSelector(SINGLE, string, [])
2019 elif type == tokenize.OP:
cf2ac6df
JMF
2020 if string == ')':
2021 if not inside_group:
2022 # ')' will be handled by the parentheses group
2023 tokens.restore_last_token()
67134eab 2024 break
cf2ac6df 2025 elif inside_merge and string in ['/', ',']:
0130afb7
JMF
2026 tokens.restore_last_token()
2027 break
cf2ac6df
JMF
2028 elif inside_choice and string == ',':
2029 tokens.restore_last_token()
2030 break
2031 elif string == ',':
0a31a350
JMF
2032 if not current_selector:
2033 raise syntax_error('"," must follow a format selector', start)
67134eab
JMF
2034 selectors.append(current_selector)
2035 current_selector = None
2036 elif string == '/':
d96d604e
JMF
2037 if not current_selector:
2038 raise syntax_error('"/" must follow a format selector', start)
67134eab 2039 first_choice = current_selector
cf2ac6df 2040 second_choice = _parse_format_selection(tokens, inside_choice=True)
f5f4a27a 2041 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
67134eab
JMF
2042 elif string == '[':
2043 if not current_selector:
2044 current_selector = FormatSelector(SINGLE, 'best', [])
2045 format_filter = _parse_filter(tokens)
2046 current_selector.filters.append(format_filter)
0130afb7
JMF
2047 elif string == '(':
2048 if current_selector:
2049 raise syntax_error('Unexpected "("', start)
cf2ac6df
JMF
2050 group = _parse_format_selection(tokens, inside_group=True)
2051 current_selector = FormatSelector(GROUP, group, [])
67134eab 2052 elif string == '+':
d03cfdce 2053 if not current_selector:
2054 raise syntax_error('Unexpected "+"', start)
2055 selector_1 = current_selector
2056 selector_2 = _parse_format_selection(tokens, inside_merge=True)
2057 if not selector_2:
2058 raise syntax_error('Expected a selector', start)
2059 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
67134eab 2060 else:
86e5f3ed 2061 raise syntax_error(f'Operator not recognized: "{string}"', start)
67134eab
JMF
2062 elif type == tokenize.ENDMARKER:
2063 break
2064 if current_selector:
2065 selectors.append(current_selector)
2066 return selectors
2067
f8d4ad9a 2068 def _merge(formats_pair):
2069 format_1, format_2 = formats_pair
2070
2071 formats_info = []
2072 formats_info.extend(format_1.get('requested_formats', (format_1,)))
2073 formats_info.extend(format_2.get('requested_formats', (format_2,)))
2074
2075 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
551f9388 2076 get_no_more = {'video': False, 'audio': False}
f8d4ad9a 2077 for (i, fmt_info) in enumerate(formats_info):
551f9388 2078 if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
2079 formats_info.pop(i)
2080 continue
2081 for aud_vid in ['audio', 'video']:
f8d4ad9a 2082 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
2083 if get_no_more[aud_vid]:
2084 formats_info.pop(i)
f5510afe 2085 break
f8d4ad9a 2086 get_no_more[aud_vid] = True
2087
2088 if len(formats_info) == 1:
2089 return formats_info[0]
2090
2091 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
2092 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
2093
2094 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
2095 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
2096
fc61aff4
LL
2097 output_ext = get_compatible_ext(
2098 vcodecs=[f.get('vcodec') for f in video_fmts],
2099 acodecs=[f.get('acodec') for f in audio_fmts],
2100 vexts=[f['ext'] for f in video_fmts],
2101 aexts=[f['ext'] for f in audio_fmts],
2102 preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
2103 or self.params.get('prefer_free_formats') and ('webm', 'mkv')))
f8d4ad9a 2104
975a0d0d 2105 filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
2106
f8d4ad9a 2107 new_dict = {
2108 'requested_formats': formats_info,
975a0d0d 2109 'format': '+'.join(filtered('format')),
2110 'format_id': '+'.join(filtered('format_id')),
f8d4ad9a 2111 'ext': output_ext,
975a0d0d 2112 'protocol': '+'.join(map(determine_protocol, formats_info)),
093a1710 2113 'language': '+'.join(orderedSet(filtered('language'))) or None,
2114 'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
2115 'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
975a0d0d 2116 'tbr': sum(filtered('tbr', 'vbr', 'abr')),
f8d4ad9a 2117 }
2118
2119 if the_only_video:
2120 new_dict.update({
2121 'width': the_only_video.get('width'),
2122 'height': the_only_video.get('height'),
2123 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
2124 'fps': the_only_video.get('fps'),
49a57e70 2125 'dynamic_range': the_only_video.get('dynamic_range'),
f8d4ad9a 2126 'vcodec': the_only_video.get('vcodec'),
2127 'vbr': the_only_video.get('vbr'),
2128 'stretched_ratio': the_only_video.get('stretched_ratio'),
2129 })
2130
2131 if the_only_audio:
2132 new_dict.update({
2133 'acodec': the_only_audio.get('acodec'),
2134 'abr': the_only_audio.get('abr'),
975a0d0d 2135 'asr': the_only_audio.get('asr'),
b8ed0f15 2136 'audio_channels': the_only_audio.get('audio_channels')
f8d4ad9a 2137 })
2138
2139 return new_dict
2140
e8e73840 2141 def _check_formats(formats):
981052c9 2142 if not check_formats:
2143 yield from formats
b5ac45b1 2144 return
9f1a1c36 2145 yield from self._check_formats(formats)
e8e73840 2146
67134eab 2147 def _build_selector_function(selector):
909d24dd 2148 if isinstance(selector, list): # ,
67134eab
JMF
2149 fs = [_build_selector_function(s) for s in selector]
2150
317f7ab6 2151 def selector_function(ctx):
67134eab 2152 for f in fs:
981052c9 2153 yield from f(ctx)
67134eab 2154 return selector_function
909d24dd 2155
2156 elif selector.type == GROUP: # ()
0130afb7 2157 selector_function = _build_selector_function(selector.selector)
909d24dd 2158
2159 elif selector.type == PICKFIRST: # /
67134eab
JMF
2160 fs = [_build_selector_function(s) for s in selector.selector]
2161
317f7ab6 2162 def selector_function(ctx):
67134eab 2163 for f in fs:
317f7ab6 2164 picked_formats = list(f(ctx))
67134eab
JMF
2165 if picked_formats:
2166 return picked_formats
2167 return []
67134eab 2168
981052c9 2169 elif selector.type == MERGE: # +
2170 selector_1, selector_2 = map(_build_selector_function, selector.selector)
2171
2172 def selector_function(ctx):
adbc4ec4 2173 for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
981052c9 2174 yield _merge(pair)
2175
909d24dd 2176 elif selector.type == SINGLE: # atom
598d185d 2177 format_spec = selector.selector or 'best'
909d24dd 2178
f8d4ad9a 2179 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
909d24dd 2180 if format_spec == 'all':
2181 def selector_function(ctx):
9222c381 2182 yield from _check_formats(ctx['formats'][::-1])
f8d4ad9a 2183 elif format_spec == 'mergeall':
2184 def selector_function(ctx):
316f2650 2185 formats = list(_check_formats(
2186 f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
e01d6aa4 2187 if not formats:
2188 return
921b76ca 2189 merged_format = formats[-1]
2190 for f in formats[-2::-1]:
f8d4ad9a 2191 merged_format = _merge((merged_format, f))
2192 yield merged_format
909d24dd 2193
2194 else:
85e801a9 2195 format_fallback, separate_fallback, format_reverse, format_idx = False, None, True, 1
eff63539 2196 mobj = re.match(
2197 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
2198 format_spec)
2199 if mobj is not None:
2200 format_idx = int_or_none(mobj.group('n'), default=1)
e8e73840 2201 format_reverse = mobj.group('bw')[0] == 'b'
eff63539 2202 format_type = (mobj.group('type') or [None])[0]
2203 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
2204 format_modified = mobj.group('mod') is not None
909d24dd 2205
2206 format_fallback = not format_type and not format_modified # for b, w
8326b00a 2207 _filter_f = (
eff63539 2208 (lambda f: f.get('%scodec' % format_type) != 'none')
2209 if format_type and format_modified # bv*, ba*, wv*, wa*
2210 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
2211 if format_type # bv, ba, wv, wa
2212 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
2213 if not format_modified # b, w
8326b00a 2214 else lambda f: True) # b*, w*
2215 filter_f = lambda f: _filter_f(f) and (
2216 f.get('vcodec') != 'none' or f.get('acodec') != 'none')
67134eab 2217 else:
48ee10ee 2218 if format_spec in self._format_selection_exts['audio']:
b11c04a8 2219 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
48ee10ee 2220 elif format_spec in self._format_selection_exts['video']:
b11c04a8 2221 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
85e801a9 2222 separate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
48ee10ee 2223 elif format_spec in self._format_selection_exts['storyboards']:
b11c04a8 2224 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
2225 else:
b5ae35ee 2226 filter_f = lambda f: f.get('format_id') == format_spec # id
909d24dd 2227
2228 def selector_function(ctx):
2229 formats = list(ctx['formats'])
909d24dd 2230 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
85e801a9 2231 if not matches:
2232 if format_fallback and ctx['incomplete_formats']:
2233 # for extractors with incomplete formats (audio only (soundcloud)
2234 # or video only (imgur)) best/worst will fall back to
2235 # best/worst {video,audio}-only format
2236 matches = formats
2237 elif seperate_fallback and not ctx['has_merged_format']:
2238 # for compatibility with youtube-dl when there is no pre-merged format
2239 matches = list(filter(separate_fallback, formats))
981052c9 2240 matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
2241 try:
e8e73840 2242 yield matches[format_idx - 1]
4abea8ca 2243 except LazyList.IndexError:
981052c9 2244 return
083c9df9 2245
67134eab 2246 filters = [self._build_format_filter(f) for f in selector.filters]
083c9df9 2247
317f7ab6 2248 def final_selector(ctx):
adbc4ec4 2249 ctx_copy = dict(ctx)
67134eab 2250 for _filter in filters:
317f7ab6
S
2251 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
2252 return selector_function(ctx_copy)
67134eab 2253 return final_selector
083c9df9 2254
0f06bcd7 2255 stream = io.BytesIO(format_spec.encode())
0130afb7 2256 try:
f9934b96 2257 tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
0130afb7
JMF
2258 except tokenize.TokenError:
2259 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
2260
86e5f3ed 2261 class TokenIterator:
0130afb7
JMF
2262 def __init__(self, tokens):
2263 self.tokens = tokens
2264 self.counter = 0
2265
2266 def __iter__(self):
2267 return self
2268
2269 def __next__(self):
2270 if self.counter >= len(self.tokens):
2271 raise StopIteration()
2272 value = self.tokens[self.counter]
2273 self.counter += 1
2274 return value
2275
2276 next = __next__
2277
2278 def restore_last_token(self):
2279 self.counter -= 1
2280
2281 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
67134eab 2282 return _build_selector_function(parsed_selector)
a9c58ad9 2283
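build_format_selector returns a function over a ctx dict (see final_selector above). A sketch applying it to two fake formats so that the MERGE/_merge path is exercised; all field values are invented:

import yt_dlp

ydl = yt_dlp.YoutubeDL()
select = ydl.build_format_selector('bestvideo[height<=720]+bestaudio/best')
ctx = {
    'formats': [
        {'format_id': '136', 'ext': 'mp4', 'height': 720, 'vcodec': 'avc1', 'acodec': 'none', 'url': 'https://host/v'},
        {'format_id': '140', 'ext': 'm4a', 'vcodec': 'none', 'acodec': 'mp4a.40.2', 'url': 'https://host/a'},
    ],
    'incomplete_formats': False,
    'has_merged_format': False,
}
for fmt in select(ctx):
    print(fmt['format_id'], fmt['ext'])  # "136+140" with an ext chosen by get_compatible_ext
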
e5660ee6 2284 def _calc_headers(self, info_dict):
8b7539d2 2285 res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})
e5660ee6 2286
c487cf00 2287 cookies = self._calc_cookies(info_dict['url'])
e5660ee6
JMF
2288 if cookies:
2289 res['Cookie'] = cookies
2290
0016b84e
S
2291 if 'X-Forwarded-For' not in res:
2292 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
2293 if x_forwarded_for_ip:
2294 res['X-Forwarded-For'] = x_forwarded_for_ip
2295
e5660ee6
JMF
2296 return res
2297
c487cf00 2298 def _calc_cookies(self, url):
2299 pr = sanitized_Request(url)
e5660ee6 2300 self.cookiejar.add_cookie_header(pr)
662435f7 2301 return pr.get_header('Cookie')
e5660ee6 2302
9f1a1c36 2303 def _sort_thumbnails(self, thumbnails):
2304 thumbnails.sort(key=lambda t: (
2305 t.get('preference') if t.get('preference') is not None else -1,
2306 t.get('width') if t.get('width') is not None else -1,
2307 t.get('height') if t.get('height') is not None else -1,
2308 t.get('id') if t.get('id') is not None else '',
2309 t.get('url')))
2310
b0249bca 2311 def _sanitize_thumbnails(self, info_dict):
bc516a3f 2312 thumbnails = info_dict.get('thumbnails')
2313 if thumbnails is None:
2314 thumbnail = info_dict.get('thumbnail')
2315 if thumbnail:
2316 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
9f1a1c36 2317 if not thumbnails:
2318 return
2319
2320 def check_thumbnails(thumbnails):
2321 for t in thumbnails:
2322 self.to_screen(f'[info] Testing thumbnail {t["id"]}')
2323 try:
2324 self.urlopen(HEADRequest(t['url']))
2325 except network_exceptions as err:
2326 self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
2327 continue
2328 yield t
2329
2330 self._sort_thumbnails(thumbnails)
2331 for i, t in enumerate(thumbnails):
2332 if t.get('id') is None:
2333 t['id'] = '%d' % i
2334 if t.get('width') and t.get('height'):
2335 t['resolution'] = '%dx%d' % (t['width'], t['height'])
2336 t['url'] = sanitize_url(t['url'])
2337
2338 if self.params.get('check_formats') is True:
282f5709 2339 info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
9f1a1c36 2340 else:
2341 info_dict['thumbnails'] = thumbnails
bc516a3f 2342
03f83004
LNO
2343 def _fill_common_fields(self, info_dict, is_video=True):
2344 # TODO: move sanitization here
2345 if is_video:
2346 # playlists are allowed to lack "title"
d4736fdb 2347 title = info_dict.get('title', NO_DEFAULT)
2348 if title is NO_DEFAULT:
03f83004
LNO
2349 raise ExtractorError('Missing "title" field in extractor result',
2350 video_id=info_dict['id'], ie=info_dict['extractor'])
d4736fdb 2351 info_dict['fulltitle'] = title
2352 if not title:
2353 if title == '':
2354 self.write_debug('Extractor gave empty title. Creating a generic title')
2355 else:
2356 self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
1d485a1a 2357 info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'
03f83004
LNO
2358
2359 if info_dict.get('duration') is not None:
2360 info_dict['duration_string'] = formatSeconds(info_dict['duration'])
2361
2362 for ts_key, date_key in (
2363 ('timestamp', 'upload_date'),
2364 ('release_timestamp', 'release_date'),
2365 ('modified_timestamp', 'modified_date'),
2366 ):
2367 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
2368 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2369 # see http://bugs.python.org/issue1646728)
19a03940 2370 with contextlib.suppress(ValueError, OverflowError, OSError):
03f83004
LNO
2371 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
2372 info_dict[date_key] = upload_date.strftime('%Y%m%d')
03f83004
LNO
2373
2374 live_keys = ('is_live', 'was_live')
2375 live_status = info_dict.get('live_status')
2376 if live_status is None:
2377 for key in live_keys:
2378 if info_dict.get(key) is False:
2379 continue
2380 if info_dict.get(key):
2381 live_status = key
2382 break
2383 if all(info_dict.get(key) is False for key in live_keys):
2384 live_status = 'not_live'
2385 if live_status:
2386 info_dict['live_status'] = live_status
2387 for key in live_keys:
2388 if info_dict.get(key) is None:
2389 info_dict[key] = (live_status == key)
2390
2391 # Auto generate title fields corresponding to the *_number fields when missing
2392 # in order to always have clean titles. This is very common for TV series.
2393 for field in ('chapter', 'season', 'episode'):
2394 if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
2395 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
2396
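A small sketch of the fields _fill_common_fields derives (it is an internal helper; the dict below is hypothetical and already sanitized):

import yt_dlp

ydl = yt_dlp.YoutubeDL()
info = {'id': 'x1', 'title': 'Pilot', 'extractor': 'Example', 'extractor_key': 'Example',
        'episode_number': 3, 'is_live': False, 'was_live': False, 'timestamp': 1660000000}
ydl._fill_common_fields(info)
print(info['episode'], info['live_status'], info['upload_date'])  # Episode 3 not_live 20220808
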
415f8d51 2397 def _raise_pending_errors(self, info):
2398 err = info.pop('__pending_error', None)
2399 if err:
2400 self.report_error(err, tb=False)
2401
dd82ffea
JMF
2402 def process_video_result(self, info_dict, download=True):
2403 assert info_dict.get('_type', 'video') == 'video'
9c906919 2404 self._num_videos += 1
dd82ffea 2405
bec1fad2 2406 if 'id' not in info_dict:
fc08bdd6 2407 raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
2408 elif not info_dict.get('id'):
2409 raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])
455a15e2 2410
c9969434
S
2411 def report_force_conversion(field, field_not, conversion):
2412 self.report_warning(
2413 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2414 % (field, field_not, conversion))
2415
2416 def sanitize_string_field(info, string_field):
2417 field = info.get(string_field)
14f25df2 2418 if field is None or isinstance(field, str):
c9969434
S
2419 return
2420 report_force_conversion(string_field, 'a string', 'string')
14f25df2 2421 info[string_field] = str(field)
c9969434
S
2422
2423 def sanitize_numeric_fields(info):
2424 for numeric_field in self._NUMERIC_FIELDS:
2425 field = info.get(numeric_field)
f9934b96 2426 if field is None or isinstance(field, (int, float)):
c9969434
S
2427 continue
2428 report_force_conversion(numeric_field, 'numeric', 'int')
2429 info[numeric_field] = int_or_none(field)
2430
2431 sanitize_string_field(info_dict, 'id')
2432 sanitize_numeric_fields(info_dict)
3975b4d2 2433 if info_dict.get('section_end') and info_dict.get('section_start') is not None:
2434 info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
4c3f8c3f 2435 if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
50e93e03 2436 self.report_warning('"duration" field is negative, there is an error in extractor')
be6217b2 2437
9eef7c4e 2438 chapters = info_dict.get('chapters') or []
a3976e07 2439 if chapters and chapters[0].get('start_time'):
2440 chapters.insert(0, {'start_time': 0})
2441
9eef7c4e 2442 dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
a3976e07 2443 for idx, (prev, current, next_) in enumerate(zip(
2444 (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
9eef7c4e 2445 if current.get('start_time') is None:
2446 current['start_time'] = prev.get('end_time')
2447 if not current.get('end_time'):
2448 current['end_time'] = next_.get('start_time')
a3976e07 2449 if not current.get('title'):
2450 current['title'] = f'<Untitled Chapter {idx}>'
9eef7c4e 2451
dd82ffea
JMF
2452 if 'playlist' not in info_dict:
2453 # It isn't part of a playlist
2454 info_dict['playlist'] = None
2455 info_dict['playlist_index'] = None
2456
bc516a3f 2457 self._sanitize_thumbnails(info_dict)
d5519808 2458
536a55da 2459 thumbnail = info_dict.get('thumbnail')
bc516a3f 2460 thumbnails = info_dict.get('thumbnails')
536a55da
S
2461 if thumbnail:
2462 info_dict['thumbnail'] = sanitize_url(thumbnail)
2463 elif thumbnails:
d5519808
PH
2464 info_dict['thumbnail'] = thumbnails[-1]['url']
2465
ae30b840 2466 if info_dict.get('display_id') is None and 'id' in info_dict:
0afef30b
PH
2467 info_dict['display_id'] = info_dict['id']
2468
03f83004 2469 self._fill_common_fields(info_dict)
33d2fc2f 2470
05108a49
S
2471 for cc_kind in ('subtitles', 'automatic_captions'):
2472 cc = info_dict.get(cc_kind)
2473 if cc:
2474 for _, subtitle in cc.items():
2475 for subtitle_format in subtitle:
2476 if subtitle_format.get('url'):
2477 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
2478 if subtitle_format.get('ext') is None:
2479 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
2480
2481 automatic_captions = info_dict.get('automatic_captions')
4bba3716 2482 subtitles = info_dict.get('subtitles')
4bba3716 2483
360e1ca5 2484 info_dict['requested_subtitles'] = self.process_subtitles(
05108a49 2485 info_dict['id'], subtitles, automatic_captions)
a504ced0 2486
dd82ffea
JMF
2487 if info_dict.get('formats') is None:
2488 # There's only one format available
2489 formats = [info_dict]
2490 else:
2491 formats = info_dict['formats']
2492
0a5a191a 2493 # or None ensures --clean-infojson removes it
2494 info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
88acdbc2 2495 if not self.params.get('allow_unplayable_formats'):
2496 formats = [f for f in formats if not f.get('has_drm')]
7356a444 2497 if info_dict['_has_drm'] and formats and all(
c0b6e5c7 2498 f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
2499 self.report_warning(
2500 'This video is DRM protected and only images are available for download. '
2501 'Use --list-formats to see them')
88acdbc2 2502
319b6059 2503 get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
2504 if not get_from_start:
2505 info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
2506 if info_dict.get('is_live') and formats:
adbc4ec4 2507 formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
319b6059 2508 if get_from_start and not formats:
a44ca5a4 2509 self.raise_no_formats(info_dict, msg=(
2510 '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
2511 'If you want to download from the current time, use --no-live-from-start'))
adbc4ec4 2512
db95dc13 2513 if not formats:
1151c407 2514 self.raise_no_formats(info_dict)
db95dc13 2515
73af5cc8
S
2516 def is_wellformed(f):
2517 url = f.get('url')
a5ac0c47 2518 if not url:
73af5cc8
S
2519 self.report_warning(
2520 '"url" field is missing or empty - skipping format, '
2521 'there is an error in extractor')
a5ac0c47
S
2522 return False
2523 if isinstance(url, bytes):
2524 sanitize_string_field(f, 'url')
2525 return True
73af5cc8
S
2526
2527 # Filter out malformed formats for better extraction robustness
2528 formats = list(filter(is_wellformed, formats))
2529
181c7053
S
2530 formats_dict = {}
2531
dd82ffea 2532 # We check that all the formats have the format and format_id fields
db95dc13 2533 for i, format in enumerate(formats):
c9969434
S
2534 sanitize_string_field(format, 'format_id')
2535 sanitize_numeric_fields(format)
dcf77cf1 2536 format['url'] = sanitize_url(format['url'])
e74e3b63 2537 if not format.get('format_id'):
14f25df2 2538 format['format_id'] = str(i)
e2effb08
S
2539 else:
2540 # Sanitize format_id from characters used in format selector expression
ec85ded8 2541 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
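# NOTE (added, illustrative): the substitution above turns an id such as
# 'hls-480p (fast)' into 'hls-480p__fast_', so the id can be used safely in a
# format selector expression.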
181c7053
S
2542 format_id = format['format_id']
2543 if format_id not in formats_dict:
2544 formats_dict[format_id] = []
2545 formats_dict[format_id].append(format)
2546
2547 # Make sure all formats have unique format_id
03b4de72 2548 common_exts = set(itertools.chain(*self._format_selection_exts.values()))
181c7053 2549 for format_id, ambiguous_formats in formats_dict.items():
48ee10ee 2550 ambiguous_id = len(ambiguous_formats) > 1
2551 for i, format in enumerate(ambiguous_formats):
2552 if ambiguous_id:
181c7053 2553 format['format_id'] = '%s-%d' % (format_id, i)
48ee10ee 2554 if format.get('ext') is None:
2555 format['ext'] = determine_ext(format['url']).lower()
2556 # Ensure there is no conflict between id and ext in format selection
2557 # See https://github.com/yt-dlp/yt-dlp/issues/1282
2558 if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
2559 format['format_id'] = 'f%s' % format['format_id']
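# NOTE (added, illustrative): a format whose id equals a common extension while its
# actual ext differs (e.g. format_id 'mp4' with ext 'm4a') is renamed to 'fmp4', so
# that a selector like 'mp4' refers to the extension rather than to that id.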
181c7053
S
2560
2561 for i, format in enumerate(formats):
8c51aa65 2562 if format.get('format') is None:
6febd1c1 2563 format['format'] = '{id} - {res}{note}'.format(
8c51aa65
JMF
2564 id=format['format_id'],
2565 res=self.format_resolution(format),
b868936c 2566 note=format_field(format, 'format_note', ' (%s)'),
8c51aa65 2567 )
6f0be937 2568 if format.get('protocol') is None:
b5559424 2569 format['protocol'] = determine_protocol(format)
239df021 2570 if format.get('resolution') is None:
2571 format['resolution'] = self.format_resolution(format, default=None)
176f1866 2572 if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
2573 format['dynamic_range'] = 'SDR'
f2fe69c7 2574 if (info_dict.get('duration') and format.get('tbr')
2575 and not format.get('filesize') and not format.get('filesize_approx')):
56ba69e4 2576 format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
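# NOTE (added, illustrative): with duration = 60 s and tbr = 1000 (KBit/s) this gives
# filesize_approx = int(60 * 1000 * 128) = 7680000 bytes (~7.3 MiB).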
f2fe69c7 2577
e5660ee6
JMF
2578 # Add HTTP headers, so that external programs can use them from the
2579 # json output
2580 full_format_info = info_dict.copy()
2581 full_format_info.update(format)
2582 format['http_headers'] = self._calc_headers(full_format_info)
0016b84e
S
2583 # Remove private housekeeping stuff
2584 if '__x_forwarded_for_ip' in info_dict:
2585 del info_dict['__x_forwarded_for_ip']
dd82ffea 2586
9f1a1c36 2587 if self.params.get('check_formats') is True:
282f5709 2588 formats = LazyList(self._check_formats(formats[::-1]), reverse=True)
9f1a1c36 2589
88acdbc2 2590 if not formats or formats[0] is not info_dict:
b3d9ef88
JMF
2591 # Only set the 'formats' field if the original info_dict lists formats;
2592 # otherwise we end up with a circular reference: the first (and only)
f89197d7 2593 # element of the 'formats' field in info_dict would be info_dict itself,
dfb1b146 2594 # which can't be exported to JSON
b3d9ef88 2595 info_dict['formats'] = formats
4ec82a72 2596
2597 info_dict, _ = self.pre_process(info_dict)
2598
6db9c4d5 2599 if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
09b49e1f 2600 return info_dict
2601
2602 self.post_extract(info_dict)
2603 info_dict, _ = self.pre_process(info_dict, 'after_filter')
2604
093a1710 2605 # The pre-processors may have modified the formats
2606 formats = info_dict.get('formats', [info_dict])
2607
fa9f30b8 2608 list_only = self.params.get('simulate') is None and (
2609 self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
2610 interactive_format_selection = not list_only and self.format_selector == '-'
b7b04c78 2611 if self.params.get('list_thumbnails'):
2612 self.list_thumbnails(info_dict)
b7b04c78 2613 if self.params.get('listsubtitles'):
2614 if 'automatic_captions' in info_dict:
2615 self.list_subtitles(
2616 info_dict['id'], automatic_captions, 'automatic captions')
2617 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
fa9f30b8 2618 if self.params.get('listformats') or interactive_format_selection:
b69fd25c 2619 self.list_formats(info_dict)
169dbde9 2620 if list_only:
b7b04c78 2621 # Without this printing, -F --print-json will not work
169dbde9 2622 self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
c487cf00 2623 return info_dict
bfaae0a7 2624
187986a8 2625 format_selector = self.format_selector
2626 if format_selector is None:
0017d9ad 2627 req_format = self._default_format_spec(info_dict, download=download)
0760b0a7 2628 self.write_debug('Default format spec: %s' % req_format)
187986a8 2629 format_selector = self.build_format_selector(req_format)
317f7ab6 2630
fa9f30b8 2631 while True:
2632 if interactive_format_selection:
2633 req_format = input(
2634 self._format_screen('\nEnter format selector: ', self.Styles.EMPHASIS))
2635 try:
2636 format_selector = self.build_format_selector(req_format)
2637 except SyntaxError as err:
2638 self.report_error(err, tb=False, is_error=False)
2639 continue
2640
85e801a9 2641 formats_to_download = list(format_selector({
fa9f30b8 2642 'formats': formats,
85e801a9 2643 'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
2644 'incomplete_formats': (
2645 # All formats are video-only or
2646 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
2647 # all formats are audio-only
2648 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
2649 }))
fa9f30b8 2650 if interactive_format_selection and not formats_to_download:
2651 self.report_error('Requested format is not available', tb=False, is_error=False)
2652 continue
2653 break
317f7ab6 2654
dd82ffea 2655 if not formats_to_download:
b7da73eb 2656 if not self.params.get('ignore_no_formats_error'):
c0b6e5c7 2657 raise ExtractorError(
2658 'Requested format is not available. Use --list-formats for a list of available formats',
2659 expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
b62fa6d7 2660 self.report_warning('Requested format is not available')
2661 # Process what we can, even without any available formats.
2662 formats_to_download = [{}]
a13e6848 2663
5ec1b6b7 2664 requested_ranges = self.params.get('download_ranges')
2665 if requested_ranges:
2666 requested_ranges = tuple(requested_ranges(info_dict, self))
2667
2668 best_format, downloaded_formats = formats_to_download[-1], []
b62fa6d7 2669 if download:
2670 if best_format:
5ec1b6b7 2671 def to_screen(*msg):
2672 self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')
2673
2674 to_screen(f'Downloading {len(formats_to_download)} format(s):',
2675 (f['format_id'] for f in formats_to_download))
2676 if requested_ranges:
2677 to_screen(f'Downloading {len(requested_ranges)} time ranges:',
2678 (f'{int(c["start_time"])}-{int(c["end_time"])}' for c in requested_ranges))
a13e6848 2679 max_downloads_reached = False
5ec1b6b7 2680
2681 for fmt, chapter in itertools.product(formats_to_download, requested_ranges or [{}]):
2682 new_info = self._copy_infodict(info_dict)
b7da73eb 2683 new_info.update(fmt)
3975b4d2 2684 offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
2685 if chapter or offset:
5ec1b6b7 2686 new_info.update({
3975b4d2 2687 'section_start': offset + chapter.get('start_time', 0),
bc401608 2688 'section_end': offset + min(chapter.get('end_time', duration), duration),
5ec1b6b7 2689 'section_title': chapter.get('title'),
2690 'section_number': chapter.get('index'),
2691 })
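# NOTE (added, illustrative): for a video that already has section_start = 60 and a
# requested range of 10-20 s, the effective section becomes 70-80, with the end time
# capped to the video duration before the offset is applied.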
2692 downloaded_formats.append(new_info)
a13e6848 2693 try:
2694 self.process_info(new_info)
2695 except MaxDownloadsReached:
2696 max_downloads_reached = True
415f8d51 2697 self._raise_pending_errors(new_info)
f46e2f9d 2698 # Remove copied info
2699 for key, val in tuple(new_info.items()):
2700 if info_dict.get(key) == val:
2701 new_info.pop(key)
a13e6848 2702 if max_downloads_reached:
2703 break
ebed8b37 2704
5ec1b6b7 2705 write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
a13e6848 2706 assert write_archive.issubset({True, False, 'ignore'})
2707 if True in write_archive and False not in write_archive:
2708 self.record_download_archive(info_dict)
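# NOTE (added): the archive is written only when at least one downloaded format set
# __write_download_archive to True and none set it to False; 'ignore' entries neither
# trigger nor block recording.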
be72c624 2709
5ec1b6b7 2710 info_dict['requested_downloads'] = downloaded_formats
ed5835b4 2711 info_dict = self.run_all_pps('after_video', info_dict)
a13e6848 2712 if max_downloads_reached:
2713 raise MaxDownloadsReached()
ebed8b37 2714
49a57e70 2715 # We update the info dict with the selected best quality format (backwards compatibility)
be72c624 2716 info_dict.update(best_format)
dd82ffea
JMF
2717 return info_dict
2718
98c70d6f 2719 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
a504ced0 2720 """Select the requested subtitles and their format"""
d8a58ddc 2721 available_subs, normal_sub_langs = {}, []
98c70d6f
JMF
2722 if normal_subtitles and self.params.get('writesubtitles'):
2723 available_subs.update(normal_subtitles)
d8a58ddc 2724 normal_sub_langs = tuple(normal_subtitles.keys())
98c70d6f
JMF
2725 if automatic_captions and self.params.get('writeautomaticsub'):
2726 for lang, cap_info in automatic_captions.items():
360e1ca5
JMF
2727 if lang not in available_subs:
2728 available_subs[lang] = cap_info
2729
4d171848
JMF
2730 if (not self.params.get('writesubtitles')
2731 and not self.params.get('writeautomaticsub')
2732 or not available_subs):
2733 return None
a504ced0 2734
d8a58ddc 2735 all_sub_langs = tuple(available_subs.keys())
a504ced0 2736 if self.params.get('allsubtitles', False):
c32b0aab 2737 requested_langs = all_sub_langs
2738 elif self.params.get('subtitleslangs', False):
77c4a9ef 2739 # A list is used so that the order of languages will be the same as
2740 # given in subtitleslangs. See https://github.com/yt-dlp/yt-dlp/issues/1041
2741 requested_langs = []
2742 for lang_re in self.params.get('subtitleslangs'):
77c4a9ef 2743 discard = lang_re[0] == '-'
c32b0aab 2744 if discard:
77c4a9ef 2745 lang_re = lang_re[1:]
3aa91540 2746 if lang_re == 'all':
2747 if discard:
2748 requested_langs = []
2749 else:
2750 requested_langs.extend(all_sub_langs)
2751 continue
77c4a9ef 2752 current_langs = filter(re.compile(lang_re + '$').match, all_sub_langs)
c32b0aab 2753 if discard:
2754 for lang in current_langs:
77c4a9ef 2755 while lang in requested_langs:
2756 requested_langs.remove(lang)
c32b0aab 2757 else:
77c4a9ef 2758 requested_langs.extend(current_langs)
2759 requested_langs = orderedSet(requested_langs)
d8a58ddc 2760 elif normal_sub_langs:
2761 requested_langs = ['en'] if 'en' in normal_sub_langs else normal_sub_langs[:1]
a504ced0 2762 else:
d8a58ddc 2763 requested_langs = ['en'] if 'en' in all_sub_langs else all_sub_langs[:1]
ad3dc496 2764 if requested_langs:
2765 self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))
a504ced0
JMF
2766
2767 formats_query = self.params.get('subtitlesformat', 'best')
2768 formats_preference = formats_query.split('/') if formats_query else []
2769 subs = {}
2770 for lang in requested_langs:
2771 formats = available_subs.get(lang)
2772 if formats is None:
86e5f3ed 2773 self.report_warning(f'{lang} subtitles not available for {video_id}')
a504ced0 2774 continue
a504ced0
JMF
2775 for ext in formats_preference:
2776 if ext == 'best':
2777 f = formats[-1]
2778 break
2779 matches = list(filter(lambda f: f['ext'] == ext, formats))
2780 if matches:
2781 f = matches[-1]
2782 break
2783 else:
2784 f = formats[-1]
2785 self.report_warning(
2786 'No subtitle format found matching "%s" for language %s, '
2787 'using %s' % (formats_query, lang, f['ext']))
2788 subs[lang] = f
2789 return subs
2790
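# NOTE (added, illustrative): in process_subtitles above, subtitleslangs = ['en.*', '-en-GB']
# first adds every available language matching 'en.*' and then removes 'en-GB'; the special
# value 'all' selects every language and '-all' clears the selection made so far.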
bb66c247 2791 def _forceprint(self, key, info_dict):
2792 if info_dict is None:
2793 return
2794 info_copy = info_dict.copy()
2795 info_copy['formats_table'] = self.render_formats_table(info_dict)
2796 info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
2797 info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
2798 info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))
2799
2800 def format_tmpl(tmpl):
2801 mobj = re.match(r'\w+(=?)$', tmpl)
2802 if mobj and mobj.group(1):
2803 return f'{tmpl[:-1]} = %({tmpl[:-1]})r'
2804 elif mobj:
2805 return f'%({tmpl})s'
2806 return tmpl
8130779d 2807
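# NOTE (added, illustrative): format_tmpl('title') expands to '%(title)s',
# format_tmpl('title=') to 'title = %(title)r', and any other template (e.g. one
# already containing '%(...)s') is passed through unchanged.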
bb66c247 2808 for tmpl in self.params['forceprint'].get(key, []):
2809 self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))
2810
2811 for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
5127e92a 2812 filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
bb66c247 2813 tmpl = format_tmpl(tmpl)
2814 self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
8d93e69d 2815 if self._ensure_dir_exists(filename):
86e5f3ed 2816 with open(filename, 'a', encoding='utf-8') as f:
8d93e69d 2817 f.write(self.evaluate_outtmpl(tmpl, info_copy) + '\n')
ca30f449 2818
d06daf23 2819 def __forced_printings(self, info_dict, filename, incomplete):
53c18592 2820 def print_mandatory(field, actual_field=None):
2821 if actual_field is None:
2822 actual_field = field
d06daf23 2823 if (self.params.get('force%s' % field, False)
53c18592 2824 and (not incomplete or info_dict.get(actual_field) is not None)):
2825 self.to_stdout(info_dict[actual_field])
d06daf23
S
2826
2827 def print_optional(field):
2828 if (self.params.get('force%s' % field, False)
2829 and info_dict.get(field) is not None):
2830 self.to_stdout(info_dict[field])
2831
53c18592 2832 info_dict = info_dict.copy()
2833 if filename is not None:
2834 info_dict['filename'] = filename
2835 if info_dict.get('requested_formats') is not None:
2836 # For RTMP URLs, also include the playpath
2837 info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
10331a26 2838 elif info_dict.get('url'):
53c18592 2839 info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')
2840
bb66c247 2841 if (self.params.get('forcejson')
2842 or self.params['forceprint'].get('video')
2843 or self.params['print_to_file'].get('video')):
2b8a2973 2844 self.post_extract(info_dict)
bb66c247 2845 self._forceprint('video', info_dict)
53c18592 2846
d06daf23
S
2847 print_mandatory('title')
2848 print_mandatory('id')
53c18592 2849 print_mandatory('url', 'urls')
d06daf23
S
2850 print_optional('thumbnail')
2851 print_optional('description')
53c18592 2852 print_optional('filename')
b868936c 2853 if self.params.get('forceduration') and info_dict.get('duration') is not None:
d06daf23
S
2854 self.to_stdout(formatSeconds(info_dict['duration']))
2855 print_mandatory('format')
53c18592 2856
2b8a2973 2857 if self.params.get('forcejson'):
6e84b215 2858 self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
d06daf23 2859
e8e73840 2860 def dl(self, name, info, subtitle=False, test=False):
88acdbc2 2861 if not info.get('url'):
1151c407 2862 self.raise_no_formats(info, True)
e8e73840 2863
2864 if test:
2865 verbose = self.params.get('verbose')
2866 params = {
2867 'test': True,
a169858f 2868 'quiet': self.params.get('quiet') or not verbose,
e8e73840 2869 'verbose': verbose,
2870 'noprogress': not verbose,
2871 'nopart': True,
2872 'skip_unavailable_fragments': False,
2873 'keep_fragments': False,
2874 'overwrites': True,
2875 '_no_ytdl_file': True,
2876 }
2877 else:
2878 params = self.params
96fccc10 2879 fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
e8e73840 2880 if not test:
2881 for ph in self._progress_hooks:
2882 fd.add_progress_hook(ph)
42676437
M
2883 urls = '", "'.join(
2884 (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
2885 for f in info.get('requested_formats', []) or [info])
3a408f9d 2886 self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')
03b4de72 2887
adbc4ec4
THD
2888 # Note: Ideally, info should be deep-copied so that hooks cannot modify it,
2889 # but it may contain objects that are not deep-copyable
2890 new_info = self._copy_infodict(info)
e8e73840 2891 if new_info.get('http_headers') is None:
2892 new_info['http_headers'] = self._calc_headers(new_info)
2893 return fd.download(name, new_info, subtitle)
2894
e04938ab 2895 def existing_file(self, filepaths, *, default_overwrite=True):
2896 existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
2897 if existing_files and not self.params.get('overwrites', default_overwrite):
2898 return existing_files[0]
2899
2900 for file in existing_files:
2901 self.report_file_delete(file)
2902 os.remove(file)
2903 return None
2904
8222d8de 2905 def process_info(self, info_dict):
09b49e1f 2906 """Process a single resolved IE result. (Modifies it in-place)"""
8222d8de
JMF
2907
2908 assert info_dict.get('_type', 'video') == 'video'
f46e2f9d 2909 original_infodict = info_dict
fd288278 2910
4513a41a 2911 if 'format' not in info_dict and 'ext' in info_dict:
8222d8de
JMF
2912 info_dict['format'] = info_dict['ext']
2913
09b49e1f 2914 # This is mostly just for backward compatibility of process_info
2915 # As a side-effect, this allows for format-specific filters
c77495e3 2916 if self._match_entry(info_dict) is not None:
9e907ebd 2917 info_dict['__write_download_archive'] = 'ignore'
8222d8de
JMF
2918 return
2919
09b49e1f 2920 # Does nothing under normal operation - for backward compatibility of process_info
277d6ff5 2921 self.post_extract(info_dict)
0c14d66a 2922 self._num_downloads += 1
8222d8de 2923
dcf64d43 2924 # info_dict['_filename'] needs to be set for backward compatibility
de6000d9 2925 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2926 temp_filename = self.prepare_filename(info_dict, 'temp')
0202b52a 2927 files_to_move = {}
8222d8de
JMF
2928
2929 # Forced printings
4513a41a 2930 self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
8222d8de 2931
ca6d59d2 2932 def check_max_downloads():
2933 if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
2934 raise MaxDownloadsReached()
2935
b7b04c78 2936 if self.params.get('simulate'):
9e907ebd 2937 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
ca6d59d2 2938 check_max_downloads()
8222d8de
JMF
2939 return
2940
de6000d9 2941 if full_filename is None:
8222d8de 2942 return
e92caff5 2943 if not self._ensure_dir_exists(encodeFilename(full_filename)):
0202b52a 2944 return
e92caff5 2945 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
8222d8de
JMF
2946 return
2947
80c03fa9 2948 if self._write_description('video', info_dict,
2949 self.prepare_filename(info_dict, 'description')) is None:
2950 return
2951
2952 sub_files = self._write_subtitles(info_dict, temp_filename)
2953 if sub_files is None:
2954 return
2955 files_to_move.update(dict(sub_files))
2956
2957 thumb_files = self._write_thumbnails(
2958 'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
2959 if thumb_files is None:
2960 return
2961 files_to_move.update(dict(thumb_files))
8222d8de 2962
80c03fa9 2963 infofn = self.prepare_filename(info_dict, 'infojson')
2964 _infojson_written = self._write_info_json('video', info_dict, infofn)
2965 if _infojson_written:
dac5df5a 2966 info_dict['infojson_filename'] = infofn
e75bb0d6 2967 # For backward compatibility, even though it was a private field
80c03fa9 2968 info_dict['__infojson_filename'] = infofn
2969 elif _infojson_written is None:
2970 return
2971
2972 # Note: Annotations are deprecated
2973 annofn = None
1fb07d10 2974 if self.params.get('writeannotations', False):
de6000d9 2975 annofn = self.prepare_filename(info_dict, 'annotation')
80c03fa9 2976 if annofn:
e92caff5 2977 if not self._ensure_dir_exists(encodeFilename(annofn)):
0202b52a 2978 return
0c3d0f51 2979 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
6febd1c1 2980 self.to_screen('[info] Video annotations are already present')
ffddb112
RA
2981 elif not info_dict.get('annotations'):
2982 self.report_warning('There are no annotations to write.')
7b6fefc9
PH
2983 else:
2984 try:
6febd1c1 2985 self.to_screen('[info] Writing video annotations to: ' + annofn)
86e5f3ed 2986 with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
7b6fefc9
PH
2987 annofile.write(info_dict['annotations'])
2988 except (KeyError, TypeError):
6febd1c1 2989 self.report_warning('There are no annotations to write.')
86e5f3ed 2990 except OSError:
6febd1c1 2991 self.report_error('Cannot write annotations file: ' + annofn)
7b6fefc9 2992 return
1fb07d10 2993
732044af 2994 # Write internet shortcut files
08438d2c 2995 def _write_link_file(link_type):
60f3e995 2996 url = try_get(info_dict['webpage_url'], iri_to_uri)
2997 if not url:
2998 self.report_warning(
2999 f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
3000 return True
08438d2c 3001 linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
0e6b018a
Z
3002 if not self._ensure_dir_exists(encodeFilename(linkfn)):
3003 return False
10e3742e 3004 if self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
08438d2c 3005 self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
3006 return True
3007 try:
3008 self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
86e5f3ed 3009 with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
3010 newline='\r\n' if link_type == 'url' else '\n') as linkfile:
60f3e995 3011 template_vars = {'url': url}
08438d2c 3012 if link_type == 'desktop':
3013 template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
3014 linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
86e5f3ed 3015 except OSError:
08438d2c 3016 self.report_error(f'Cannot write internet shortcut {linkfn}')
3017 return False
732044af 3018 return True
3019
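# NOTE (added): '.url' shortcuts are written with CRLF line endings, and 'desktop'
# entries additionally receive a 'filename' template variable derived from the
# shortcut's own filename (without its extension).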
08438d2c 3020 write_links = {
3021 'url': self.params.get('writeurllink'),
3022 'webloc': self.params.get('writewebloclink'),
3023 'desktop': self.params.get('writedesktoplink'),
3024 }
3025 if self.params.get('writelink'):
3026 link_type = ('webloc' if sys.platform == 'darwin'
3027 else 'desktop' if sys.platform.startswith('linux')
3028 else 'url')
3029 write_links[link_type] = True
3030
3031 if any(should_write and not _write_link_file(link_type)
3032 for link_type, should_write in write_links.items()):
3033 return
732044af 3034
f46e2f9d 3035 def replace_info_dict(new_info):
3036 nonlocal info_dict
3037 if new_info == info_dict:
3038 return
3039 info_dict.clear()
3040 info_dict.update(new_info)
3041
415f8d51 3042 new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
3043 replace_info_dict(new_info)
56d868db 3044
a13e6848 3045 if self.params.get('skip_download'):
56d868db 3046 info_dict['filepath'] = temp_filename
3047 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3048 info_dict['__files_to_move'] = files_to_move
f46e2f9d 3049 replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
9e907ebd 3050 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
56d868db 3051 else:
3052 # Download
b868936c 3053 info_dict.setdefault('__postprocessors', [])
4340deca 3054 try:
0202b52a 3055
e04938ab 3056 def existing_video_file(*filepaths):
6b591b29 3057 ext = info_dict.get('ext')
e04938ab 3058 converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
3059 file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
3060 default_overwrite=False)
3061 if file:
3062 info_dict['ext'] = os.path.splitext(file)[1][1:]
3063 return file
0202b52a 3064
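# NOTE (added): existing_video_file checks the post-conversion name (using final_ext,
# if set) before the originally downloaded name for each candidate path, so an
# already-converted file is preferred over downloading again.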
7b2c3f47 3065 fd, success = None, True
fccf90e7 3066 if info_dict.get('protocol') or info_dict.get('url'):
56ba69e4 3067 fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
3068 if fd is not FFmpegFD and (
3069 info_dict.get('section_start') or info_dict.get('section_end')):
7b2c3f47 3070 msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
56ba69e4 3071 else 'You have requested downloading the video partially, but ffmpeg is not installed')
3072 self.report_error(f'{msg}. Aborting')
5ec1b6b7 3073 return
5ec1b6b7 3074
4340deca 3075 if info_dict.get('requested_formats') is not None:
81cd954a 3076 requested_formats = info_dict['requested_formats']
0202b52a 3077 old_ext = info_dict['ext']
4e3b637d 3078 if self.params.get('merge_output_format') is None:
4e3b637d 3079 if (info_dict['ext'] == 'webm'
3080 and info_dict.get('thumbnails')
3081 # check with type instead of pp_key, __name__, or isinstance
3082 # since we don't want any custom PPs to trigger this
c487cf00 3083 and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])): # noqa: E721
4e3b637d 3084 info_dict['ext'] = 'mkv'
3085 self.report_warning(
3086 'webm doesn\'t support embedding a thumbnail, mkv will be used')
124bc071 3087 new_ext = info_dict['ext']
0202b52a 3088
124bc071 3089 def correct_ext(filename, ext=new_ext):
96fccc10 3090 if filename == '-':
3091 return filename
0202b52a 3092 filename_real_ext = os.path.splitext(filename)[1][1:]
3093 filename_wo_ext = (
3094 os.path.splitext(filename)[0]
124bc071 3095 if filename_real_ext in (old_ext, new_ext)
0202b52a 3096 else filename)
86e5f3ed 3097 return f'{filename_wo_ext}.{ext}'
0202b52a 3098
38c6902b 3099 # Ensure filename always has a correct extension for successful merge
0202b52a 3100 full_filename = correct_ext(full_filename)
3101 temp_filename = correct_ext(temp_filename)
e04938ab 3102 dl_filename = existing_video_file(full_filename, temp_filename)
1ea24129 3103 info_dict['__real_download'] = False
18e674b4 3104
7b2c3f47 3105 merger = FFmpegMergerPP(self)
adbc4ec4 3106 downloaded = []
dbf5416a 3107 if dl_filename is not None:
6c7274ec 3108 self.report_file_already_downloaded(dl_filename)
adbc4ec4
THD
3109 elif fd:
3110 for f in requested_formats if fd != FFmpegFD else []:
3111 f['filepath'] = fname = prepend_extension(
3112 correct_ext(temp_filename, info_dict['ext']),
3113 'f%s' % f['format_id'], info_dict['ext'])
3114 downloaded.append(fname)
dbf5416a 3115 info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
3116 success, real_download = self.dl(temp_filename, info_dict)
3117 info_dict['__real_download'] = real_download
18e674b4 3118 else:
18e674b4 3119 if self.params.get('allow_unplayable_formats'):
3120 self.report_warning(
3121 'You have requested merging of multiple formats '
3122 'while also allowing unplayable formats to be downloaded. '
3123 'The formats won\'t be merged to prevent data corruption.')
3124 elif not merger.available:
e8969bda 3125 msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
3126 if not self.params.get('ignoreerrors'):
3127 self.report_error(f'{msg}. Aborting due to --abort-on-error')
3128 return
3129 self.report_warning(f'{msg}. The formats won\'t be merged')
18e674b4 3130
96fccc10 3131 if temp_filename == '-':
adbc4ec4 3132 reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
96fccc10 3133 else 'but the formats are incompatible for simultaneous download' if merger.available
3134 else 'but ffmpeg is not installed')
3135 self.report_warning(
3136 f'You have requested downloading multiple formats to stdout {reason}. '
3137 'The formats will be streamed one after the other')
3138 fname = temp_filename
dbf5416a 3139 for f in requested_formats:
3140 new_info = dict(info_dict)
3141 del new_info['requested_formats']
3142 new_info.update(f)
96fccc10 3143 if temp_filename != '-':
124bc071 3144 fname = prepend_extension(
3145 correct_ext(temp_filename, new_info['ext']),
3146 'f%s' % f['format_id'], new_info['ext'])
96fccc10 3147 if not self._ensure_dir_exists(fname):
3148 return
a21e0ab1 3149 f['filepath'] = fname
96fccc10 3150 downloaded.append(fname)
dbf5416a 3151 partial_success, real_download = self.dl(fname, new_info)
3152 info_dict['__real_download'] = info_dict['__real_download'] or real_download
3153 success = success and partial_success
adbc4ec4
THD
3154
3155 if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
3156 info_dict['__postprocessors'].append(merger)
3157 info_dict['__files_to_merge'] = downloaded
3158 # Even if no files were actually (re)downloaded, the merge itself only happens now
3159 info_dict['__real_download'] = True
3160 else:
3161 for file in downloaded:
3162 files_to_move[file] = None
4340deca
P
3163 else:
3164 # Just a single file
e04938ab 3165 dl_filename = existing_video_file(full_filename, temp_filename)
6c7274ec 3166 if dl_filename is None or dl_filename == temp_filename:
3167 # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
3168 # So we should try to resume the download
e8e73840 3169 success, real_download = self.dl(temp_filename, info_dict)
0202b52a 3170 info_dict['__real_download'] = real_download
6c7274ec 3171 else:
3172 self.report_file_already_downloaded(dl_filename)
0202b52a 3173
0202b52a 3174 dl_filename = dl_filename or temp_filename
c571435f 3175 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
0202b52a 3176
3158150c 3177 except network_exceptions as err:
7960b056 3178 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
4340deca 3179 return
86e5f3ed 3180 except OSError as err:
4340deca
P
3181 raise UnavailableVideoError(err)
3182 except (ContentTooShortError, ) as err:
86e5f3ed 3183 self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
4340deca 3184 return
8222d8de 3185
415f8d51 3186 self._raise_pending_errors(info_dict)
de6000d9 3187 if success and full_filename != '-':
f17f8651 3188
fd7cfb64 3189 def fixup():
3190 do_fixup = True
3191 fixup_policy = self.params.get('fixup')
3192 vid = info_dict['id']
3193
3194 if fixup_policy in ('ignore', 'never'):
3195 return
3196 elif fixup_policy == 'warn':
3fe75fdc 3197 do_fixup = 'warn'
f89b3e2d 3198 elif fixup_policy != 'force':
3199 assert fixup_policy in ('detect_or_warn', None)
3200 if not info_dict.get('__real_download'):
3201 do_fixup = False
fd7cfb64 3202
3203 def ffmpeg_fixup(cndn, msg, cls):
3fe75fdc 3204 if not (do_fixup and cndn):
fd7cfb64 3205 return
3fe75fdc 3206 elif do_fixup == 'warn':
fd7cfb64 3207 self.report_warning(f'{vid}: {msg}')
3208 return
3209 pp = cls(self)
3210 if pp.available:
3211 info_dict['__postprocessors'].append(pp)
3212 else:
3213 self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
3214
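# NOTE (added): with fixup='warn' the issues detected below are only reported; with
# 'detect_or_warn' (or the unset default, which behaves the same here) a fixup
# postprocessor is queued only when the file was actually downloaded in this run.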
3215 stretched_ratio = info_dict.get('stretched_ratio')
ca9def71
LNO
3216 ffmpeg_fixup(stretched_ratio not in (1, None),
3217 f'Non-uniform pixel ratio {stretched_ratio}',
3218 FFmpegFixupStretchedPP)
fd7cfb64 3219
993191c0 3220 downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
24146491 3221 downloader = downloader.FD_NAME if downloader else None
adbc4ec4 3222
ca9def71
LNO
3223 ext = info_dict.get('ext')
3224 postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
3225 isinstance(pp, FFmpegVideoConvertorPP)
3226 and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
3227 ) for pp in self._pps['post_process'])
3228
3229 if not postprocessed_by_ffmpeg:
3230 ffmpeg_fixup(ext == 'm4a' and info_dict.get('container') == 'm4a_dash',
f2df4071 3231 'writing DASH m4a. Only some players support this container',
3232 FFmpegFixupM4aPP)
24146491 3233 ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
494f5230 3234 or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
adbc4ec4
THD
3235 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
3236 FFmpegFixupM3u8PP)
3237 ffmpeg_fixup(info_dict.get('is_live') and downloader == 'DashSegmentsFD',
3238 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)
3239
24146491 3240 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
3241 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)
fd7cfb64 3242
3243 fixup()
8222d8de 3244 try:
f46e2f9d 3245 replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
af819c21 3246 except PostProcessingError as err:
3247 self.report_error('Postprocessing: %s' % str(err))
8222d8de 3248 return
ab8e5e51
AM
3249 try:
3250 for ph in self._post_hooks:
23c1a667 3251 ph(info_dict['filepath'])
ab8e5e51
AM
3252 except Exception as err:
3253 self.report_error('post hooks: %s' % str(err))
3254 return
9e907ebd 3255 info_dict['__write_download_archive'] = True
2d30509f 3256
c487cf00 3257 assert info_dict is original_infodict # Make sure the info_dict was modified in-place
a13e6848 3258 if self.params.get('force_write_download_archive'):
9e907ebd 3259 info_dict['__write_download_archive'] = True
ca6d59d2 3260 check_max_downloads()
8222d8de 3261
aa9369a2 3262 def __download_wrapper(self, func):
3263 @functools.wraps(func)
3264 def wrapper(*args, **kwargs):
3265 try:
3266 res = func(*args, **kwargs)
3267 except UnavailableVideoError as e:
3268 self.report_error(e)
b222c271 3269 except DownloadCancelled as e:
3270 self.to_screen(f'[info] {e}')
3271 if not self.params.get('break_per_url'):
3272 raise
aa9369a2 3273 else:
3274 if self.params.get('dump_single_json', False):
3275 self.post_extract(res)
3276 self.to_stdout(json.dumps(self.sanitize_info(res)))
3277 return wrapper
3278
8222d8de
JMF
3279 def download(self, url_list):
3280 """Download a given list of URLs."""
aa9369a2 3281 url_list = variadic(url_list) # Passing a single URL is a common mistake
bf1824b3 3282 outtmpl = self.params['outtmpl']['default']
3089bc74
S
3283 if (len(url_list) > 1
3284 and outtmpl != '-'
3285 and '%' not in outtmpl
3286 and self.params.get('max_downloads') != 1):
acd69589 3287 raise SameFileError(outtmpl)
8222d8de
JMF
3288
3289 for url in url_list:
aa9369a2 3290 self.__download_wrapper(self.extract_info)(
3291 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
8222d8de
JMF
3292
3293 return self._download_retcode
3294
1dcc4c0c 3295 def download_with_info_file(self, info_filename):
31bd3925
JMF
3296 with contextlib.closing(fileinput.FileInput(
3297 [info_filename], mode='r',
3298 openhook=fileinput.hook_encoded('utf-8'))) as f:
3299 # FileInput doesn't have a read method, so we can't call json.load
8012d892 3300 info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
d4943898 3301 try:
aa9369a2 3302 self.__download_wrapper(self.process_ie_result)(info, download=True)
f2ebc5c7 3303 except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
bf5f605e 3304 if not isinstance(e, EntryNotInPlaylist):
3305 self.to_stderr('\r')
d4943898
JMF
3306 webpage_url = info.get('webpage_url')
3307 if webpage_url is not None:
aa9369a2 3308 self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
d4943898
JMF
3309 return self.download([webpage_url])
3310 else:
3311 raise
3312 return self._download_retcode
1dcc4c0c 3313
cb202fd2 3314 @staticmethod
8012d892 3315 def sanitize_info(info_dict, remove_private_keys=False):
3316 ''' Sanitize the infodict for converting to json '''
3ad56b42 3317 if info_dict is None:
3318 return info_dict
6e84b215 3319 info_dict.setdefault('epoch', int(time.time()))
6a5a30f9 3320 info_dict.setdefault('_type', 'video')
09b49e1f 3321
8012d892 3322 if remove_private_keys:
0a5a191a 3323 reject = lambda k, v: v is None or k.startswith('__') or k in {
f46e2f9d 3324 'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
0a5a191a 3325 'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber',
6e84b215 3326 }
ae8f99e6 3327 else:
09b49e1f 3328 reject = lambda k, v: False
adbc4ec4
THD
3329
3330 def filter_fn(obj):
3331 if isinstance(obj, dict):
3332 return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
3333 elif isinstance(obj, (list, tuple, set, LazyList)):
3334 return list(map(filter_fn, obj))
3335 elif obj is None or isinstance(obj, (str, int, float, bool)):
3336 return obj
3337 else:
3338 return repr(obj)
3339
5226731e 3340 return filter_fn(info_dict)
cb202fd2 3341
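# NOTE (added): sanitize_info's filter_fn recurses through dicts, lists, tuples and sets;
# values that are not str/int/float/bool/None are replaced by their repr() so the result
# stays JSON-serializable.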
8012d892 3342 @staticmethod
3343 def filter_requested_info(info_dict, actually_filter=True):
3344 ''' Alias of sanitize_info for backward compatibility '''
3345 return YoutubeDL.sanitize_info(info_dict, actually_filter)
3346
43d7f5a5 3347 def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
3348 for filename in set(filter(None, files_to_delete)):
3349 if msg:
3350 self.to_screen(msg % filename)
3351 try:
3352 os.remove(filename)
3353 except OSError:
3354 self.report_warning(f'Unable to delete file {filename}')
3355 if filename in info.get('__files_to_move', []): # NB: Delete even if None
3356 del info['__files_to_move'][filename]
3357
ed5835b4 3358 @staticmethod
3359 def post_extract(info_dict):
3360 def actual_post_extract(info_dict):
3361 if info_dict.get('_type') in ('playlist', 'multi_video'):
3362 for video_dict in info_dict.get('entries', {}):
3363 actual_post_extract(video_dict or {})
3364 return
3365
09b49e1f 3366 post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
3367 info_dict.update(post_extractor())
ed5835b4 3368
3369 actual_post_extract(info_dict or {})
3370
dcf64d43 3371 def run_pp(self, pp, infodict):
5bfa4862 3372 files_to_delete = []
dcf64d43 3373 if '__files_to_move' not in infodict:
3374 infodict['__files_to_move'] = {}
b1940459 3375 try:
3376 files_to_delete, infodict = pp.run(infodict)
3377 except PostProcessingError as e:
3378 # Must be True and not 'only_download'
3379 if self.params.get('ignoreerrors') is True:
3380 self.report_error(e)
3381 return infodict
3382 raise
3383
5bfa4862 3384 if not files_to_delete:
dcf64d43 3385 return infodict
5bfa4862 3386 if self.params.get('keepvideo', False):
3387 for f in files_to_delete:
dcf64d43 3388 infodict['__files_to_move'].setdefault(f, '')
5bfa4862 3389 else:
43d7f5a5 3390 self._delete_downloaded_files(
3391 *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
dcf64d43 3392 return infodict
5bfa4862 3393
ed5835b4 3394 def run_all_pps(self, key, info, *, additional_pps=None):
bb66c247 3395 self._forceprint(key, info)
ed5835b4 3396 for pp in (additional_pps or []) + self._pps[key]:
dc5f409c 3397 info = self.run_pp(pp, info)
ed5835b4 3398 return info
277d6ff5 3399
56d868db 3400 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
5bfa4862 3401 info = dict(ie_info)
56d868db 3402 info['__files_to_move'] = files_to_move or {}
415f8d51 3403 try:
3404 info = self.run_all_pps(key, info)
3405 except PostProcessingError as err:
3406 msg = f'Preprocessing: {err}'
3407 info.setdefault('__pending_error', msg)
3408 self.report_error(msg, is_error=False)
56d868db 3409 return info, info.pop('__files_to_move', None)
5bfa4862 3410
f46e2f9d 3411 def post_process(self, filename, info, files_to_move=None):
8222d8de 3412 """Run all the postprocessors on the given file."""
8222d8de 3413 info['filepath'] = filename
dcf64d43 3414 info['__files_to_move'] = files_to_move or {}
ed5835b4 3415 info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
dcf64d43 3416 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
3417 del info['__files_to_move']
ed5835b4 3418 return self.run_all_pps('after_move', info)
c1c9a79c 3419
5db07df6 3420 def _make_archive_id(self, info_dict):
e9fef7ee
S
3421 video_id = info_dict.get('id')
3422 if not video_id:
3423 return
5db07df6
PH
3424 # Future-proof against any change in case
3425 # and for backwards compatibility with prior versions
e9fef7ee 3426 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
7012b23c 3427 if extractor is None:
1211bb6d
S
3428 url = str_or_none(info_dict.get('url'))
3429 if not url:
3430 return
e9fef7ee 3431 # Try to find matching extractor for the URL and take its ie_key
8b7491c8 3432 for ie_key, ie in self._ies.items():
1211bb6d 3433 if ie.suitable(url):
8b7491c8 3434 extractor = ie_key
e9fef7ee
S
3435 break
3436 else:
3437 return
0647d925 3438 return make_archive_id(extractor, video_id)
5db07df6
PH
3439
3440 def in_download_archive(self, info_dict):
3441 fn = self.params.get('download_archive')
3442 if fn is None:
3443 return False
3444
1e8fe57e 3445 vid_ids = [self._make_archive_id(info_dict)]
3446 vid_ids.extend(info_dict.get('_old_archive_ids', []))
3447 return any(id_ in self.archive for id_ in vid_ids)
c1c9a79c
PH
3448
3449 def record_download_archive(self, info_dict):
3450 fn = self.params.get('download_archive')
3451 if fn is None:
3452 return
5db07df6
PH
3453 vid_id = self._make_archive_id(info_dict)
3454 assert vid_id
a13e6848 3455 self.write_debug(f'Adding to archive: {vid_id}')
c1c9a79c 3456 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
6febd1c1 3457 archive_file.write(vid_id + '\n')
a45e8619 3458 self.archive.add(vid_id)
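# NOTE (added, illustrative): each archive entry is a single line produced by
# _make_archive_id, typically of the form '<lowercased extractor key> <video id>',
# e.g. 'youtube <some-video-id>'.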
dd82ffea 3459
8c51aa65 3460 @staticmethod
8abeeb94 3461 def format_resolution(format, default='unknown'):
9359f3d4 3462 if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
fb04e403 3463 return 'audio only'
f49d89ee
PH
3464 if format.get('resolution') is not None:
3465 return format['resolution']
35615307 3466 if format.get('width') and format.get('height'):
ff51ed58 3467 return '%dx%d' % (format['width'], format['height'])
35615307 3468 elif format.get('height'):
ff51ed58 3469 return '%sp' % format['height']
35615307 3470 elif format.get('width'):
ff51ed58 3471 return '%dx?' % format['width']
3472 return default
8c51aa65 3473
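# NOTE (added, illustrative): format_resolution({'width': 1280, 'height': 720}) -> '1280x720',
# format_resolution({'height': 720}) -> '720p', format_resolution({'width': 1280}) -> '1280x?',
# and a format with vcodec 'none' but a real acodec -> 'audio only'.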
8130779d 3474 def _list_format_headers(self, *headers):
3475 if self.params.get('listformats_table', True) is not False:
591bb9d3 3476 return [self._format_out(header, self.Styles.HEADERS) for header in headers]
8130779d 3477 return headers
3478
c57f7757
PH
3479 def _format_note(self, fdict):
3480 res = ''
3481 if fdict.get('ext') in ['f4f', 'f4m']:
f304da8a 3482 res += '(unsupported)'
32f90364
PH
3483 if fdict.get('language'):
3484 if res:
3485 res += ' '
f304da8a 3486 res += '[%s]' % fdict['language']
c57f7757 3487 if fdict.get('format_note') is not None:
f304da8a 3488 if res:
3489 res += ' '
3490 res += fdict['format_note']
c57f7757 3491 if fdict.get('tbr') is not None:
f304da8a 3492 if res:
3493 res += ', '
3494 res += '%4dk' % fdict['tbr']
c57f7757
PH
3495 if fdict.get('container') is not None:
3496 if res:
3497 res += ', '
3498 res += '%s container' % fdict['container']
3089bc74
S
3499 if (fdict.get('vcodec') is not None
3500 and fdict.get('vcodec') != 'none'):
c57f7757
PH
3501 if res:
3502 res += ', '
3503 res += fdict['vcodec']
91c7271a 3504 if fdict.get('vbr') is not None:
c57f7757
PH
3505 res += '@'
3506 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
3507 res += 'video@'
3508 if fdict.get('vbr') is not None:
3509 res += '%4dk' % fdict['vbr']
fbb21cf5 3510 if fdict.get('fps') is not None:
5d583bdf
S
3511 if res:
3512 res += ', '
3513 res += '%sfps' % fdict['fps']
c57f7757
PH
3514 if fdict.get('acodec') is not None:
3515 if res:
3516 res += ', '
3517 if fdict['acodec'] == 'none':
3518 res += 'video only'
3519 else:
3520 res += '%-5s' % fdict['acodec']
3521 elif fdict.get('abr') is not None:
3522 if res:
3523 res += ', '
3524 res += 'audio'
3525 if fdict.get('abr') is not None:
3526 res += '@%3dk' % fdict['abr']
3527 if fdict.get('asr') is not None:
3528 res += ' (%5dHz)' % fdict['asr']
3529 if fdict.get('filesize') is not None:
3530 if res:
3531 res += ', '
3532 res += format_bytes(fdict['filesize'])
9732d77e
PH
3533 elif fdict.get('filesize_approx') is not None:
3534 if res:
3535 res += ', '
3536 res += '~' + format_bytes(fdict['filesize_approx'])
c57f7757 3537 return res
91c7271a 3538
8130779d 3539 def render_formats_table(self, info_dict):
b69fd25c 3540 if not info_dict.get('formats') and not info_dict.get('url'):
8130779d 3541 return None
b69fd25c 3542
94badb25 3543 formats = info_dict.get('formats', [info_dict])
8130779d 3544 if self.params.get('listformats_table', True) is False:
76d321f6 3545 table = [
3546 [
3547 format_field(f, 'format_id'),
3548 format_field(f, 'ext'),
3549 self.format_resolution(f),
8130779d 3550 self._format_note(f)
3551 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3552 return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
3553
d816f61f 3554 def simplified_codec(f, field):
3555 assert field in ('acodec', 'vcodec')
3556 codec = f.get(field, 'unknown')
f5ea4748 3557 if not codec:
3558 return 'unknown'
3559 elif codec != 'none':
d816f61f 3560 return '.'.join(codec.split('.')[:4])
3561
3562 if field == 'vcodec' and f.get('acodec') == 'none':
3563 return 'images'
3564 elif field == 'acodec' and f.get('vcodec') == 'none':
3565 return ''
3566 return self._format_out('audio only' if field == 'vcodec' else 'video only',
3567 self.Styles.SUPPRESS)
3568
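# NOTE (added, illustrative): simplified_codec keeps at most the first four dot-separated
# fields of a codec string (e.g. 'av01.0.12M.10.0.110' is shown as 'av01.0.12M.10'); a
# format where both codecs are 'none' shows 'images' in the vcodec column.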
591bb9d3 3569 delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
8130779d 3570 table = [
3571 [
591bb9d3 3572 self._format_out(format_field(f, 'format_id'), self.Styles.ID),
8130779d 3573 format_field(f, 'ext'),
3574 format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
563e0bf8 3575 format_field(f, 'fps', '\t%d', func=round),
8130779d 3576 format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
b8ed0f15 3577 format_field(f, 'audio_channels', '\t%s'),
8130779d 3578 delim,
3579 format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
563e0bf8 3580 format_field(f, 'tbr', '\t%dk', func=round),
8130779d 3581 shorten_protocol_name(f.get('protocol', '')),
3582 delim,
d816f61f 3583 simplified_codec(f, 'vcodec'),
563e0bf8 3584 format_field(f, 'vbr', '\t%dk', func=round),
d816f61f 3585 simplified_codec(f, 'acodec'),
563e0bf8 3586 format_field(f, 'abr', '\t%dk', func=round),
ae61d108 3587 format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
8130779d 3588 join_nonempty(
591bb9d3 3589 self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
8130779d 3590 format_field(f, 'language', '[%s]'),
3591 join_nonempty(format_field(f, 'format_note'),
3592 format_field(f, 'container', ignore=(None, f.get('ext'))),
3593 delim=', '),
3594 delim=' '),
3595 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3596 header_line = self._list_format_headers(
b8ed0f15 3597 'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', 'CH', delim, '\tFILESIZE', '\tTBR', 'PROTO',
8130779d 3598 delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')
3599
3600 return render_table(
3601 header_line, table, hide_empty=True,
591bb9d3 3602 delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
8130779d 3603
3604 def render_thumbnails_table(self, info_dict):
88f23a18 3605 thumbnails = list(info_dict.get('thumbnails') or [])
cfb56d1a 3606 if not thumbnails:
8130779d 3607 return None
3608 return render_table(
ec11a9f4 3609 self._list_format_headers('ID', 'Width', 'Height', 'URL'),
6970b600 3610 [[t.get('id'), t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])
2412044c 3611
8130779d 3612 def render_subtitles_table(self, video_id, subtitles):
2412044c 3613 def _row(lang, formats):
49c258e1 3614 exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
2412044c 3615 if len(set(names)) == 1:
7aee40c1 3616 names = [] if names[0] == 'unknown' else names[:1]
2412044c 3617 return [lang, ', '.join(names), ', '.join(exts)]
3618
8130779d 3619 if not subtitles:
3620 return None
3621 return render_table(
ec11a9f4 3622 self._list_format_headers('Language', 'Name', 'Formats'),
2412044c 3623 [_row(lang, formats) for lang, formats in subtitles.items()],
8130779d 3624 hide_empty=True)
3625
3626 def __list_table(self, video_id, name, func, *args):
3627 table = func(*args)
3628 if not table:
3629 self.to_screen(f'{video_id} has no {name}')
3630 return
3631 self.to_screen(f'[info] Available {name} for {video_id}:')
3632 self.to_stdout(table)
3633
3634 def list_formats(self, info_dict):
3635 self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)
3636
3637 def list_thumbnails(self, info_dict):
3638 self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)
3639
3640 def list_subtitles(self, video_id, subtitles, name='subtitles'):
3641 self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
a504ced0 3642
dca08720
PH
3643 def urlopen(self, req):
3644 """ Start an HTTP download """
f9934b96 3645 if isinstance(req, str):
67dda517 3646 req = sanitized_Request(req)
19a41fc6 3647 return self._opener.open(req, timeout=self._socket_timeout)
dca08720
PH
3648
3649 def print_debug_header(self):
3650 if not self.params.get('verbose'):
3651 return
49a57e70 3652
560738f3 3653 # These imports can be slow. So import them only as needed
3654 from .extractor.extractors import _LAZY_LOADER
3655 from .extractor.extractors import _PLUGIN_CLASSES as plugin_extractors
3656
49a57e70 3657 def get_encoding(stream):
2a938746 3658 ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
49a57e70 3659 if not supports_terminal_sequences(stream):
53973b4d 3660 from .utils import WINDOWS_VT_MODE # Must be imported locally
e3c7d495 3661 ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
49a57e70 3662 return ret
3663
591bb9d3 3664 encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
49a57e70 3665 locale.getpreferredencoding(),
3666 sys.getfilesystemencoding(),
591bb9d3 3667 self.get_encoding(),
3668 ', '.join(
64fa820c 3669 f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
591bb9d3 3670 if stream is not None and key != 'console')
3671 )
883d4b1e 3672
3673 logger = self.params.get('logger')
3674 if logger:
3675 write_debug = lambda msg: logger.debug(f'[debug] {msg}')
3676 write_debug(encoding_str)
3677 else:
96565c7e 3678 write_string(f'[debug] {encoding_str}\n', encoding=None)
49a57e70 3679 write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')
734f90bb 3680
4c88ff87 3681 source = detect_variant()
70b23409 3682 if VARIANT not in (None, 'pip'):
3683 source += '*'
36eaf303 3684 write_debug(join_nonempty(
3685 'yt-dlp version', __version__,
3686 f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
3687 '' if source == 'unknown' else f'({source})',
3688 delim=' '))
6e21fdd2 3689 if not _LAZY_LOADER:
3690 if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
49a57e70 3691 write_debug('Lazy loading extractors is forcibly disabled')
6e21fdd2 3692 else:
49a57e70 3693 write_debug('Lazy loading extractors is disabled')
3ae5e797 3694 if plugin_extractors or plugin_postprocessors:
49a57e70 3695 write_debug('Plugins: %s' % [
3ae5e797 3696 '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
3697 for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
8a82af35 3698 if self.params['compat_opts']:
3699 write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
36eaf303 3700
3701 if source == 'source':
dca08720 3702 try:
f0c9fb96 3703 stdout, _, _ = Popen.run(
36eaf303 3704 ['git', 'rev-parse', '--short', 'HEAD'],
f0c9fb96 3705 text=True, cwd=os.path.dirname(os.path.abspath(__file__)),
3706 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
3707 if re.fullmatch('[0-9a-f]+', stdout.strip()):
3708 write_debug(f'Git HEAD: {stdout.strip()}')
70a1165b 3709 except Exception:
19a03940 3710 with contextlib.suppress(Exception):
36eaf303 3711 sys.exc_clear()
b300cda4 3712
b1f94422 3713 write_debug(system_identifier())
d28b5171 3714
8913ef74 3715 exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
3716 ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
3717 if ffmpeg_features:
19a03940 3718 exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))
8913ef74 3719
4c83c967 3720 exe_versions['rtmpdump'] = rtmpdump_version()
feee8d32 3721 exe_versions['phantomjs'] = PhantomJSwrapper._version()
d28b5171 3722 exe_str = ', '.join(
2831b468 3723 f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
3724 ) or 'none'
49a57e70 3725 write_debug('exe versions: %s' % exe_str)
dca08720 3726
1d485a1a 3727 from .compat.compat_utils import get_package_info
9b8ee23b 3728 from .dependencies import available_dependencies
3729
3730 write_debug('Optional libraries: %s' % (', '.join(sorted({
1d485a1a 3731 join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
9b8ee23b 3732 })) or 'none'))
2831b468 3733
97ec5bc5 3734 self._setup_opener()
dca08720
PH
3735 proxy_map = {}
3736 for handler in self._opener.handlers:
3737 if hasattr(handler, 'proxies'):
3738 proxy_map.update(handler.proxies)
49a57e70 3739 write_debug(f'Proxy map: {proxy_map}')
dca08720 3740
49a57e70 3741 # Not implemented
3742 if False and self.params.get('call_home'):
0f06bcd7 3743 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
49a57e70 3744 write_debug('Public IP address: %s' % ipaddr)
58b1f00d 3745 latest_version = self.urlopen(
0f06bcd7 3746 'https://yt-dl.org/latest/version').read().decode()
58b1f00d
PH
3747 if version_tuple(latest_version) > version_tuple(__version__):
3748 self.report_warning(
3749 'You are using an outdated version (newest version: %s)! '
3750 'See https://yt-dl.org/update if you need help updating.' %
3751 latest_version)
3752
e344693b 3753 def _setup_opener(self):
97ec5bc5 3754 if hasattr(self, '_opener'):
3755 return
6ad14cab 3756 timeout_val = self.params.get('socket_timeout')
17bddf3e 3757 self._socket_timeout = 20 if timeout_val is None else float(timeout_val)
6ad14cab 3758
982ee69a 3759 opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
dca08720
PH
3760 opts_cookiefile = self.params.get('cookiefile')
3761 opts_proxy = self.params.get('proxy')
3762
982ee69a 3763 self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
dca08720 3764
6a3f4c3f 3765 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
dca08720
PH
3766 if opts_proxy is not None:
3767 if opts_proxy == '':
3768 proxies = {}
3769 else:
3770 proxies = {'http': opts_proxy, 'https': opts_proxy}
3771 else:
ac668111 3772 proxies = urllib.request.getproxies()
067aa17e 3773 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
dca08720
PH
3774 if 'http' in proxies and 'https' not in proxies:
3775 proxies['https'] = proxies['http']
91410c9b 3776 proxy_handler = PerRequestProxyHandler(proxies)
a0ddb8a2
PH
3777
3778 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
be4a824d
PH
3779 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3780 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
fca6dba8 3781 redirect_handler = YoutubeDLRedirectHandler()
f9934b96 3782 data_handler = urllib.request.DataHandler()
6240b0a2
JMF
3783
3784 # When passing our own FileHandler instance, build_opener won't add the
3785 # default FileHandler; this lets us disable the file protocol, which
3786 # can be used for malicious purposes (see
067aa17e 3787 # https://github.com/ytdl-org/youtube-dl/issues/8227)
ac668111 3788 file_handler = urllib.request.FileHandler()
6240b0a2
JMF
3789
3790 def file_open(*args, **kwargs):
ac668111 3791 raise urllib.error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
6240b0a2
JMF
3792 file_handler.file_open = file_open
3793
ac668111 3794 opener = urllib.request.build_opener(
fca6dba8 3795 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
2461f79d 3796
dca08720
PH
3797 # Delete the default user-agent header, which would otherwise apply in
3798 # cases where our custom HTTP handler doesn't come into play
067aa17e 3799 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
dca08720
PH
3800 opener.addheaders = []
3801 self._opener = opener
62fec3b2
PH
3802
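A self-contained sketch of the file:// hardening performed above, outside of YoutubeDL's own opener; the names here are illustrative:

import urllib.error
import urllib.request

_file_handler = urllib.request.FileHandler()

def _deny_file_open(*args, **kwargs):
    # mirrors the override above: every file:// request is rejected
    raise urllib.error.URLError('file:// scheme is disabled')

_file_handler.file_open = _deny_file_open
_opener = urllib.request.build_opener(_file_handler)
# _opener.open('file:///etc/passwd')  # raises URLError instead of reading the file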
3803 def encode(self, s):
3804 if isinstance(s, bytes):
3805 return s # Already encoded
3806
3807 try:
3808 return s.encode(self.get_encoding())
3809 except UnicodeEncodeError as err:
3810 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3811 raise
3812
3813 def get_encoding(self):
3814 encoding = self.params.get('encoding')
3815 if encoding is None:
3816 encoding = preferredencoding()
3817 return encoding
ec82d85a 3818
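The error hint added in encode() refers to locales that cannot represent the string being written. A hedged sketch of that failure mode, with made-up values:

title = 'Ünïcode tïtle'
try:
    title.encode('ascii')  # roughly what encode() does under an ASCII locale
except UnicodeEncodeError as err:
    # this is the situation the '--encoding' hint above addresses
    print(err.reason)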
e08a85d8 3819 def _write_info_json(self, label, ie_result, infofn, overwrite=None):
cb96c5be 3820         ''' Write infojson and return True = written, 'exists' = already exists, False = skipped, None = error '''
e08a85d8 3821 if overwrite is None:
3822 overwrite = self.params.get('overwrites', True)
80c03fa9 3823 if not self.params.get('writeinfojson'):
3824 return False
3825 elif not infofn:
3826 self.write_debug(f'Skipping writing {label} infojson')
3827 return False
3828 elif not self._ensure_dir_exists(infofn):
3829 return None
e08a85d8 3830 elif not overwrite and os.path.exists(infofn):
80c03fa9 3831 self.to_screen(f'[info] {label.title()} metadata is already present')
cb96c5be 3832 return 'exists'
3833
3834 self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
3835 try:
3836 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
3837 return True
86e5f3ed 3838 except OSError:
cb96c5be 3839 self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
3840 return None
80c03fa9 3841
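A hypothetical caller sketch (not taken from this file) showing how the four possible return values of _write_info_json are told apart; ydl, info_dict and infofn are assumed to exist:

ret = ydl._write_info_json('video', info_dict, infofn)
if ret is None:
    pass            # error (already reported via report_error)
elif ret == 'exists':
    pass            # already present and overwrites are disabled
elif ret is True:
    pass            # freshly written
else:
    pass            # False: skipped (writeinfojson off, or no filename)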
3842 def _write_description(self, label, ie_result, descfn):
 3843         ''' Write description and return True = written, False = skipped, None = error '''
3844 if not self.params.get('writedescription'):
3845 return False
3846 elif not descfn:
3847 self.write_debug(f'Skipping writing {label} description')
3848 return False
3849 elif not self._ensure_dir_exists(descfn):
3850 return None
3851 elif not self.params.get('overwrites', True) and os.path.exists(descfn):
3852 self.to_screen(f'[info] {label.title()} description is already present')
3853 elif ie_result.get('description') is None:
3854 self.report_warning(f'There\'s no {label} description to write')
3855 return False
3856 else:
3857 try:
3858 self.to_screen(f'[info] Writing {label} description to: {descfn}')
86e5f3ed 3859 with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
80c03fa9 3860 descfile.write(ie_result['description'])
86e5f3ed 3861 except OSError:
80c03fa9 3862 self.report_error(f'Cannot write {label} description file {descfn}')
3863 return None
3864 return True
3865
3866 def _write_subtitles(self, info_dict, filename):
3867 ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
3868 ret = []
3869 subtitles = info_dict.get('requested_subtitles')
3870 if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
 3871             # Subtitle download errors are already handled in the relevant IE,
 3872             # so this silently continues when used with an IE that does not support them
3873 return ret
3874
3875 sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
3876 if not sub_filename_base:
3877 self.to_screen('[info] Skipping writing video subtitles')
3878 return ret
3879 for sub_lang, sub_info in subtitles.items():
3880 sub_format = sub_info['ext']
3881 sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
3882 sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
e04938ab 3883 existing_sub = self.existing_file((sub_filename_final, sub_filename))
3884 if existing_sub:
80c03fa9 3885 self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
e04938ab 3886 sub_info['filepath'] = existing_sub
3887 ret.append((existing_sub, sub_filename_final))
80c03fa9 3888 continue
3889
3890 self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
3891 if sub_info.get('data') is not None:
3892 try:
3893 # Use newline='' to prevent conversion of newline characters
3894 # See https://github.com/ytdl-org/youtube-dl/issues/10268
86e5f3ed 3895 with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
80c03fa9 3896 subfile.write(sub_info['data'])
3897 sub_info['filepath'] = sub_filename
3898 ret.append((sub_filename, sub_filename_final))
3899 continue
86e5f3ed 3900 except OSError:
80c03fa9 3901 self.report_error(f'Cannot write video subtitles file {sub_filename}')
3902 return None
3903
3904 try:
3905 sub_copy = sub_info.copy()
3906 sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
3907 self.dl(sub_filename, sub_copy, subtitle=True)
3908 sub_info['filepath'] = sub_filename
3909 ret.append((sub_filename, sub_filename_final))
6020e05d 3910             except (DownloadError, ExtractorError, OSError, ValueError) + network_exceptions as err:
c70c418d 3911 msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
6020e05d 3912 if self.params.get('ignoreerrors') is not True: # False or 'only_download'
c70c418d 3913 if not self.params.get('ignoreerrors'):
3914 self.report_error(msg)
3915 raise DownloadError(msg)
3916 self.report_warning(msg)
519804a9 3917 return ret
80c03fa9 3918
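For reference, a hedged sketch of the info_dict shape _write_subtitles consumes; the field names match the code above, the values are illustrative:

info_dict = {
    'ext': 'mp4',
    'requested_subtitles': {
        'en': {'ext': 'vtt', 'data': 'WEBVTT\n\n00:00:00.000 --> 00:00:01.000\nHello'},
        'de': {'ext': 'vtt', 'url': 'https://example.com/subs.de.vtt'},
    },
}
# Entries carrying 'data' are written directly (newline='' keeps the original
# line endings); the rest are downloaded via self.dl(..., subtitle=True). Each
# written file is returned as a (sub_filename, sub_filename_final) pair.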
3919 def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
3920 ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
6c4fd172 3921 write_all = self.params.get('write_all_thumbnails', False)
80c03fa9 3922 thumbnails, ret = [], []
6c4fd172 3923 if write_all or self.params.get('writethumbnail', False):
0202b52a 3924 thumbnails = info_dict.get('thumbnails') or []
6c4fd172 3925 multiple = write_all and len(thumbnails) > 1
ec82d85a 3926
80c03fa9 3927 if thumb_filename_base is None:
3928 thumb_filename_base = filename
3929 if thumbnails and not thumb_filename_base:
3930 self.write_debug(f'Skipping writing {label} thumbnail')
3931 return ret
3932
dd0228ce 3933 for idx, t in list(enumerate(thumbnails))[::-1]:
80c03fa9 3934 thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
aa9369a2 3935 thumb_display_id = f'{label} thumbnail {t["id"]}'
80c03fa9 3936 thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
3937 thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
ec82d85a 3938
e04938ab 3939 existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
3940 if existing_thumb:
aa9369a2 3941 self.to_screen('[info] %s is already present' % (
3942 thumb_display_id if multiple else f'{label} thumbnail').capitalize())
e04938ab 3943 t['filepath'] = existing_thumb
3944 ret.append((existing_thumb, thumb_filename_final))
ec82d85a 3945 else:
80c03fa9 3946 self.to_screen(f'[info] Downloading {thumb_display_id} ...')
ec82d85a 3947 try:
297e9952 3948 uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
80c03fa9 3949 self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
d3d89c32 3950 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
ec82d85a 3951 shutil.copyfileobj(uf, thumbf)
80c03fa9 3952 ret.append((thumb_filename, thumb_filename_final))
885cc0b7 3953 t['filepath'] = thumb_filename
3158150c 3954 except network_exceptions as err:
dd0228ce 3955 thumbnails.pop(idx)
80c03fa9 3956 self.report_warning(f'Unable to download {thumb_display_id}: {err}')
6c4fd172 3957 if ret and not write_all:
3958 break
0202b52a 3959 return ret
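A small sketch of the thumbnail naming used above; replace_extension is the helper already imported by this module, and the filenames are made up:

from yt_dlp.utils import replace_extension

filename = 'My Video.mp4'
replace_extension(filename, 'jpg', 'mp4')    # -> 'My Video.jpg'   (single thumbnail)
replace_extension(filename, '0.jpg', 'mp4')  # -> 'My Video.0.jpg' (--write-all-thumbnails with several candidates)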