]> jfr.im git - yt-dlp.git/blame - yt_dlp/YoutubeDL.py
Determine merge container better (See desc) (#1482)
[yt-dlp.git] / yt_dlp / YoutubeDL.py
CommitLineData
26e63931 1import collections
31bd3925 2import contextlib
9d2ecdbc 3import datetime
c1c9a79c 4import errno
31bd3925 5import fileinput
b5ae35ee 6import functools
8222d8de 7import io
b82f815f 8import itertools
8694c600 9import json
62fec3b2 10import locale
083c9df9 11import operator
8222d8de 12import os
f8271158 13import random
8222d8de
JMF
14import re
15import shutil
dca08720 16import subprocess
8222d8de 17import sys
21cd8fae 18import tempfile
8222d8de 19import time
67134eab 20import tokenize
8222d8de 21import traceback
524e2e4f 22import unicodedata
f9934b96 23import urllib.request
961ea474
S
24from string import ascii_letters
25
f8271158 26from .cache import Cache
14f25df2 27from .compat import compat_os_name, compat_shlex_quote
982ee69a 28from .cookies import load_cookies
f8271158 29from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
30from .downloader.rtmp import rtmpdump_version
f8271158 31from .extractor import gen_extractor_classes, get_info_extractor
32from .extractor.openload import PhantomJSwrapper
33from .minicurses import format_text
34from .postprocessor import _PLUGIN_CLASSES as plugin_postprocessors
35from .postprocessor import (
36 EmbedThumbnailPP,
37 FFmpegFixupDuplicateMoovPP,
38 FFmpegFixupDurationPP,
39 FFmpegFixupM3u8PP,
40 FFmpegFixupM4aPP,
41 FFmpegFixupStretchedPP,
42 FFmpegFixupTimestampPP,
43 FFmpegMergerPP,
44 FFmpegPostProcessor,
ca9def71 45 FFmpegVideoConvertorPP,
f8271158 46 MoveFilesAfterDownloadPP,
47 get_postprocessor,
48)
ca9def71 49from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
f8271158 50from .update import detect_variant
8c25f81b 51from .utils import (
f8271158 52 DEFAULT_OUTTMPL,
7b2c3f47 53 IDENTITY,
f8271158 54 LINK_TEMPLATES,
8dc59305 55 MEDIA_EXTENSIONS,
f8271158 56 NO_DEFAULT,
1d485a1a 57 NUMBER_RE,
f8271158 58 OUTTMPL_TYPES,
59 POSTPROCESS_WHEN,
60 STR_FORMAT_RE_TMPL,
61 STR_FORMAT_TYPES,
62 ContentTooShortError,
63 DateRange,
64 DownloadCancelled,
65 DownloadError,
66 EntryNotInPlaylist,
67 ExistingVideoReached,
68 ExtractorError,
69 GeoRestrictedError,
70 HEADRequest,
f8271158 71 ISO3166Utils,
72 LazyList,
73 MaxDownloadsReached,
19a03940 74 Namespace,
f8271158 75 PagedList,
76 PerRequestProxyHandler,
7e88d7d7 77 PlaylistEntries,
f8271158 78 Popen,
79 PostProcessingError,
80 ReExtractInfo,
81 RejectedVideoReached,
82 SameFileError,
83 UnavailableVideoError,
693f0600 84 UserNotLive,
f8271158 85 YoutubeDLCookieProcessor,
86 YoutubeDLHandler,
87 YoutubeDLRedirectHandler,
eedb7ba5
S
88 age_restricted,
89 args_to_str,
cb794ee0 90 bug_reports_message,
ce02ed60 91 date_from_str,
ce02ed60 92 determine_ext,
b5559424 93 determine_protocol,
c0384f22 94 encode_compat_str,
ce02ed60 95 encodeFilename,
a06916d9 96 error_to_compat_str,
47cdc68e 97 escapeHTML,
590bc6f6 98 expand_path,
90137ca4 99 filter_dict,
e29663c6 100 float_or_none,
02dbf93f 101 format_bytes,
e0fd9573 102 format_decimal_suffix,
f8271158 103 format_field,
525ef922 104 formatSeconds,
fc61aff4 105 get_compatible_ext,
0bb322b9 106 get_domain,
c9969434 107 int_or_none,
732044af 108 iri_to_uri,
34921b43 109 join_nonempty,
ce02ed60 110 locked_file,
0647d925 111 make_archive_id,
0202b52a 112 make_dir,
dca08720 113 make_HTTPS_handler,
8b7539d2 114 merge_headers,
3158150c 115 network_exceptions,
ec11a9f4 116 number_of_digits,
cd6fc19e 117 orderedSet,
083c9df9 118 parse_filesize,
ce02ed60 119 preferredencoding,
eedb7ba5 120 prepend_extension,
51fb4995 121 register_socks_protocols,
3efb96a6 122 remove_terminal_sequences,
cfb56d1a 123 render_table,
eedb7ba5 124 replace_extension,
ce02ed60 125 sanitize_filename,
1bb5c511 126 sanitize_path,
dcf77cf1 127 sanitize_url,
67dda517 128 sanitized_Request,
e5660ee6 129 std_headers,
1211bb6d 130 str_or_none,
e29663c6 131 strftime_or_none,
ce02ed60 132 subtitles_filename,
819e0531 133 supports_terminal_sequences,
b1f94422 134 system_identifier,
f2ebc5c7 135 timetuple_from_msec,
732044af 136 to_high_limit_path,
324ad820 137 traverse_obj,
fc61aff4 138 try_call,
6033d980 139 try_get,
29eb5174 140 url_basename,
7d1eb38a 141 variadic,
58b1f00d 142 version_tuple,
53973b4d 143 windows_enable_vt_mode,
ce02ed60
PH
144 write_json_file,
145 write_string,
4f026faf 146)
f8271158 147from .version import RELEASE_GIT_HEAD, __version__
8222d8de 148
e9c0cdd3
YCH
149if compat_os_name == 'nt':
150 import ctypes
151
2459b6e1 152
86e5f3ed 153class YoutubeDL:
8222d8de
JMF
154 """YoutubeDL class.
155
156 YoutubeDL objects are the ones responsible of downloading the
157 actual video file and writing it to disk if the user has requested
158 it, among some other tasks. In most cases there should be one per
159 program. As, given a video URL, the downloader doesn't know how to
160 extract all the needed information, task that InfoExtractors do, it
161 has to pass the URL to one of them.
162
163 For this, YoutubeDL objects have a method that allows
164 InfoExtractors to be registered in a given order. When it is passed
165 a URL, the YoutubeDL object handles it to the first InfoExtractor it
166 finds that reports being able to handle it. The InfoExtractor extracts
167 all the information about the video or videos the URL refers to, and
168 YoutubeDL process the extracted information, possibly using a File
169 Downloader to download the video.
170
171 YoutubeDL objects accept a lot of parameters. In order not to saturate
172 the object constructor with arguments, it receives a dictionary of
173 options instead. These options are available through the params
174 attribute for the InfoExtractors to use. The YoutubeDL also
175 registers itself as the downloader in charge for the InfoExtractors
176 that are added to it, so this is a "mutual registration".
177
178 Available options:
179
180 username: Username for authentication purposes.
181 password: Password for authentication purposes.
180940e0 182 videopassword: Password for accessing a video.
1da50aa3
S
183 ap_mso: Adobe Pass multiple-system operator identifier.
184 ap_username: Multiple-system operator account username.
185 ap_password: Multiple-system operator account password.
8222d8de
JMF
186 usenetrc: Use netrc for authentication instead.
187 verbose: Print additional info to stdout.
188 quiet: Do not print messages to stdout.
ad8915b7 189 no_warnings: Do not print out anything for warnings.
bb66c247 190 forceprint: A dict with keys WHEN mapped to a list of templates to
191 print to stdout. The allowed keys are video or any of the
192 items in utils.POSTPROCESS_WHEN.
ca30f449 193 For compatibility, a single list is also accepted
bb66c247 194 print_to_file: A dict with keys WHEN (same as forceprint) mapped to
195 a list of tuples with (template, filename)
8694c600 196 forcejson: Force printing info_dict as JSON.
63e0be34
PH
197 dump_single_json: Force printing the info_dict of the whole playlist
198 (or video) as a single JSON line.
c25228e5 199 force_write_download_archive: Force writing download archive regardless
200 of 'skip_download' or 'simulate'.
b7b04c78 201 simulate: Do not download the video files. If unset (or None),
202 simulate only if listsubtitles, listformats or list_thumbnails is used
eb8a4433 203 format: Video format code. see "FORMAT SELECTION" for more details.
093a1710 204 You can also pass a function. The function takes 'ctx' as
205 argument and returns the formats to download.
206 See "build_format_selector" for an implementation
63ad4d43 207 allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
b7da73eb 208 ignore_no_formats_error: Ignore "No video formats" error. Usefull for
209 extracting metadata even if the video is not actually
210 available for download (experimental)
0930b11f 211 format_sort: A list of fields by which to sort the video formats.
212 See "Sorting Formats" for more details.
c25228e5 213 format_sort_force: Force the given format_sort. see "Sorting Formats"
214 for more details.
08d30158 215 prefer_free_formats: Whether to prefer video formats with free containers
216 over non-free ones of same quality.
c25228e5 217 allow_multiple_video_streams: Allow multiple video streams to be merged
218 into a single file
219 allow_multiple_audio_streams: Allow multiple audio streams to be merged
220 into a single file
0ba692ac 221 check_formats Whether to test if the formats are downloadable.
9f1a1c36 222 Can be True (check all), False (check none),
223 'selected' (check selected formats),
0ba692ac 224 or None (check only if requested by extractor)
4524baf0 225 paths: Dictionary of output paths. The allowed keys are 'home'
226 'temp' and the keys of OUTTMPL_TYPES (in utils.py)
de6000d9 227 outtmpl: Dictionary of templates for output names. Allowed keys
4524baf0 228 are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
34488702 229 For compatibility with youtube-dl, a single string can also be used
a820dc72
RA
230 outtmpl_na_placeholder: Placeholder for unavailable meta fields.
231 restrictfilenames: Do not allow "&" and spaces in file names
232 trim_file_name: Limit length of filename (extension excluded)
4524baf0 233 windowsfilenames: Force the filenames to be windows compatible
b1940459 234 ignoreerrors: Do not stop on download/postprocessing errors.
235 Can be 'only_download' to ignore only download errors.
236 Default is 'only_download' for CLI, but False for API
26e2805c 237 skip_playlist_after_errors: Number of allowed failures until the rest of
238 the playlist is skipped
d22dec74 239 force_generic_extractor: Force downloader to use the generic extractor
0c3d0f51 240 overwrites: Overwrite all video and metadata files if True,
241 overwrite only non-video files if None
242 and don't overwrite any file if False
34488702 243 For compatibility with youtube-dl,
244 "nooverwrites" may also be used instead
c14e88f0 245 playlist_items: Specific indices of playlist to download.
75822ca7 246 playlistrandom: Download playlist items in random order.
7e9a6125 247 lazy_playlist: Process playlist entries as they are received.
8222d8de
JMF
248 matchtitle: Download only matching titles.
249 rejecttitle: Reject downloads for matching titles.
8bf9319e 250 logger: Log messages to a logging.Logger instance.
8222d8de 251 logtostderr: Log messages to stderr instead of stdout.
819e0531 252 consoletitle: Display progress in console window's titlebar.
8222d8de
JMF
253 writedescription: Write the video description to a .description file
254 writeinfojson: Write the video description to a .info.json file
75d43ca0 255 clean_infojson: Remove private fields from the infojson
34488702 256 getcomments: Extract video comments. This will not be written to disk
06167fbb 257 unless writeinfojson is also given
1fb07d10 258 writeannotations: Write the video annotations to a .annotations.xml file
8222d8de 259 writethumbnail: Write the thumbnail image to a file
c25228e5 260 allow_playlist_files: Whether to write playlists' description, infojson etc
261 also to disk when using the 'write*' options
ec82d85a 262 write_all_thumbnails: Write all thumbnail formats to files
732044af 263 writelink: Write an internet shortcut file, depending on the
264 current platform (.url/.webloc/.desktop)
265 writeurllink: Write a Windows internet shortcut file (.url)
266 writewebloclink: Write a macOS internet shortcut file (.webloc)
267 writedesktoplink: Write a Linux internet shortcut file (.desktop)
8222d8de 268 writesubtitles: Write the video subtitles to a file
741dd8ea 269 writeautomaticsub: Write the automatically generated subtitles to a file
8222d8de 270 listsubtitles: Lists all available subtitles for the video
a504ced0 271 subtitlesformat: The format code for subtitles
c32b0aab 272 subtitleslangs: List of languages of the subtitles to download (can be regex).
273 The list may contain "all" to refer to all the available
274 subtitles. The language can be prefixed with a "-" to
275 exclude it from the requested languages. Eg: ['all', '-live_chat']
8222d8de
JMF
276 keepvideo: Keep the video file after post-processing
277 daterange: A DateRange object, download only if the upload_date is in the range.
278 skip_download: Skip the actual download of the video file
c35f9e72 279 cachedir: Location of the cache files in the filesystem.
a0e07d31 280 False to disable filesystem cache.
47192f92 281 noplaylist: Download single video instead of a playlist if in doubt.
8dbe9899
PH
282 age_limit: An integer representing the user's age in years.
283 Unsuitable videos for the given age are skipped.
5fe18bdb
PH
284 min_views: An integer representing the minimum view count the video
285 must have in order to not be skipped.
286 Videos without view count information are always
287 downloaded. None for no limit.
288 max_views: An integer representing the maximum view count.
289 Videos that are more popular than that are not
290 downloaded.
291 Videos without view count information are always
292 downloaded. None for no limit.
293 download_archive: File name of a file where all downloads are recorded.
c1c9a79c
PH
294 Videos already present in the file are not downloaded
295 again.
8a51f564 296 break_on_existing: Stop the download process after attempting to download a
297 file that is in the archive.
298 break_on_reject: Stop the download process when encountering a video that
299 has been filtered out.
b222c271 300 break_per_url: Whether break_on_reject and break_on_existing
301 should act on each input URL as opposed to for the entire queue
d76fa1f3 302 cookiefile: File name or text stream from where cookies should be read and dumped to
f59f5ef8
MB
303 cookiesfrombrowser: A tuple containing the name of the browser, the profile
304 name/pathfrom where cookies are loaded, and the name of the
305 keyring. Eg: ('chrome', ) or ('vivaldi', 'default', 'BASICTEXT')
f81c62a6 306 legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
307 support RFC 5746 secure renegotiation
f59f5ef8 308 nocheckcertificate: Do not verify SSL certificates
bb58c9ed 309 client_certificate: Path to client certificate file in PEM format. May include the private key
310 client_certificate_key: Path to private key file for client certificate
311 client_certificate_password: Password for client certificate private key, if encrypted.
312 If not provided and the key is encrypted, yt-dlp will ask interactively
7e8c0af0 313 prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
c6e07cf1 314 (Only supported by some extractors)
8b7539d2 315 http_headers: A dictionary of custom headers to be used for all requests
a1ee09e8 316 proxy: URL of the proxy server to use
38cce791 317 geo_verification_proxy: URL of the proxy to use for IP address verification
504f20dd 318 on geo-restricted sites.
e344693b 319 socket_timeout: Time to wait for unresponsive hosts, in seconds
0783b09b
PH
320 bidi_workaround: Work around buggy terminals without bidirectional text
321 support, using fridibi
a0ddb8a2 322 debug_printtraffic:Print out sent and received HTTP traffic
04b4d394
PH
323 default_search: Prepend this string if an input url is not valid.
324 'auto' for elaborate guessing
62fec3b2 325 encoding: Use this encoding instead of the system-specified.
134c913c 326 extract_flat: Whether to resolve and process url_results further
327 * False: Always process (default)
328 * True: Never process
329 * 'in_playlist': Do not process inside playlist/multi_video
330 * 'discard': Always process, but don't return the result
331 from inside playlist/multi_video
332 * 'discard_in_playlist': Same as "discard", but only for
333 playlists (not multi_video)
f2ebc5c7 334 wait_for_video: If given, wait for scheduled streams to become available.
335 The value should be a tuple containing the range
336 (min_secs, max_secs) to wait between retries
4f026faf 337 postprocessors: A list of dictionaries, each with an entry
71b640cc 338 * key: The name of the postprocessor. See
7a5c1cfe 339 yt_dlp/postprocessor/__init__.py for a list.
bb66c247 340 * when: When to run the postprocessor. Allowed values are
341 the entries of utils.POSTPROCESS_WHEN
56d868db 342 Assumed to be 'post_process' if not given
71b640cc
PH
343 progress_hooks: A list of functions that get called on download
344 progress, with a dictionary with the entries
5cda4eda 345 * status: One of "downloading", "error", or "finished".
ee69b99a 346 Check this first and ignore unknown values.
3ba7740d 347 * info_dict: The extracted info_dict
71b640cc 348
5cda4eda 349 If status is one of "downloading", or "finished", the
ee69b99a
PH
350 following properties may also be present:
351 * filename: The final filename (always present)
5cda4eda 352 * tmpfilename: The filename we're currently writing to
71b640cc
PH
353 * downloaded_bytes: Bytes on disk
354 * total_bytes: Size of the whole file, None if unknown
5cda4eda
PH
355 * total_bytes_estimate: Guess of the eventual file size,
356 None if unavailable.
357 * elapsed: The number of seconds since download started.
71b640cc
PH
358 * eta: The estimated time in seconds, None if unknown
359 * speed: The download speed in bytes/second, None if
360 unknown
5cda4eda
PH
361 * fragment_index: The counter of the currently
362 downloaded video fragment.
363 * fragment_count: The number of fragments (= individual
364 files that will be merged)
71b640cc
PH
365
366 Progress hooks are guaranteed to be called at least once
367 (with status "finished") if the download is successful.
819e0531 368 postprocessor_hooks: A list of functions that get called on postprocessing
369 progress, with a dictionary with the entries
370 * status: One of "started", "processing", or "finished".
371 Check this first and ignore unknown values.
372 * postprocessor: Name of the postprocessor
373 * info_dict: The extracted info_dict
374
375 Progress hooks are guaranteed to be called at least twice
376 (with status "started" and "finished") if the processing is successful.
fc61aff4 377 merge_output_format: "/" separated list of extensions to use when merging formats.
6b591b29 378 final_ext: Expected final extension; used to detect when the file was
59a7a13e 379 already downloaded and converted
6271f1ca
PH
380 fixup: Automatically correct known faults of the file.
381 One of:
382 - "never": do nothing
383 - "warn": only emit a warning
384 - "detect_or_warn": check whether we can do anything
62cd676c 385 about it, warn otherwise (default)
504f20dd 386 source_address: Client-side IP address to bind to.
1cf376f5 387 sleep_interval_requests: Number of seconds to sleep between requests
388 during extraction
7aa589a5
S
389 sleep_interval: Number of seconds to sleep before each download when
390 used alone or a lower bound of a range for randomized
391 sleep before each download (minimum possible number
392 of seconds to sleep) when used along with
393 max_sleep_interval.
394 max_sleep_interval:Upper bound of a range for randomized sleep before each
395 download (maximum possible number of seconds to sleep).
396 Must only be used along with sleep_interval.
397 Actual sleep time will be a random float from range
398 [sleep_interval; max_sleep_interval].
1cf376f5 399 sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
cfb56d1a
PH
400 listformats: Print an overview of available video formats and exit.
401 list_thumbnails: Print a table of all thumbnails and exit.
0a41f331 402 match_filter: A function that gets called for every video with the signature
403 (info_dict, *, incomplete: bool) -> Optional[str]
404 For backward compatibility with youtube-dl, the signature
405 (info_dict) -> Optional[str] is also allowed.
406 - If it returns a message, the video is ignored.
407 - If it returns None, the video is downloaded.
408 - If it returns utils.NO_DEFAULT, the user is interactively
409 asked whether to download the video.
347de493 410 match_filter_func in utils.py is one example for this.
7e5db8c9 411 no_color: Do not emit color codes in output.
0a840f58 412 geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
504f20dd 413 HTTP header
0a840f58 414 geo_bypass_country:
773f291d
S
415 Two-letter ISO 3166-2 country code that will be used for
416 explicit geographic restriction bypassing via faking
504f20dd 417 X-Forwarded-For HTTP header
5f95927a
S
418 geo_bypass_ip_block:
419 IP range in CIDR notation that will be used similarly to
504f20dd 420 geo_bypass_country
52a8a1e1 421 external_downloader: A dictionary of protocol keys and the executable of the
422 external downloader to use for it. The allowed protocols
423 are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
424 Set the value to 'native' to use the native downloader
53ed7066 425 compat_opts: Compatibility options. See "Differences in default behavior".
3acf6d38 426 The following options do not work when used through the API:
b5ae35ee 427 filename, abort-on-error, multistreams, no-live-chat, format-sort
dac5df5a 428 no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
e4f02757 429 Refer __init__.py for their implementation
819e0531 430 progress_template: Dictionary of templates for progress outputs.
431 Allowed keys are 'download', 'postprocess',
432 'download-title' (console title) and 'postprocess-title'.
433 The template is mapped on a dictionary with keys 'progress' and 'info'
23326151 434 retry_sleep_functions: Dictionary of functions that takes the number of attempts
435 as argument and returns the time to sleep in seconds.
436 Allowed keys are 'http', 'fragment', 'file_access'
0f446365
SW
437 download_ranges: A callback function that gets called for every video with
438 the signature (info_dict, ydl) -> Iterable[Section].
439 Only the returned sections will be downloaded.
440 Each Section is a dict with the following keys:
5ec1b6b7 441 * start_time: Start time of the section in seconds
442 * end_time: End time of the section in seconds
443 * title: Section title (Optional)
444 * index: Section number (Optional)
0f446365 445 force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
a7dc6a89 446 noprogress: Do not print the progress bar
fe7e0c98 447
8222d8de 448 The following parameters are not used by YoutubeDL itself, they are used by
7a5c1cfe 449 the downloader (see yt_dlp/downloader/common.py):
51d9739f 450 nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
205a0654 451 max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
a7dc6a89 452 continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
59a7a13e 453 external_downloader_args, concurrent_fragment_downloads.
76b1bd67
JMF
454
455 The following options are used by the post processors:
c0b7d117
S
456 ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
457 to the binary or its containing directory.
43820c03 458 postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
34488702 459 and a list of additional command-line arguments for the
460 postprocessor/executable. The dict can also have "PP+EXE" keys
461 which are used when the given exe is used by the given PP.
462 Use 'default' as the name for arguments to passed to all PP
463 For compatibility with youtube-dl, a single list of args
464 can also be used
e409895f 465
466 The following options are used by the extractors:
62bff2c1 467 extractor_retries: Number of times to retry for known errors
468 dynamic_mpd: Whether to process dynamic DASH manifests (default: True)
e409895f 469 hls_split_discontinuity: Split HLS playlists to different formats at
62bff2c1 470 discontinuities such as ad breaks (default: False)
5d3a0e79 471 extractor_args: A dictionary of arguments to be passed to the extractors.
472 See "EXTRACTOR ARGUMENTS" for details.
473 Eg: {'youtube': {'skip': ['dash', 'hls']}}
88f23a18 474 mark_watched: Mark videos watched (even with --simulate). Only for YouTube
1890fc63 475
476 The following options are deprecated and may be removed in the future:
477
7e9a6125 478 playliststart: - Use playlist_items
479 Playlist item to start at.
480 playlistend: - Use playlist_items
481 Playlist item to end at.
482 playlistreverse: - Use playlist_items
483 Download playlist items in reverse order.
1890fc63 484 forceurl: - Use forceprint
485 Force printing final URL.
486 forcetitle: - Use forceprint
487 Force printing title.
488 forceid: - Use forceprint
489 Force printing ID.
490 forcethumbnail: - Use forceprint
491 Force printing thumbnail URL.
492 forcedescription: - Use forceprint
493 Force printing description.
494 forcefilename: - Use forceprint
495 Force printing final filename.
496 forceduration: - Use forceprint
497 Force printing duration.
498 allsubtitles: - Use subtitleslangs = ['all']
499 Downloads all the subtitles of the video
500 (requires writesubtitles or writeautomaticsub)
501 include_ads: - Doesn't work
502 Download ads as well
503 call_home: - Not implemented
504 Boolean, true iff we are allowed to contact the
505 yt-dlp servers for debugging.
506 post_hooks: - Register a custom postprocessor
507 A list of functions that get called as the final step
508 for each video file, after all postprocessors have been
509 called. The filename will be passed as the only argument.
510 hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
511 Use the native HLS downloader instead of ffmpeg/avconv
512 if True, otherwise use ffmpeg/avconv if False, otherwise
513 use downloader suggested by extractor if None.
514 prefer_ffmpeg: - avconv support is deprecated
515 If False, use avconv instead of ffmpeg if both are available,
516 otherwise prefer ffmpeg.
517 youtube_include_dash_manifest: - Use extractor_args
5d3a0e79 518 If True (default), DASH manifests and related
62bff2c1 519 data will be downloaded and processed by extractor.
520 You can reduce network I/O by disabling it if you don't
521 care about DASH. (only for youtube)
1890fc63 522 youtube_include_hls_manifest: - Use extractor_args
5d3a0e79 523 If True (default), HLS manifests and related
62bff2c1 524 data will be downloaded and processed by extractor.
525 You can reduce network I/O by disabling it if you don't
526 care about HLS. (only for youtube)
8222d8de
JMF
527 """
528
86e5f3ed 529 _NUMERIC_FIELDS = {
c9969434 530 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
e6f21b3d 531 'timestamp', 'release_timestamp',
c9969434
S
532 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
533 'average_rating', 'comment_count', 'age_limit',
534 'start_time', 'end_time',
535 'chapter_number', 'season_number', 'episode_number',
536 'track_number', 'disc_number', 'release_year',
86e5f3ed 537 }
c9969434 538
6db9c4d5 539 _format_fields = {
540 # NB: Keep in sync with the docstring of extractor/common.py
a44ca5a4 541 'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
6db9c4d5 542 'width', 'height', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr',
543 'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx',
544 'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
545 'preference', 'language', 'language_preference', 'quality', 'source_preference',
546 'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'downloader_options',
547 'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
548 }
48ee10ee 549 _format_selection_exts = {
8dc59305 550 'audio': set(MEDIA_EXTENSIONS.common_audio),
551 'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
552 'storyboards': set(MEDIA_EXTENSIONS.storyboards),
48ee10ee 553 }
554
3511266b 555 def __init__(self, params=None, auto_init=True):
883d4b1e 556 """Create a FileDownloader object with the given options.
557 @param auto_init Whether to load the default extractors and print header (if verbose).
49a57e70 558 Set to 'no_verbose_header' to not print the header
883d4b1e 559 """
e9f9a10f
JMF
560 if params is None:
561 params = {}
592b7485 562 self.params = params
8b7491c8 563 self._ies = {}
56c73665 564 self._ies_instances = {}
1e43a6f7 565 self._pps = {k: [] for k in POSTPROCESS_WHEN}
b35496d8 566 self._printed_messages = set()
1cf376f5 567 self._first_webpage_request = True
ab8e5e51 568 self._post_hooks = []
933605d7 569 self._progress_hooks = []
819e0531 570 self._postprocessor_hooks = []
8222d8de
JMF
571 self._download_retcode = 0
572 self._num_downloads = 0
9c906919 573 self._num_videos = 0
592b7485 574 self._playlist_level = 0
575 self._playlist_urls = set()
a0e07d31 576 self.cache = Cache(self)
34308b30 577
819e0531 578 windows_enable_vt_mode()
591bb9d3 579 stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
580 self._out_files = Namespace(
581 out=stdout,
582 error=sys.stderr,
583 screen=sys.stderr if self.params.get('quiet') else stdout,
584 console=None if compat_os_name == 'nt' else next(
cf4f42cb 585 filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
591bb9d3 586 )
587 self._allow_colors = Namespace(**{
588 type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
64fa820c 589 for type_, stream in self._out_files.items_ if type_ != 'console'
591bb9d3 590 })
819e0531 591
6929b41a 592 # The code is left like this to be reused for future deprecations
593 MIN_SUPPORTED, MIN_RECOMMENDED = (3, 7), (3, 7)
eff42759 594 current_version = sys.version_info[:2]
595 if current_version < MIN_RECOMMENDED:
9d339c41 596 msg = ('Support for Python version %d.%d has been deprecated. '
24093d52 597 'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details.'
c6e07cf1 598 '\n You will no longer receive updates on this version')
eff42759 599 if current_version < MIN_SUPPORTED:
600 msg = 'Python version %d.%d is no longer supported'
601 self.deprecation_warning(
602 f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))
a61f4b28 603
88acdbc2 604 if self.params.get('allow_unplayable_formats'):
605 self.report_warning(
ec11a9f4 606 f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
819e0531 607 'This is a developer option intended for debugging. \n'
608 ' If you experience any issues while using this option, '
ec11a9f4 609 f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')
88acdbc2 610
be5df5ee
S
611 def check_deprecated(param, option, suggestion):
612 if self.params.get(param) is not None:
86e5f3ed 613 self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
be5df5ee
S
614 return True
615 return False
616
617 if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
38cce791
YCH
618 if self.params.get('geo_verification_proxy') is None:
619 self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']
620
0d1bb027 621 check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
622 check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
53ed7066 623 check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')
0d1bb027 624
49a57e70 625 for msg in self.params.get('_warnings', []):
0d1bb027 626 self.report_warning(msg)
ee8dd27a 627 for msg in self.params.get('_deprecation_warnings', []):
628 self.deprecation_warning(msg)
0d1bb027 629
8a82af35 630 self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
8a82af35 631 if 'list-formats' in self.params['compat_opts']:
ec11a9f4 632 self.params['listformats_table'] = False
633
b5ae35ee 634 if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
b868936c 635 # nooverwrites was unnecessarily changed to overwrites
636 # in 0c3d0f51778b153f65c21906031c2e091fcfb641
637 # This ensures compatibility with both keys
638 self.params['overwrites'] = not self.params['nooverwrites']
b5ae35ee 639 elif self.params.get('overwrites') is None:
640 self.params.pop('overwrites', None)
b868936c 641 else:
642 self.params['nooverwrites'] = not self.params['overwrites']
b9d973be 643
455a15e2 644 self.params.setdefault('forceprint', {})
645 self.params.setdefault('print_to_file', {})
bb66c247 646
647 # Compatibility with older syntax
ca30f449 648 if not isinstance(params['forceprint'], dict):
455a15e2 649 self.params['forceprint'] = {'video': params['forceprint']}
ca30f449 650
455a15e2 651 if self.params.get('bidi_workaround', False):
1c088fa8
PH
652 try:
653 import pty
654 master, slave = pty.openpty()
ac668111 655 width = shutil.get_terminal_size().columns
591bb9d3 656 width_args = [] if width is None else ['-w', str(width)]
657 sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
5d681e96 658 try:
d3c93ec2 659 self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
5d681e96 660 except OSError:
d3c93ec2 661 self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
5d681e96 662 self._output_channel = os.fdopen(master, 'rb')
1c088fa8 663 except OSError as ose:
66e7ace1 664 if ose.errno == errno.ENOENT:
49a57e70 665 self.report_warning(
666 'Could not find fribidi executable, ignoring --bidi-workaround. '
667 'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
1c088fa8
PH
668 else:
669 raise
0783b09b 670
97ec5bc5 671 if auto_init:
672 if auto_init != 'no_verbose_header':
673 self.print_debug_header()
674 self.add_default_info_extractors()
675
3089bc74
S
676 if (sys.platform != 'win32'
677 and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
455a15e2 678 and not self.params.get('restrictfilenames', False)):
e9137224 679 # Unicode filesystem API will throw errors (#1474, #13027)
34308b30 680 self.report_warning(
6febd1c1 681 'Assuming --restrict-filenames since file system encoding '
1b725173 682 'cannot encode all characters. '
6febd1c1 683 'Set the LC_ALL environment variable to fix this.')
4a98cdbf 684 self.params['restrictfilenames'] = True
34308b30 685
bf1824b3 686 self._parse_outtmpl()
486dd09e 687
187986a8 688 # Creating format selector here allows us to catch syntax errors before the extraction
689 self.format_selector = (
fa9f30b8 690 self.params.get('format') if self.params.get('format') in (None, '-')
093a1710 691 else self.params['format'] if callable(self.params['format'])
187986a8 692 else self.build_format_selector(self.params['format']))
693
8b7539d2 694 # Set http_headers defaults according to std_headers
695 self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))
696
013b50b7 697 hooks = {
698 'post_hooks': self.add_post_hook,
699 'progress_hooks': self.add_progress_hook,
700 'postprocessor_hooks': self.add_postprocessor_hook,
701 }
702 for opt, fn in hooks.items():
703 for ph in self.params.get(opt, []):
704 fn(ph)
71b640cc 705
5bfc8bee 706 for pp_def_raw in self.params.get('postprocessors', []):
707 pp_def = dict(pp_def_raw)
708 when = pp_def.pop('when', 'post_process')
709 self.add_post_processor(
f9934b96 710 get_postprocessor(pp_def.pop('key'))(self, **pp_def),
5bfc8bee 711 when=when)
712
97ec5bc5 713 self._setup_opener()
51fb4995
YCH
714 register_socks_protocols()
715
ed39cac5 716 def preload_download_archive(fn):
717 """Preload the archive, if any is specified"""
718 if fn is None:
719 return False
49a57e70 720 self.write_debug(f'Loading archive file {fn!r}')
ed39cac5 721 try:
722 with locked_file(fn, 'r', encoding='utf-8') as archive_file:
723 for line in archive_file:
724 self.archive.add(line.strip())
86e5f3ed 725 except OSError as ioe:
ed39cac5 726 if ioe.errno != errno.ENOENT:
727 raise
728 return False
729 return True
730
731 self.archive = set()
732 preload_download_archive(self.params.get('download_archive'))
733
7d4111ed
PH
734 def warn_if_short_id(self, argv):
735 # short YouTube ID starting with dash?
736 idxs = [
737 i for i, a in enumerate(argv)
738 if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
739 if idxs:
740 correct_argv = (
7a5c1cfe 741 ['yt-dlp']
3089bc74
S
742 + [a for i, a in enumerate(argv) if i not in idxs]
743 + ['--'] + [argv[i] for i in idxs]
7d4111ed
PH
744 )
745 self.report_warning(
746 'Long argument string detected. '
49a57e70 747 'Use -- to separate parameters and URLs, like this:\n%s' %
7d4111ed
PH
748 args_to_str(correct_argv))
749
8222d8de
JMF
750 def add_info_extractor(self, ie):
751 """Add an InfoExtractor object to the end of the list."""
8b7491c8 752 ie_key = ie.ie_key()
753 self._ies[ie_key] = ie
e52d7f85 754 if not isinstance(ie, type):
8b7491c8 755 self._ies_instances[ie_key] = ie
e52d7f85 756 ie.set_downloader(self)
8222d8de 757
8b7491c8 758 def _get_info_extractor_class(self, ie_key):
759 ie = self._ies.get(ie_key)
760 if ie is None:
761 ie = get_info_extractor(ie_key)
762 self.add_info_extractor(ie)
763 return ie
764
56c73665
JMF
765 def get_info_extractor(self, ie_key):
766 """
767 Get an instance of an IE with name ie_key, it will try to get one from
768 the _ies list, if there's no instance it will create a new one and add
769 it to the extractor list.
770 """
771 ie = self._ies_instances.get(ie_key)
772 if ie is None:
773 ie = get_info_extractor(ie_key)()
774 self.add_info_extractor(ie)
775 return ie
776
023fa8c4
JMF
777 def add_default_info_extractors(self):
778 """
779 Add the InfoExtractors returned by gen_extractors to the end of the list
780 """
e52d7f85 781 for ie in gen_extractor_classes():
023fa8c4
JMF
782 self.add_info_extractor(ie)
783
56d868db 784 def add_post_processor(self, pp, when='post_process'):
8222d8de 785 """Add a PostProcessor object to the end of the chain."""
8aa0e7cd 786 assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
5bfa4862 787 self._pps[when].append(pp)
8222d8de
JMF
788 pp.set_downloader(self)
789
ab8e5e51
AM
790 def add_post_hook(self, ph):
791 """Add the post hook"""
792 self._post_hooks.append(ph)
793
933605d7 794 def add_progress_hook(self, ph):
819e0531 795 """Add the download progress hook"""
933605d7 796 self._progress_hooks.append(ph)
8ab470f1 797
819e0531 798 def add_postprocessor_hook(self, ph):
799 """Add the postprocessing progress hook"""
800 self._postprocessor_hooks.append(ph)
5bfc8bee 801 for pps in self._pps.values():
802 for pp in pps:
803 pp.add_progress_hook(ph)
819e0531 804
1c088fa8 805 def _bidi_workaround(self, message):
5d681e96 806 if not hasattr(self, '_output_channel'):
1c088fa8
PH
807 return message
808
5d681e96 809 assert hasattr(self, '_output_process')
14f25df2 810 assert isinstance(message, str)
6febd1c1 811 line_count = message.count('\n') + 1
0f06bcd7 812 self._output_process.stdin.write((message + '\n').encode())
5d681e96 813 self._output_process.stdin.flush()
0f06bcd7 814 res = ''.join(self._output_channel.readline().decode()
9e1a5b84 815 for _ in range(line_count))
6febd1c1 816 return res[:-len('\n')]
1c088fa8 817
b35496d8 818 def _write_string(self, message, out=None, only_once=False):
819 if only_once:
820 if message in self._printed_messages:
821 return
822 self._printed_messages.add(message)
823 write_string(message, out=out, encoding=self.params.get('encoding'))
734f90bb 824
cf4f42cb 825 def to_stdout(self, message, skip_eol=False, quiet=None):
0760b0a7 826 """Print message to stdout"""
cf4f42cb 827 if quiet is not None:
ae6a1b95 828 self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. Use "YoutubeDL.to_screen" instead')
8a82af35 829 if skip_eol is not False:
830 self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. Use "YoutubeDL.to_screen" instead')
0bf9dc1e 831 self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)
cf4f42cb 832
833 def to_screen(self, message, skip_eol=False, quiet=None):
834 """Print message to screen if not in quiet mode"""
8bf9319e 835 if self.params.get('logger'):
43afe285 836 self.params['logger'].debug(message)
cf4f42cb 837 return
838 if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
839 return
840 self._write_string(
841 '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
591bb9d3 842 self._out_files.screen)
8222d8de 843
b35496d8 844 def to_stderr(self, message, only_once=False):
0760b0a7 845 """Print message to stderr"""
14f25df2 846 assert isinstance(message, str)
8bf9319e 847 if self.params.get('logger'):
43afe285
IB
848 self.params['logger'].error(message)
849 else:
5792c950 850 self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)
cf4f42cb 851
852 def _send_console_code(self, code):
591bb9d3 853 if compat_os_name == 'nt' or not self._out_files.console:
cf4f42cb 854 return
591bb9d3 855 self._write_string(code, self._out_files.console)
8222d8de 856
1e5b9a95
PH
857 def to_console_title(self, message):
858 if not self.params.get('consoletitle', False):
859 return
3efb96a6 860 message = remove_terminal_sequences(message)
4bede0d8
C
861 if compat_os_name == 'nt':
862 if ctypes.windll.kernel32.GetConsoleWindow():
863 # c_wchar_p() might not be necessary if `message` is
864 # already of type unicode()
865 ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
cf4f42cb 866 else:
867 self._send_console_code(f'\033]0;{message}\007')
1e5b9a95 868
bdde425c 869 def save_console_title(self):
cf4f42cb 870 if not self.params.get('consoletitle') or self.params.get('simulate'):
bdde425c 871 return
592b7485 872 self._send_console_code('\033[22;0t') # Save the title on stack
bdde425c
PH
873
874 def restore_console_title(self):
cf4f42cb 875 if not self.params.get('consoletitle') or self.params.get('simulate'):
bdde425c 876 return
592b7485 877 self._send_console_code('\033[23;0t') # Restore the title from stack
bdde425c
PH
878
879 def __enter__(self):
880 self.save_console_title()
881 return self
882
883 def __exit__(self, *args):
884 self.restore_console_title()
f89197d7 885
dca08720 886 if self.params.get('cookiefile') is not None:
1bab3437 887 self.cookiejar.save(ignore_discard=True, ignore_expires=True)
bdde425c 888
fa9f30b8 889 def trouble(self, message=None, tb=None, is_error=True):
8222d8de
JMF
890 """Determine action to take when a download problem appears.
891
892 Depending on if the downloader has been configured to ignore
893 download errors or not, this method may throw an exception or
894 not when errors are found, after printing the message.
895
fa9f30b8 896 @param tb If given, is additional traceback information
897 @param is_error Whether to raise error according to ignorerrors
8222d8de
JMF
898 """
899 if message is not None:
900 self.to_stderr(message)
901 if self.params.get('verbose'):
902 if tb is None:
903 if sys.exc_info()[0]: # if .trouble has been called from an except block
6febd1c1 904 tb = ''
8222d8de 905 if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
6febd1c1 906 tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
c0384f22 907 tb += encode_compat_str(traceback.format_exc())
8222d8de
JMF
908 else:
909 tb_data = traceback.format_list(traceback.extract_stack())
6febd1c1 910 tb = ''.join(tb_data)
c19bc311 911 if tb:
912 self.to_stderr(tb)
fa9f30b8 913 if not is_error:
914 return
b1940459 915 if not self.params.get('ignoreerrors'):
8222d8de
JMF
916 if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
917 exc_info = sys.exc_info()[1].exc_info
918 else:
919 exc_info = sys.exc_info()
920 raise DownloadError(message, exc_info)
921 self._download_retcode = 1
922
19a03940 923 Styles = Namespace(
924 HEADERS='yellow',
925 EMPHASIS='light blue',
492272fe 926 FILENAME='green',
19a03940 927 ID='green',
928 DELIM='blue',
929 ERROR='red',
930 WARNING='yellow',
931 SUPPRESS='light black',
932 )
ec11a9f4 933
7578d77d 934 def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
e5a998f3 935 text = str(text)
ec11a9f4 936 if test_encoding:
937 original_text = text
5c104538 938 # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
939 encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
ec11a9f4 940 text = text.encode(encoding, 'ignore').decode(encoding)
941 if fallback is not None and text != original_text:
942 text = fallback
7578d77d 943 return format_text(text, f) if allow_colors else text if fallback is None else fallback
ec11a9f4 944
591bb9d3 945 def _format_out(self, *args, **kwargs):
946 return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)
947
ec11a9f4 948 def _format_screen(self, *args, **kwargs):
591bb9d3 949 return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)
ec11a9f4 950
951 def _format_err(self, *args, **kwargs):
591bb9d3 952 return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)
819e0531 953
c84aeac6 954 def report_warning(self, message, only_once=False):
8222d8de
JMF
955 '''
956 Print the message to stderr, it will be prefixed with 'WARNING:'
957 If stderr is a tty file the 'WARNING:' will be colored
958 '''
6d07ce01
JMF
959 if self.params.get('logger') is not None:
960 self.params['logger'].warning(message)
8222d8de 961 else:
ad8915b7
PH
962 if self.params.get('no_warnings'):
963 return
ec11a9f4 964 self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)
8222d8de 965
ee8dd27a 966 def deprecation_warning(self, message):
967 if self.params.get('logger') is not None:
a44ca5a4 968 self.params['logger'].warning(f'DeprecationWarning: {message}')
ee8dd27a 969 else:
970 self.to_stderr(f'{self._format_err("DeprecationWarning:", self.Styles.ERROR)} {message}', True)
971
fa9f30b8 972 def report_error(self, message, *args, **kwargs):
8222d8de
JMF
973 '''
974 Do the same as trouble, but prefixes the message with 'ERROR:', colored
975 in red if stderr is a tty file.
976 '''
fa9f30b8 977 self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)
8222d8de 978
b35496d8 979 def write_debug(self, message, only_once=False):
0760b0a7 980 '''Log debug message or Print message to stderr'''
981 if not self.params.get('verbose', False):
982 return
8a82af35 983 message = f'[debug] {message}'
0760b0a7 984 if self.params.get('logger'):
985 self.params['logger'].debug(message)
986 else:
b35496d8 987 self.to_stderr(message, only_once)
0760b0a7 988
8222d8de
JMF
989 def report_file_already_downloaded(self, file_name):
990 """Report file has already been fully downloaded."""
991 try:
6febd1c1 992 self.to_screen('[download] %s has already been downloaded' % file_name)
ce02ed60 993 except UnicodeEncodeError:
6febd1c1 994 self.to_screen('[download] The file has already been downloaded')
8222d8de 995
0c3d0f51 996 def report_file_delete(self, file_name):
997 """Report that existing file will be deleted."""
998 try:
c25228e5 999 self.to_screen('Deleting existing file %s' % file_name)
0c3d0f51 1000 except UnicodeEncodeError:
c25228e5 1001 self.to_screen('Deleting existing file')
0c3d0f51 1002
319b6059 1003 def raise_no_formats(self, info, forced=False, *, msg=None):
0a5a191a 1004 has_drm = info.get('_has_drm')
319b6059 1005 ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
1006 msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
1007 if forced or not ignored:
1151c407 1008 raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
319b6059 1009 expected=has_drm or ignored or expected)
88acdbc2 1010 else:
1011 self.report_warning(msg)
1012
de6000d9 1013 def parse_outtmpl(self):
bf1824b3 1014 self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
1015 self._parse_outtmpl()
1016 return self.params['outtmpl']
1017
1018 def _parse_outtmpl(self):
7b2c3f47 1019 sanitize = IDENTITY
bf1824b3 1020 if self.params.get('restrictfilenames'): # Remove spaces in the default template
71ce444a 1021 sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')
bf1824b3 1022
1023 outtmpl = self.params.setdefault('outtmpl', {})
1024 if not isinstance(outtmpl, dict):
1025 self.params['outtmpl'] = outtmpl = {'default': outtmpl}
1026 outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})
de6000d9 1027
21cd8fae 1028 def get_output_path(self, dir_type='', filename=None):
1029 paths = self.params.get('paths', {})
1030 assert isinstance(paths, dict)
1031 path = os.path.join(
1032 expand_path(paths.get('home', '').strip()),
1033 expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
1034 filename or '')
21cd8fae 1035 return sanitize_path(path, force=self.params.get('windowsfilenames'))
1036
76a264ac 1037 @staticmethod
901130bb 1038 def _outtmpl_expandpath(outtmpl):
1039 # expand_path translates '%%' into '%' and '$$' into '$'
1040 # correspondingly that is not what we want since we need to keep
1041 # '%%' intact for template dict substitution step. Working around
1042 # with boundary-alike separator hack.
1043 sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
86e5f3ed 1044 outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')
901130bb 1045
1046 # outtmpl should be expand_path'ed before template dict substitution
1047 # because meta fields may contain env variables we don't want to
1048 # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
1049 # title "Hello $PATH", we don't want `$PATH` to be expanded.
1050 return expand_path(outtmpl).replace(sep, '')
1051
1052 @staticmethod
1053 def escape_outtmpl(outtmpl):
1054 ''' Escape any remaining strings like %s, %abc% etc. '''
1055 return re.sub(
1056 STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
1057 lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
1058 outtmpl)
1059
1060 @classmethod
1061 def validate_outtmpl(cls, outtmpl):
76a264ac 1062 ''' @return None or Exception object '''
7d1eb38a 1063 outtmpl = re.sub(
47cdc68e 1064 STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
7d1eb38a 1065 lambda mobj: f'{mobj.group(0)[:-1]}s',
1066 cls._outtmpl_expandpath(outtmpl))
76a264ac 1067 try:
7d1eb38a 1068 cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
76a264ac 1069 return None
1070 except ValueError as err:
1071 return err
1072
03b4de72 1073 @staticmethod
1074 def _copy_infodict(info_dict):
1075 info_dict = dict(info_dict)
09b49e1f 1076 info_dict.pop('__postprocessors', None)
415f8d51 1077 info_dict.pop('__pending_error', None)
03b4de72 1078 return info_dict
1079
e0fd9573 1080 def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
1081 """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
1082 @param sanitize Whether to sanitize the output as a filename.
1083 For backward compatibility, a function can also be passed
1084 """
1085
6e84b215 1086 info_dict.setdefault('epoch', int(time.time())) # keep epoch consistent once set
143db31d 1087
03b4de72 1088 info_dict = self._copy_infodict(info_dict)
752cda38 1089 info_dict['duration_string'] = ( # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
53c18592 1090 formatSeconds(info_dict['duration'], '-' if sanitize else ':')
143db31d 1091 if info_dict.get('duration', None) is not None
1092 else None)
1d485a1a 1093 info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
9c906919 1094 info_dict['video_autonumber'] = self._num_videos
752cda38 1095 if info_dict.get('resolution') is None:
1096 info_dict['resolution'] = self.format_resolution(info_dict, default=None)
143db31d 1097
e6f21b3d 1098 # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
143db31d 1099 # of %(field)s to %(field)0Nd for backward compatibility
1100 field_size_compat_map = {
0a5a191a 1101 'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
ec11a9f4 1102 'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
752cda38 1103 'autonumber': self.params.get('autonumber_size') or 5,
143db31d 1104 }
752cda38 1105
385a27fa 1106 TMPL_DICT = {}
47cdc68e 1107 EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
385a27fa 1108 MATH_FUNCTIONS = {
1109 '+': float.__add__,
1110 '-': float.__sub__,
1111 }
e625be0d 1112 # Field is of the form key1.key2...
1113 # where keys (except first) can be string, int or slice
2b8a2973 1114 FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
1d485a1a 1115 MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
385a27fa 1116 MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
1d485a1a 1117 INTERNAL_FORMAT_RE = re.compile(rf'''(?x)
e625be0d 1118 (?P<negate>-)?
1d485a1a 1119 (?P<fields>{FIELD_RE})
1120 (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
e625be0d 1121 (?:>(?P<strf_format>.+?))?
34baa9fd 1122 (?P<remaining>
1123 (?P<alternate>(?<!\\),[^|&)]+)?
1124 (?:&(?P<replacement>.*?))?
1125 (?:\|(?P<default>.*?))?
1d485a1a 1126 )$''')
752cda38 1127
2b8a2973 1128 def _traverse_infodict(k):
1129 k = k.split('.')
1130 if k[0] == '':
1131 k.pop(0)
1132 return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)
76a264ac 1133
752cda38 1134 def get_value(mdict):
1135 # Object traversal
2b8a2973 1136 value = _traverse_infodict(mdict['fields'])
752cda38 1137 # Negative
1138 if mdict['negate']:
1139 value = float_or_none(value)
1140 if value is not None:
1141 value *= -1
1142 # Do maths
385a27fa 1143 offset_key = mdict['maths']
1144 if offset_key:
752cda38 1145 value = float_or_none(value)
1146 operator = None
385a27fa 1147 while offset_key:
1148 item = re.match(
1149 MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
1150 offset_key).group(0)
1151 offset_key = offset_key[len(item):]
1152 if operator is None:
752cda38 1153 operator = MATH_FUNCTIONS[item]
385a27fa 1154 continue
1155 item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
1156 offset = float_or_none(item)
1157 if offset is None:
2b8a2973 1158 offset = float_or_none(_traverse_infodict(item))
385a27fa 1159 try:
1160 value = operator(value, multiplier * offset)
1161 except (TypeError, ZeroDivisionError):
1162 return None
1163 operator = None
752cda38 1164 # Datetime formatting
1165 if mdict['strf_format']:
7c37ff97 1166 value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))
752cda38 1167
a6bcaf71 1168 # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
1169 if sanitize and value == '':
1170 value = None
752cda38 1171 return value
1172
b868936c 1173 na = self.params.get('outtmpl_na_placeholder', 'NA')
1174
e0fd9573 1175 def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
5c3895ff 1176 return sanitize_filename(str(value), restricted=restricted, is_id=(
1177 bool(re.search(r'(^|[_.])id(\.|$)', key))
8a82af35 1178 if 'filename-sanitization' in self.params['compat_opts']
5c3895ff 1179 else NO_DEFAULT))
e0fd9573 1180
1181 sanitizer = sanitize if callable(sanitize) else filename_sanitizer
1182 sanitize = bool(sanitize)
1183
6e84b215 1184 def _dumpjson_default(obj):
1185 if isinstance(obj, (set, LazyList)):
1186 return list(obj)
adbc4ec4 1187 return repr(obj)
6e84b215 1188
752cda38 1189 def create_key(outer_mobj):
1190 if not outer_mobj.group('has_key'):
b836dc94 1191 return outer_mobj.group(0)
752cda38 1192 key = outer_mobj.group('key')
752cda38 1193 mobj = re.match(INTERNAL_FORMAT_RE, key)
e0fd9573 1194 initial_field = mobj.group('fields') if mobj else ''
e978789f 1195 value, replacement, default = None, None, na
7c37ff97 1196 while mobj:
e625be0d 1197 mobj = mobj.groupdict()
7c37ff97 1198 default = mobj['default'] if mobj['default'] is not None else default
752cda38 1199 value = get_value(mobj)
e978789f 1200 replacement = mobj['replacement']
7c37ff97 1201 if value is None and mobj['alternate']:
34baa9fd 1202 mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
7c37ff97 1203 else:
1204 break
752cda38 1205
b868936c 1206 fmt = outer_mobj.group('format')
752cda38 1207 if fmt == 's' and value is not None and key in field_size_compat_map.keys():
86e5f3ed 1208 fmt = f'0{field_size_compat_map[key]:d}d'
752cda38 1209
e978789f 1210 value = default if value is None else value if replacement is None else replacement
752cda38 1211
4476d2c7 1212 flags = outer_mobj.group('conversion') or ''
7d1eb38a 1213 str_fmt = f'{fmt[:-1]}s'
524e2e4f 1214 if fmt[-1] == 'l': # list
4476d2c7 1215 delim = '\n' if '#' in flags else ', '
9e907ebd 1216 value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
524e2e4f 1217 elif fmt[-1] == 'j': # json
4476d2c7 1218 value, fmt = json.dumps(value, default=_dumpjson_default, indent=4 if '#' in flags else None), str_fmt
47cdc68e 1219 elif fmt[-1] == 'h': # html
1220 value, fmt = escapeHTML(value), str_fmt
524e2e4f 1221 elif fmt[-1] == 'q': # quoted
4476d2c7 1222 value = map(str, variadic(value) if '#' in flags else [value])
1223 value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
524e2e4f 1224 elif fmt[-1] == 'B': # bytes
0f06bcd7 1225 value = f'%{str_fmt}'.encode() % str(value).encode()
f5aa5cfb 1226 value, fmt = value.decode('utf-8', 'ignore'), 's'
524e2e4f 1227 elif fmt[-1] == 'U': # unicode normalized
524e2e4f 1228 value, fmt = unicodedata.normalize(
1229 # "+" = compatibility equivalence, "#" = NFD
4476d2c7 1230 'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
524e2e4f 1231 value), str_fmt
e0fd9573 1232 elif fmt[-1] == 'D': # decimal suffix
abbeeebc 1233 num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
1234 value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
1235 factor=1024 if '#' in flags else 1000)
37893bb0 1236 elif fmt[-1] == 'S': # filename sanitization
e0fd9573 1237 value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
7d1eb38a 1238 elif fmt[-1] == 'c':
524e2e4f 1239 if value:
1240 value = str(value)[0]
76a264ac 1241 else:
524e2e4f 1242 fmt = str_fmt
76a264ac 1243 elif fmt[-1] not in 'rs': # numeric
a439a3a4 1244 value = float_or_none(value)
752cda38 1245 if value is None:
1246 value, fmt = default, 's'
901130bb 1247
752cda38 1248 if sanitize:
1249 if fmt[-1] == 'r':
1250 # If value is an object, sanitize might convert it to a string
1251 # So we convert it to repr first
7d1eb38a 1252 value, fmt = repr(value), str_fmt
639f1cea 1253 if fmt[-1] in 'csr':
e0fd9573 1254 value = sanitizer(initial_field, value)
901130bb 1255
b868936c 1256 key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
385a27fa 1257 TMPL_DICT[key] = value
b868936c 1258 return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))
752cda38 1259
385a27fa 1260 return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT
143db31d 1261
819e0531 1262 def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
1263 outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
1264 return self.escape_outtmpl(outtmpl) % info_dict
1265
5127e92a 1266 def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
1267 assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
1268 if outtmpl is None:
bf1824b3 1269 outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
8222d8de 1270 try:
5127e92a 1271 outtmpl = self._outtmpl_expandpath(outtmpl)
e0fd9573 1272 filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
6a0546e3 1273 if not filename:
1274 return None
15da37c7 1275
5127e92a 1276 if tmpl_type in ('', 'temp'):
6a0546e3 1277 final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
1278 if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
1279 filename = replace_extension(filename, ext, final_ext)
5127e92a 1280 elif tmpl_type:
6a0546e3 1281 force_ext = OUTTMPL_TYPES[tmpl_type]
1282 if force_ext:
1283 filename = replace_extension(filename, force_ext, info_dict.get('ext'))
de6000d9 1284
bdc3fd2f
U
1285 # https://github.com/blackjack4494/youtube-dlc/issues/85
1286 trim_file_name = self.params.get('trim_file_name', False)
1287 if trim_file_name:
5c22c63d 1288 no_ext, *ext = filename.rsplit('.', 2)
1289 filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')
bdc3fd2f 1290
0202b52a 1291 return filename
8222d8de 1292 except ValueError as err:
6febd1c1 1293 self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
8222d8de
JMF
1294 return None
1295
5127e92a 1296 def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
1297 """Generate the output filename"""
1298 if outtmpl:
1299 assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
1300 dir_type = None
1301 filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
80c03fa9 1302 if not filename and dir_type not in ('', 'temp'):
1303 return ''
de6000d9 1304
c84aeac6 1305 if warn:
21cd8fae 1306 if not self.params.get('paths'):
de6000d9 1307 pass
1308 elif filename == '-':
c84aeac6 1309 self.report_warning('--paths is ignored when an outputting to stdout', only_once=True)
de6000d9 1310 elif os.path.isabs(filename):
c84aeac6 1311 self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
de6000d9 1312 if filename == '-' or not filename:
1313 return filename
1314
21cd8fae 1315 return self.get_output_path(dir_type, filename)
0202b52a 1316
120fe513 1317 def _match_entry(self, info_dict, incomplete=False, silent=False):
ecdec191 1318 """ Returns None if the file should be downloaded """
8222d8de 1319
3bec830a 1320 video_title = info_dict.get('title', info_dict.get('id', 'entry'))
c77495e3 1321
8b0d7497 1322 def check_filter():
8b0d7497 1323 if 'title' in info_dict:
1324 # This can happen when we're just evaluating the playlist
1325 title = info_dict['title']
1326 matchtitle = self.params.get('matchtitle', False)
1327 if matchtitle:
1328 if not re.search(matchtitle, title, re.IGNORECASE):
1329 return '"' + title + '" title did not match pattern "' + matchtitle + '"'
1330 rejecttitle = self.params.get('rejecttitle', False)
1331 if rejecttitle:
1332 if re.search(rejecttitle, title, re.IGNORECASE):
1333 return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
1334 date = info_dict.get('upload_date')
1335 if date is not None:
1336 dateRange = self.params.get('daterange', DateRange())
1337 if date not in dateRange:
86e5f3ed 1338 return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
8b0d7497 1339 view_count = info_dict.get('view_count')
1340 if view_count is not None:
1341 min_views = self.params.get('min_views')
1342 if min_views is not None and view_count < min_views:
1343 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
1344 max_views = self.params.get('max_views')
1345 if max_views is not None and view_count > max_views:
1346 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
1347 if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
1348 return 'Skipping "%s" because it is age restricted' % video_title
8b0d7497 1349
8f18aca8 1350 match_filter = self.params.get('match_filter')
1351 if match_filter is not None:
1352 try:
1353 ret = match_filter(info_dict, incomplete=incomplete)
1354 except TypeError:
1355 # For backward compatibility
1356 ret = None if incomplete else match_filter(info_dict)
492272fe 1357 if ret is NO_DEFAULT:
1358 while True:
1359 filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
1360 reply = input(self._format_screen(
1361 f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
1362 if reply in {'y', ''}:
1363 return None
1364 elif reply == 'n':
1365 return f'Skipping {video_title}'
492272fe 1366 elif ret is not None:
8f18aca8 1367 return ret
8b0d7497 1368 return None
1369
c77495e3 1370 if self.in_download_archive(info_dict):
1371 reason = '%s has already been recorded in the archive' % video_title
1372 break_opt, break_err = 'break_on_existing', ExistingVideoReached
1373 else:
1374 reason = check_filter()
1375 break_opt, break_err = 'break_on_reject', RejectedVideoReached
8b0d7497 1376 if reason is not None:
120fe513 1377 if not silent:
1378 self.to_screen('[download] ' + reason)
c77495e3 1379 if self.params.get(break_opt, False):
1380 raise break_err()
8b0d7497 1381 return reason
fe7e0c98 1382
b6c45014
JMF
1383 @staticmethod
1384 def add_extra_info(info_dict, extra_info):
1385 '''Set the keys from extra_info in info dict if they are missing'''
1386 for key, value in extra_info.items():
1387 info_dict.setdefault(key, value)
1388
409e1828 1389 def extract_info(self, url, download=True, ie_key=None, extra_info=None,
61aa5ba3 1390 process=True, force_generic_extractor=False):
41d1cca3 1391 """
1392 Return a list with a dictionary for each video extracted.
1393
1394 Arguments:
1395 url -- URL to extract
1396
1397 Keyword arguments:
1398 download -- whether to download videos during extraction
1399 ie_key -- extractor key hint
1400 extra_info -- dictionary containing the extra values to add to each result
1401 process -- whether to resolve all unresolved references (URLs, playlist items),
1402 must be True for download to work.
1403 force_generic_extractor -- force using the generic extractor
1404 """
fe7e0c98 1405
409e1828 1406 if extra_info is None:
1407 extra_info = {}
1408
61aa5ba3 1409 if not ie_key and force_generic_extractor:
d22dec74
S
1410 ie_key = 'Generic'
1411
8222d8de 1412 if ie_key:
8b7491c8 1413 ies = {ie_key: self._get_info_extractor_class(ie_key)}
8222d8de
JMF
1414 else:
1415 ies = self._ies
1416
8b7491c8 1417 for ie_key, ie in ies.items():
8222d8de
JMF
1418 if not ie.suitable(url):
1419 continue
1420
1421 if not ie.working():
6febd1c1
PH
1422 self.report_warning('The program functionality for this site has been marked as broken, '
1423 'and will probably not work.')
8222d8de 1424
1151c407 1425 temp_id = ie.get_temp_id(url)
a0566bbf 1426 if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
5e5be0c0 1427 self.to_screen(f'[{ie_key}] {temp_id}: has already been recorded in the archive')
1428 if self.params.get('break_on_existing', False):
1429 raise ExistingVideoReached()
a0566bbf 1430 break
8b7491c8 1431 return self.__extract_info(url, self.get_info_extractor(ie_key), download, extra_info, process)
a0566bbf 1432 else:
1433 self.report_error('no suitable InfoExtractor for URL %s' % url)
1434
7e88d7d7 1435 def _handle_extraction_exceptions(func):
b5ae35ee 1436 @functools.wraps(func)
a0566bbf 1437 def wrapper(self, *args, **kwargs):
6da22e7d 1438 while True:
1439 try:
1440 return func(self, *args, **kwargs)
1441 except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
8222d8de 1442 raise
6da22e7d 1443 except ReExtractInfo as e:
1444 if e.expected:
1445 self.to_screen(f'{e}; Re-extracting data')
1446 else:
1447 self.to_stderr('\r')
1448 self.report_warning(f'{e}; Re-extracting data')
1449 continue
1450 except GeoRestrictedError as e:
1451 msg = e.msg
1452 if e.countries:
1453 msg += '\nThis video is available in %s.' % ', '.join(
1454 map(ISO3166Utils.short2full, e.countries))
1455 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
1456 self.report_error(msg)
1457 except ExtractorError as e: # An error we somewhat expected
1458 self.report_error(str(e), e.format_traceback())
1459 except Exception as e:
1460 if self.params.get('ignoreerrors'):
1461 self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
1462 else:
1463 raise
1464 break
a0566bbf 1465 return wrapper
1466
693f0600 1467 def _wait_for_video(self, ie_result={}):
f2ebc5c7 1468 if (not self.params.get('wait_for_video')
1469 or ie_result.get('_type', 'video') != 'video'
1470 or ie_result.get('formats') or ie_result.get('url')):
1471 return
1472
1473 format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
1474 last_msg = ''
1475
1476 def progress(msg):
1477 nonlocal last_msg
a7dc6a89 1478 full_msg = f'{msg}\n'
1479 if not self.params.get('noprogress'):
1480 full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
1481 elif last_msg:
1482 return
1483 self.to_screen(full_msg, skip_eol=True)
f2ebc5c7 1484 last_msg = msg
1485
1486 min_wait, max_wait = self.params.get('wait_for_video')
1487 diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
1488 if diff is None and ie_result.get('live_status') == 'is_upcoming':
16c620bc 1489 diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
f2ebc5c7 1490 self.report_warning('Release time of video is not known')
693f0600 1491 elif ie_result and (diff or 0) <= 0:
f2ebc5c7 1492 self.report_warning('Video should already be available according to extracted info')
38d79fd1 1493 diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
f2ebc5c7 1494 self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')
1495
1496 wait_till = time.time() + diff
1497 try:
1498 while True:
1499 diff = wait_till - time.time()
1500 if diff <= 0:
1501 progress('')
1502 raise ReExtractInfo('[wait] Wait period ended', expected=True)
1503 progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
1504 time.sleep(1)
1505 except KeyboardInterrupt:
1506 progress('')
1507 raise ReExtractInfo('[wait] Interrupted by user', expected=True)
1508 except BaseException as e:
1509 if not isinstance(e, ReExtractInfo):
1510 self.to_screen('')
1511 raise
1512
7e88d7d7 1513 @_handle_extraction_exceptions
58f197b7 1514 def __extract_info(self, url, ie, download, extra_info, process):
693f0600 1515 try:
1516 ie_result = ie.extract(url)
1517 except UserNotLive as e:
1518 if process:
1519 if self.params.get('wait_for_video'):
1520 self.report_warning(e)
1521 self._wait_for_video()
1522 raise
a0566bbf 1523 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
cb794ee0 1524 self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
a0566bbf 1525 return
1526 if isinstance(ie_result, list):
1527 # Backwards compatibility: old IE result format
1528 ie_result = {
1529 '_type': 'compat_list',
1530 'entries': ie_result,
1531 }
e37d0efb 1532 if extra_info.get('original_url'):
1533 ie_result.setdefault('original_url', extra_info['original_url'])
a0566bbf 1534 self.add_default_extra_info(ie_result, ie, url)
1535 if process:
f2ebc5c7 1536 self._wait_for_video(ie_result)
a0566bbf 1537 return self.process_ie_result(ie_result, download, extra_info)
8222d8de 1538 else:
a0566bbf 1539 return ie_result
fe7e0c98 1540
ea38e55f 1541 def add_default_extra_info(self, ie_result, ie, url):
6033d980 1542 if url is not None:
1543 self.add_extra_info(ie_result, {
1544 'webpage_url': url,
1545 'original_url': url,
57ebfca3 1546 })
1547 webpage_url = ie_result.get('webpage_url')
1548 if webpage_url:
1549 self.add_extra_info(ie_result, {
1550 'webpage_url_basename': url_basename(webpage_url),
1551 'webpage_url_domain': get_domain(webpage_url),
6033d980 1552 })
1553 if ie is not None:
1554 self.add_extra_info(ie_result, {
1555 'extractor': ie.IE_NAME,
1556 'extractor_key': ie.ie_key(),
1557 })
ea38e55f 1558
58adec46 1559 def process_ie_result(self, ie_result, download=True, extra_info=None):
8222d8de
JMF
1560 """
1561 Take the result of the ie(may be modified) and resolve all unresolved
1562 references (URLs, playlist items).
1563
1564 It will also download the videos if 'download'.
1565 Returns the resolved ie_result.
1566 """
58adec46 1567 if extra_info is None:
1568 extra_info = {}
e8ee972c
PH
1569 result_type = ie_result.get('_type', 'video')
1570
057a5206 1571 if result_type in ('url', 'url_transparent'):
8f97a15d 1572 ie_result['url'] = sanitize_url(
1573 ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
e37d0efb 1574 if ie_result.get('original_url'):
1575 extra_info.setdefault('original_url', ie_result['original_url'])
1576
057a5206 1577 extract_flat = self.params.get('extract_flat', False)
3089bc74
S
1578 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1579 or extract_flat is True):
ecb54191 1580 info_copy = ie_result.copy()
6033d980 1581 ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
360167b9 1582 if ie and not ie_result.get('id'):
4614bc22 1583 info_copy['id'] = ie.get_temp_id(ie_result['url'])
6033d980 1584 self.add_default_extra_info(info_copy, ie, ie_result['url'])
4614bc22 1585 self.add_extra_info(info_copy, extra_info)
b5475f11 1586 info_copy, _ = self.pre_process(info_copy)
ecb54191 1587 self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
415f8d51 1588 self._raise_pending_errors(info_copy)
4614bc22 1589 if self.params.get('force_write_download_archive', False):
1590 self.record_download_archive(info_copy)
e8ee972c
PH
1591 return ie_result
1592
8222d8de 1593 if result_type == 'video':
b6c45014 1594 self.add_extra_info(ie_result, extra_info)
9c2b75b5 1595 ie_result = self.process_video_result(ie_result, download=download)
415f8d51 1596 self._raise_pending_errors(ie_result)
28b0eb0f 1597 additional_urls = (ie_result or {}).get('additional_urls')
9c2b75b5 1598 if additional_urls:
e9f4ccd1 1599 # TODO: Improve MetadataParserPP to allow setting a list
14f25df2 1600 if isinstance(additional_urls, str):
9c2b75b5 1601 additional_urls = [additional_urls]
1602 self.to_screen(
1603 '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
1604 self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
1605 ie_result['additional_entries'] = [
1606 self.extract_info(
b69fd25c 1607 url, download, extra_info=extra_info,
9c2b75b5 1608 force_generic_extractor=self.params.get('force_generic_extractor'))
1609 for url in additional_urls
1610 ]
1611 return ie_result
8222d8de
JMF
1612 elif result_type == 'url':
1613 # We have to add extra_info to the results because it may be
1614 # contained in a playlist
07cce701 1615 return self.extract_info(
1616 ie_result['url'], download,
1617 ie_key=ie_result.get('ie_key'),
1618 extra_info=extra_info)
7fc3fa05
PH
1619 elif result_type == 'url_transparent':
1620 # Use the information from the embedding page
1621 info = self.extract_info(
1622 ie_result['url'], ie_key=ie_result.get('ie_key'),
1623 extra_info=extra_info, download=False, process=False)
1624
1640eb09
S
1625 # extract_info may return None when ignoreerrors is enabled and
1626 # extraction failed with an error, don't crash and return early
1627 # in this case
1628 if not info:
1629 return info
1630
3975b4d2 1631 exempted_fields = {'_type', 'url', 'ie_key'}
1632 if not ie_result.get('section_end') and ie_result.get('section_start') is None:
1633 # For video clips, the id etc of the clip extractor should be used
1634 exempted_fields |= {'id', 'extractor', 'extractor_key'}
1635
412c617d 1636 new_result = info.copy()
3975b4d2 1637 new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))
7fc3fa05 1638
0563f7ac
S
1639 # Extracted info may not be a video result (i.e.
1640 # info.get('_type', 'video') != video) but rather an url or
1641 # url_transparent. In such cases outer metadata (from ie_result)
1642 # should be propagated to inner one (info). For this to happen
1643 # _type of info should be overridden with url_transparent. This
067aa17e 1644 # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
0563f7ac
S
1645 if new_result.get('_type') == 'url':
1646 new_result['_type'] = 'url_transparent'
7fc3fa05
PH
1647
1648 return self.process_ie_result(
1649 new_result, download=download, extra_info=extra_info)
40fcba5e 1650 elif result_type in ('playlist', 'multi_video'):
30a074c2 1651 # Protect from infinite recursion due to recursively nested playlists
1652 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
1653 webpage_url = ie_result['webpage_url']
1654 if webpage_url in self._playlist_urls:
7e85e872 1655 self.to_screen(
30a074c2 1656 '[download] Skipping already downloaded playlist: %s'
1657 % ie_result.get('title') or ie_result.get('id'))
1658 return
7e85e872 1659
30a074c2 1660 self._playlist_level += 1
1661 self._playlist_urls.add(webpage_url)
03f83004 1662 self._fill_common_fields(ie_result, False)
bc516a3f 1663 self._sanitize_thumbnails(ie_result)
30a074c2 1664 try:
1665 return self.__process_playlist(ie_result, download)
1666 finally:
1667 self._playlist_level -= 1
1668 if not self._playlist_level:
1669 self._playlist_urls.clear()
8222d8de 1670 elif result_type == 'compat_list':
c9bf4114
PH
1671 self.report_warning(
1672 'Extractor %s returned a compat_list result. '
1673 'It needs to be updated.' % ie_result.get('extractor'))
5f6a1245 1674
8222d8de 1675 def _fixup(r):
b868936c 1676 self.add_extra_info(r, {
1677 'extractor': ie_result['extractor'],
1678 'webpage_url': ie_result['webpage_url'],
1679 'webpage_url_basename': url_basename(ie_result['webpage_url']),
0bb322b9 1680 'webpage_url_domain': get_domain(ie_result['webpage_url']),
b868936c 1681 'extractor_key': ie_result['extractor_key'],
1682 })
8222d8de
JMF
1683 return r
1684 ie_result['entries'] = [
b6c45014 1685 self.process_ie_result(_fixup(r), download, extra_info)
8222d8de
JMF
1686 for r in ie_result['entries']
1687 ]
1688 return ie_result
1689 else:
1690 raise Exception('Invalid result type: %s' % result_type)
1691
e92caff5 1692 def _ensure_dir_exists(self, path):
1693 return make_dir(path, self.report_error)
1694
3b603dbd 1695 @staticmethod
3bec830a 1696 def _playlist_infodict(ie_result, strict=False, **kwargs):
1697 info = {
1698 'playlist_count': ie_result.get('playlist_count'),
3b603dbd 1699 'playlist': ie_result.get('title') or ie_result.get('id'),
1700 'playlist_id': ie_result.get('id'),
1701 'playlist_title': ie_result.get('title'),
1702 'playlist_uploader': ie_result.get('uploader'),
1703 'playlist_uploader_id': ie_result.get('uploader_id'),
3b603dbd 1704 **kwargs,
1705 }
3bec830a 1706 if strict:
1707 return info
1708 return {
1709 **info,
1710 'playlist_index': 0,
1711 '__last_playlist_index': max(ie_result['requested_entries'] or (0, 0)),
1712 'extractor': ie_result['extractor'],
1713 'webpage_url': ie_result['webpage_url'],
1714 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1715 'webpage_url_domain': get_domain(ie_result['webpage_url']),
1716 'extractor_key': ie_result['extractor_key'],
1717 }
3b603dbd 1718
30a074c2 1719 def __process_playlist(self, ie_result, download):
7e88d7d7 1720 """Process each entry in the playlist"""
f5ea4748 1721 assert ie_result['_type'] in ('playlist', 'multi_video')
1722
3bec830a 1723 common_info = self._playlist_infodict(ie_result, strict=True)
3955b207 1724 title = common_info.get('playlist') or '<Untitled>'
3bec830a 1725 if self._match_entry(common_info, incomplete=True) is not None:
1726 return
c6e07cf1 1727 self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')
f0d785d3 1728
7e88d7d7 1729 all_entries = PlaylistEntries(self, ie_result)
7e9a6125 1730 entries = orderedSet(all_entries.get_requested_items(), lazy=True)
1731
1732 lazy = self.params.get('lazy_playlist')
1733 if lazy:
1734 resolved_entries, n_entries = [], 'N/A'
1735 ie_result['requested_entries'], ie_result['entries'] = None, None
1736 else:
1737 entries = resolved_entries = list(entries)
1738 n_entries = len(resolved_entries)
1739 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
1740 if not ie_result.get('playlist_count'):
1741 # Better to do this after potentially exhausting entries
1742 ie_result['playlist_count'] = all_entries.get_full_count()
498f5606 1743
0647d925 1744 extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
1745 ie_copy = collections.ChainMap(ie_result, extra)
3bec830a 1746
e08a85d8 1747 _infojson_written = False
0bfc53d0 1748 write_playlist_files = self.params.get('allow_playlist_files', True)
1749 if write_playlist_files and self.params.get('list_thumbnails'):
1750 self.list_thumbnails(ie_result)
1751 if write_playlist_files and not self.params.get('simulate'):
e08a85d8 1752 _infojson_written = self._write_info_json(
1753 'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
1754 if _infojson_written is None:
80c03fa9 1755 return
1756 if self._write_description('playlist', ie_result,
1757 self.prepare_filename(ie_copy, 'pl_description')) is None:
1758 return
681de68e 1759 # TODO: This should be passed to ThumbnailsConvertor if necessary
3bec830a 1760 self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))
30a074c2 1761
7e9a6125 1762 if lazy:
1763 if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
1764 self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
1765 elif self.params.get('playlistreverse'):
1766 entries.reverse()
1767 elif self.params.get('playlistrandom'):
30a074c2 1768 random.shuffle(entries)
1769
7e88d7d7 1770 self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} videos'
1771 f'{format_field(ie_result, "playlist_count", " of %s")}')
30a074c2 1772
134c913c 1773 keep_resolved_entries = self.params.get('extract_flat') != 'discard'
1774 if self.params.get('extract_flat') == 'discard_in_playlist':
1775 keep_resolved_entries = ie_result['_type'] != 'playlist'
1776 if keep_resolved_entries:
1777 self.write_debug('The information of all playlist entries will be held in memory')
1778
26e2805c 1779 failures = 0
1780 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
7e9a6125 1781 for i, (playlist_index, entry) in enumerate(entries):
1782 if lazy:
1783 resolved_entries.append((playlist_index, entry))
3bec830a 1784 if not entry:
7e88d7d7 1785 continue
1786
7e88d7d7 1787 entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
7e9a6125 1788 if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
1789 playlist_index = ie_result['requested_entries'][i]
1790
0647d925 1791 entry_copy = collections.ChainMap(entry, {
3bec830a 1792 **common_info,
3955b207 1793 'n_entries': int_or_none(n_entries),
71729754 1794 'playlist_index': playlist_index,
7e9a6125 1795 'playlist_autonumber': i + 1,
0647d925 1796 })
3bec830a 1797
0647d925 1798 if self._match_entry(entry_copy, incomplete=True) is not None:
3bec830a 1799 continue
1800
1801 self.to_screen('[download] Downloading video %s of %s' % (
1802 self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
1803
a6ca61d4 1804 extra.update({
1805 'playlist_index': playlist_index,
1806 'playlist_autonumber': i + 1,
1807 })
3bec830a 1808 entry_result = self.__process_iterable_entry(entry, download, extra)
26e2805c 1809 if not entry_result:
1810 failures += 1
1811 if failures >= max_failures:
1812 self.report_error(
7e88d7d7 1813 f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
26e2805c 1814 break
134c913c 1815 if keep_resolved_entries:
1816 resolved_entries[i] = (playlist_index, entry_result)
7e88d7d7 1817
1818 # Update with processed data
7e9a6125 1819 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
e08a85d8 1820
1821 # Write the updated info to json
cb96c5be 1822 if _infojson_written is True and self._write_info_json(
e08a85d8 1823 'updated playlist', ie_result,
1824 self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
1825 return
ca30f449 1826
ed5835b4 1827 ie_result = self.run_all_pps('playlist', ie_result)
7e88d7d7 1828 self.to_screen(f'[download] Finished downloading playlist: {title}')
30a074c2 1829 return ie_result
1830
7e88d7d7 1831 @_handle_extraction_exceptions
a0566bbf 1832 def __process_iterable_entry(self, entry, download, extra_info):
1833 return self.process_ie_result(
1834 entry, download=download, extra_info=extra_info)
1835
67134eab
JMF
1836 def _build_format_filter(self, filter_spec):
1837 " Returns a function to filter the formats according to the filter_spec "
083c9df9
PH
1838
1839 OPERATORS = {
1840 '<': operator.lt,
1841 '<=': operator.le,
1842 '>': operator.gt,
1843 '>=': operator.ge,
1844 '=': operator.eq,
1845 '!=': operator.ne,
1846 }
67134eab 1847 operator_rex = re.compile(r'''(?x)\s*
187986a8 1848 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
1849 (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1850 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
083c9df9 1851 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
187986a8 1852 m = operator_rex.fullmatch(filter_spec)
9ddb6925
S
1853 if m:
1854 try:
1855 comparison_value = int(m.group('value'))
1856 except ValueError:
1857 comparison_value = parse_filesize(m.group('value'))
1858 if comparison_value is None:
1859 comparison_value = parse_filesize(m.group('value') + 'B')
1860 if comparison_value is None:
1861 raise ValueError(
1862 'Invalid value %r in format specification %r' % (
67134eab 1863 m.group('value'), filter_spec))
9ddb6925
S
1864 op = OPERATORS[m.group('op')]
1865
083c9df9 1866 if not m:
9ddb6925
S
1867 STR_OPERATORS = {
1868 '=': operator.eq,
10d33b34
YCH
1869 '^=': lambda attr, value: attr.startswith(value),
1870 '$=': lambda attr, value: attr.endswith(value),
1871 '*=': lambda attr, value: value in attr,
1ce9a3cb 1872 '~=': lambda attr, value: value.search(attr) is not None
9ddb6925 1873 }
187986a8 1874 str_operator_rex = re.compile(r'''(?x)\s*
1875 (?P<key>[a-zA-Z0-9._-]+)\s*
1ce9a3cb
LF
1876 (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
1877 (?P<quote>["'])?
1878 (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
1879 (?(quote)(?P=quote))\s*
9ddb6925 1880 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
187986a8 1881 m = str_operator_rex.fullmatch(filter_spec)
9ddb6925 1882 if m:
1ce9a3cb
LF
1883 if m.group('op') == '~=':
1884 comparison_value = re.compile(m.group('value'))
1885 else:
1886 comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
2cc779f4
S
1887 str_op = STR_OPERATORS[m.group('op')]
1888 if m.group('negation'):
e118a879 1889 op = lambda attr, value: not str_op(attr, value)
2cc779f4
S
1890 else:
1891 op = str_op
083c9df9 1892
9ddb6925 1893 if not m:
187986a8 1894 raise SyntaxError('Invalid filter specification %r' % filter_spec)
083c9df9
PH
1895
1896 def _filter(f):
1897 actual_value = f.get(m.group('key'))
1898 if actual_value is None:
1899 return m.group('none_inclusive')
1900 return op(actual_value, comparison_value)
67134eab
JMF
1901 return _filter
1902
9f1a1c36 1903 def _check_formats(self, formats):
1904 for f in formats:
1905 self.to_screen('[info] Testing format %s' % f['format_id'])
75689fe5 1906 path = self.get_output_path('temp')
1907 if not self._ensure_dir_exists(f'{path}/'):
1908 continue
1909 temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
9f1a1c36 1910 temp_file.close()
1911 try:
1912 success, _ = self.dl(temp_file.name, f, test=True)
8a82af35 1913 except (DownloadError, OSError, ValueError) + network_exceptions:
9f1a1c36 1914 success = False
1915 finally:
1916 if os.path.exists(temp_file.name):
1917 try:
1918 os.remove(temp_file.name)
1919 except OSError:
1920 self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
1921 if success:
1922 yield f
1923 else:
1924 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
1925
0017d9ad 1926 def _default_format_spec(self, info_dict, download=True):
0017d9ad 1927
af0f7428
S
1928 def can_merge():
1929 merger = FFmpegMergerPP(self)
1930 return merger.available and merger.can_merge()
1931
91ebc640 1932 prefer_best = (
b7b04c78 1933 not self.params.get('simulate')
91ebc640 1934 and download
1935 and (
1936 not can_merge()
21633673 1937 or info_dict.get('is_live') and not self.params.get('live_from_start')
bf1824b3 1938 or self.params['outtmpl']['default'] == '-'))
53ed7066 1939 compat = (
1940 prefer_best
1941 or self.params.get('allow_multiple_audio_streams', False)
8a82af35 1942 or 'format-spec' in self.params['compat_opts'])
91ebc640 1943
1944 return (
53ed7066 1945 'best/bestvideo+bestaudio' if prefer_best
1946 else 'bestvideo*+bestaudio/best' if not compat
91ebc640 1947 else 'bestvideo+bestaudio/best')
0017d9ad 1948
67134eab
JMF
1949 def build_format_selector(self, format_spec):
1950 def syntax_error(note, start):
1951 message = (
1952 'Invalid format specification: '
86e5f3ed 1953 '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
67134eab
JMF
1954 return SyntaxError(message)
1955
1956 PICKFIRST = 'PICKFIRST'
1957 MERGE = 'MERGE'
1958 SINGLE = 'SINGLE'
0130afb7 1959 GROUP = 'GROUP'
67134eab
JMF
1960 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
1961
91ebc640 1962 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
1963 'video': self.params.get('allow_multiple_video_streams', False)}
909d24dd 1964
9f1a1c36 1965 check_formats = self.params.get('check_formats') == 'selected'
e8e73840 1966
67134eab
JMF
1967 def _parse_filter(tokens):
1968 filter_parts = []
1969 for type, string, start, _, _ in tokens:
1970 if type == tokenize.OP and string == ']':
1971 return ''.join(filter_parts)
1972 else:
1973 filter_parts.append(string)
1974
232541df 1975 def _remove_unused_ops(tokens):
17cc1534 1976 # Remove operators that we don't use and join them with the surrounding strings
232541df
JMF
1977 # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
1978 ALLOWED_OPS = ('/', '+', ',', '(', ')')
1979 last_string, last_start, last_end, last_line = None, None, None, None
1980 for type, string, start, end, line in tokens:
1981 if type == tokenize.OP and string == '[':
1982 if last_string:
1983 yield tokenize.NAME, last_string, last_start, last_end, last_line
1984 last_string = None
1985 yield type, string, start, end, line
1986 # everything inside brackets will be handled by _parse_filter
1987 for type, string, start, end, line in tokens:
1988 yield type, string, start, end, line
1989 if type == tokenize.OP and string == ']':
1990 break
1991 elif type == tokenize.OP and string in ALLOWED_OPS:
1992 if last_string:
1993 yield tokenize.NAME, last_string, last_start, last_end, last_line
1994 last_string = None
1995 yield type, string, start, end, line
1996 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
1997 if not last_string:
1998 last_string = string
1999 last_start = start
2000 last_end = end
2001 else:
2002 last_string += string
2003 if last_string:
2004 yield tokenize.NAME, last_string, last_start, last_end, last_line
2005
cf2ac6df 2006 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
67134eab
JMF
2007 selectors = []
2008 current_selector = None
2009 for type, string, start, _, _ in tokens:
2010 # ENCODING is only defined in python 3.x
2011 if type == getattr(tokenize, 'ENCODING', None):
2012 continue
2013 elif type in [tokenize.NAME, tokenize.NUMBER]:
2014 current_selector = FormatSelector(SINGLE, string, [])
2015 elif type == tokenize.OP:
cf2ac6df
JMF
2016 if string == ')':
2017 if not inside_group:
2018 # ')' will be handled by the parentheses group
2019 tokens.restore_last_token()
67134eab 2020 break
cf2ac6df 2021 elif inside_merge and string in ['/', ',']:
0130afb7
JMF
2022 tokens.restore_last_token()
2023 break
cf2ac6df
JMF
2024 elif inside_choice and string == ',':
2025 tokens.restore_last_token()
2026 break
2027 elif string == ',':
0a31a350
JMF
2028 if not current_selector:
2029 raise syntax_error('"," must follow a format selector', start)
67134eab
JMF
2030 selectors.append(current_selector)
2031 current_selector = None
2032 elif string == '/':
d96d604e
JMF
2033 if not current_selector:
2034 raise syntax_error('"/" must follow a format selector', start)
67134eab 2035 first_choice = current_selector
cf2ac6df 2036 second_choice = _parse_format_selection(tokens, inside_choice=True)
f5f4a27a 2037 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
67134eab
JMF
2038 elif string == '[':
2039 if not current_selector:
2040 current_selector = FormatSelector(SINGLE, 'best', [])
2041 format_filter = _parse_filter(tokens)
2042 current_selector.filters.append(format_filter)
0130afb7
JMF
2043 elif string == '(':
2044 if current_selector:
2045 raise syntax_error('Unexpected "("', start)
cf2ac6df
JMF
2046 group = _parse_format_selection(tokens, inside_group=True)
2047 current_selector = FormatSelector(GROUP, group, [])
67134eab 2048 elif string == '+':
d03cfdce 2049 if not current_selector:
2050 raise syntax_error('Unexpected "+"', start)
2051 selector_1 = current_selector
2052 selector_2 = _parse_format_selection(tokens, inside_merge=True)
2053 if not selector_2:
2054 raise syntax_error('Expected a selector', start)
2055 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
67134eab 2056 else:
86e5f3ed 2057 raise syntax_error(f'Operator not recognized: "{string}"', start)
67134eab
JMF
2058 elif type == tokenize.ENDMARKER:
2059 break
2060 if current_selector:
2061 selectors.append(current_selector)
2062 return selectors
2063
f8d4ad9a 2064 def _merge(formats_pair):
2065 format_1, format_2 = formats_pair
2066
2067 formats_info = []
2068 formats_info.extend(format_1.get('requested_formats', (format_1,)))
2069 formats_info.extend(format_2.get('requested_formats', (format_2,)))
2070
2071 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
551f9388 2072 get_no_more = {'video': False, 'audio': False}
f8d4ad9a 2073 for (i, fmt_info) in enumerate(formats_info):
551f9388 2074 if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
2075 formats_info.pop(i)
2076 continue
2077 for aud_vid in ['audio', 'video']:
f8d4ad9a 2078 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
2079 if get_no_more[aud_vid]:
2080 formats_info.pop(i)
f5510afe 2081 break
f8d4ad9a 2082 get_no_more[aud_vid] = True
2083
2084 if len(formats_info) == 1:
2085 return formats_info[0]
2086
2087 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
2088 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
2089
2090 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
2091 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
2092
fc61aff4
LL
2093 output_ext = get_compatible_ext(
2094 vcodecs=[f.get('vcodec') for f in video_fmts],
2095 acodecs=[f.get('acodec') for f in audio_fmts],
2096 vexts=[f['ext'] for f in video_fmts],
2097 aexts=[f['ext'] for f in audio_fmts],
2098 preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
2099 or self.params.get('prefer_free_formats') and ('webm', 'mkv')))
f8d4ad9a 2100
975a0d0d 2101 filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
2102
f8d4ad9a 2103 new_dict = {
2104 'requested_formats': formats_info,
975a0d0d 2105 'format': '+'.join(filtered('format')),
2106 'format_id': '+'.join(filtered('format_id')),
f8d4ad9a 2107 'ext': output_ext,
975a0d0d 2108 'protocol': '+'.join(map(determine_protocol, formats_info)),
093a1710 2109 'language': '+'.join(orderedSet(filtered('language'))) or None,
2110 'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
2111 'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
975a0d0d 2112 'tbr': sum(filtered('tbr', 'vbr', 'abr')),
f8d4ad9a 2113 }
2114
2115 if the_only_video:
2116 new_dict.update({
2117 'width': the_only_video.get('width'),
2118 'height': the_only_video.get('height'),
2119 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
2120 'fps': the_only_video.get('fps'),
49a57e70 2121 'dynamic_range': the_only_video.get('dynamic_range'),
f8d4ad9a 2122 'vcodec': the_only_video.get('vcodec'),
2123 'vbr': the_only_video.get('vbr'),
2124 'stretched_ratio': the_only_video.get('stretched_ratio'),
2125 })
2126
2127 if the_only_audio:
2128 new_dict.update({
2129 'acodec': the_only_audio.get('acodec'),
2130 'abr': the_only_audio.get('abr'),
975a0d0d 2131 'asr': the_only_audio.get('asr'),
f8d4ad9a 2132 })
2133
2134 return new_dict
2135
e8e73840 2136 def _check_formats(formats):
981052c9 2137 if not check_formats:
2138 yield from formats
b5ac45b1 2139 return
9f1a1c36 2140 yield from self._check_formats(formats)
e8e73840 2141
67134eab 2142 def _build_selector_function(selector):
909d24dd 2143 if isinstance(selector, list): # ,
67134eab
JMF
2144 fs = [_build_selector_function(s) for s in selector]
2145
317f7ab6 2146 def selector_function(ctx):
67134eab 2147 for f in fs:
981052c9 2148 yield from f(ctx)
67134eab 2149 return selector_function
909d24dd 2150
2151 elif selector.type == GROUP: # ()
0130afb7 2152 selector_function = _build_selector_function(selector.selector)
909d24dd 2153
2154 elif selector.type == PICKFIRST: # /
67134eab
JMF
2155 fs = [_build_selector_function(s) for s in selector.selector]
2156
317f7ab6 2157 def selector_function(ctx):
67134eab 2158 for f in fs:
317f7ab6 2159 picked_formats = list(f(ctx))
67134eab
JMF
2160 if picked_formats:
2161 return picked_formats
2162 return []
67134eab 2163
981052c9 2164 elif selector.type == MERGE: # +
2165 selector_1, selector_2 = map(_build_selector_function, selector.selector)
2166
2167 def selector_function(ctx):
adbc4ec4 2168 for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
981052c9 2169 yield _merge(pair)
2170
909d24dd 2171 elif selector.type == SINGLE: # atom
598d185d 2172 format_spec = selector.selector or 'best'
909d24dd 2173
f8d4ad9a 2174 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
909d24dd 2175 if format_spec == 'all':
2176 def selector_function(ctx):
9222c381 2177 yield from _check_formats(ctx['formats'][::-1])
f8d4ad9a 2178 elif format_spec == 'mergeall':
2179 def selector_function(ctx):
316f2650 2180 formats = list(_check_formats(
2181 f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
e01d6aa4 2182 if not formats:
2183 return
921b76ca 2184 merged_format = formats[-1]
2185 for f in formats[-2::-1]:
f8d4ad9a 2186 merged_format = _merge((merged_format, f))
2187 yield merged_format
909d24dd 2188
2189 else:
85e801a9 2190 format_fallback, seperate_fallback, format_reverse, format_idx = False, None, True, 1
eff63539 2191 mobj = re.match(
2192 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
2193 format_spec)
2194 if mobj is not None:
2195 format_idx = int_or_none(mobj.group('n'), default=1)
e8e73840 2196 format_reverse = mobj.group('bw')[0] == 'b'
eff63539 2197 format_type = (mobj.group('type') or [None])[0]
2198 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
2199 format_modified = mobj.group('mod') is not None
909d24dd 2200
2201 format_fallback = not format_type and not format_modified # for b, w
8326b00a 2202 _filter_f = (
eff63539 2203 (lambda f: f.get('%scodec' % format_type) != 'none')
2204 if format_type and format_modified # bv*, ba*, wv*, wa*
2205 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
2206 if format_type # bv, ba, wv, wa
2207 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
2208 if not format_modified # b, w
8326b00a 2209 else lambda f: True) # b*, w*
2210 filter_f = lambda f: _filter_f(f) and (
2211 f.get('vcodec') != 'none' or f.get('acodec') != 'none')
67134eab 2212 else:
48ee10ee 2213 if format_spec in self._format_selection_exts['audio']:
b11c04a8 2214 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
48ee10ee 2215 elif format_spec in self._format_selection_exts['video']:
b11c04a8 2216 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
85e801a9 2217 seperate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
48ee10ee 2218 elif format_spec in self._format_selection_exts['storyboards']:
b11c04a8 2219 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
2220 else:
b5ae35ee 2221 filter_f = lambda f: f.get('format_id') == format_spec # id
909d24dd 2222
2223 def selector_function(ctx):
2224 formats = list(ctx['formats'])
909d24dd 2225 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
85e801a9 2226 if not matches:
2227 if format_fallback and ctx['incomplete_formats']:
2228 # for extractors with incomplete formats (audio only (soundcloud)
2229 # or video only (imgur)) best/worst will fallback to
2230 # best/worst {video,audio}-only format
2231 matches = formats
2232 elif seperate_fallback and not ctx['has_merged_format']:
2233 # for compatibility with youtube-dl when there is no pre-merged format
2234 matches = list(filter(seperate_fallback, formats))
981052c9 2235 matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
2236 try:
e8e73840 2237 yield matches[format_idx - 1]
4abea8ca 2238 except LazyList.IndexError:
981052c9 2239 return
083c9df9 2240
67134eab 2241 filters = [self._build_format_filter(f) for f in selector.filters]
083c9df9 2242
317f7ab6 2243 def final_selector(ctx):
adbc4ec4 2244 ctx_copy = dict(ctx)
67134eab 2245 for _filter in filters:
317f7ab6
S
2246 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
2247 return selector_function(ctx_copy)
67134eab 2248 return final_selector
083c9df9 2249
0f06bcd7 2250 stream = io.BytesIO(format_spec.encode())
0130afb7 2251 try:
f9934b96 2252 tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
0130afb7
JMF
2253 except tokenize.TokenError:
2254 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
2255
86e5f3ed 2256 class TokenIterator:
0130afb7
JMF
2257 def __init__(self, tokens):
2258 self.tokens = tokens
2259 self.counter = 0
2260
2261 def __iter__(self):
2262 return self
2263
2264 def __next__(self):
2265 if self.counter >= len(self.tokens):
2266 raise StopIteration()
2267 value = self.tokens[self.counter]
2268 self.counter += 1
2269 return value
2270
2271 next = __next__
2272
2273 def restore_last_token(self):
2274 self.counter -= 1
2275
2276 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
67134eab 2277 return _build_selector_function(parsed_selector)
a9c58ad9 2278
e5660ee6 2279 def _calc_headers(self, info_dict):
8b7539d2 2280 res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})
e5660ee6 2281
c487cf00 2282 cookies = self._calc_cookies(info_dict['url'])
e5660ee6
JMF
2283 if cookies:
2284 res['Cookie'] = cookies
2285
0016b84e
S
2286 if 'X-Forwarded-For' not in res:
2287 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
2288 if x_forwarded_for_ip:
2289 res['X-Forwarded-For'] = x_forwarded_for_ip
2290
e5660ee6
JMF
2291 return res
2292
c487cf00 2293 def _calc_cookies(self, url):
2294 pr = sanitized_Request(url)
e5660ee6 2295 self.cookiejar.add_cookie_header(pr)
662435f7 2296 return pr.get_header('Cookie')
e5660ee6 2297
9f1a1c36 2298 def _sort_thumbnails(self, thumbnails):
2299 thumbnails.sort(key=lambda t: (
2300 t.get('preference') if t.get('preference') is not None else -1,
2301 t.get('width') if t.get('width') is not None else -1,
2302 t.get('height') if t.get('height') is not None else -1,
2303 t.get('id') if t.get('id') is not None else '',
2304 t.get('url')))
2305
b0249bca 2306 def _sanitize_thumbnails(self, info_dict):
bc516a3f 2307 thumbnails = info_dict.get('thumbnails')
2308 if thumbnails is None:
2309 thumbnail = info_dict.get('thumbnail')
2310 if thumbnail:
2311 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
9f1a1c36 2312 if not thumbnails:
2313 return
2314
2315 def check_thumbnails(thumbnails):
2316 for t in thumbnails:
2317 self.to_screen(f'[info] Testing thumbnail {t["id"]}')
2318 try:
2319 self.urlopen(HEADRequest(t['url']))
2320 except network_exceptions as err:
2321 self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
2322 continue
2323 yield t
2324
2325 self._sort_thumbnails(thumbnails)
2326 for i, t in enumerate(thumbnails):
2327 if t.get('id') is None:
2328 t['id'] = '%d' % i
2329 if t.get('width') and t.get('height'):
2330 t['resolution'] = '%dx%d' % (t['width'], t['height'])
2331 t['url'] = sanitize_url(t['url'])
2332
2333 if self.params.get('check_formats') is True:
282f5709 2334 info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
9f1a1c36 2335 else:
2336 info_dict['thumbnails'] = thumbnails
bc516a3f 2337
03f83004
LNO
2338 def _fill_common_fields(self, info_dict, is_video=True):
2339 # TODO: move sanitization here
2340 if is_video:
2341 # playlists are allowed to lack "title"
d4736fdb 2342 title = info_dict.get('title', NO_DEFAULT)
2343 if title is NO_DEFAULT:
03f83004
LNO
2344 raise ExtractorError('Missing "title" field in extractor result',
2345 video_id=info_dict['id'], ie=info_dict['extractor'])
d4736fdb 2346 info_dict['fulltitle'] = title
2347 if not title:
2348 if title == '':
2349 self.write_debug('Extractor gave empty title. Creating a generic title')
2350 else:
2351 self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
1d485a1a 2352 info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'
03f83004
LNO
2353
2354 if info_dict.get('duration') is not None:
2355 info_dict['duration_string'] = formatSeconds(info_dict['duration'])
2356
2357 for ts_key, date_key in (
2358 ('timestamp', 'upload_date'),
2359 ('release_timestamp', 'release_date'),
2360 ('modified_timestamp', 'modified_date'),
2361 ):
2362 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
2363 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2364 # see http://bugs.python.org/issue1646728)
19a03940 2365 with contextlib.suppress(ValueError, OverflowError, OSError):
03f83004
LNO
2366 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
2367 info_dict[date_key] = upload_date.strftime('%Y%m%d')
03f83004
LNO
2368
2369 live_keys = ('is_live', 'was_live')
2370 live_status = info_dict.get('live_status')
2371 if live_status is None:
2372 for key in live_keys:
2373 if info_dict.get(key) is False:
2374 continue
2375 if info_dict.get(key):
2376 live_status = key
2377 break
2378 if all(info_dict.get(key) is False for key in live_keys):
2379 live_status = 'not_live'
2380 if live_status:
2381 info_dict['live_status'] = live_status
2382 for key in live_keys:
2383 if info_dict.get(key) is None:
2384 info_dict[key] = (live_status == key)
2385
2386 # Auto generate title fields corresponding to the *_number fields when missing
2387 # in order to always have clean titles. This is very common for TV series.
2388 for field in ('chapter', 'season', 'episode'):
2389 if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
2390 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
2391
415f8d51 2392 def _raise_pending_errors(self, info):
2393 err = info.pop('__pending_error', None)
2394 if err:
2395 self.report_error(err, tb=False)
2396
dd82ffea
JMF
2397 def process_video_result(self, info_dict, download=True):
2398 assert info_dict.get('_type', 'video') == 'video'
9c906919 2399 self._num_videos += 1
dd82ffea 2400
bec1fad2 2401 if 'id' not in info_dict:
fc08bdd6 2402 raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
2403 elif not info_dict.get('id'):
2404 raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])
455a15e2 2405
c9969434
S
2406 def report_force_conversion(field, field_not, conversion):
2407 self.report_warning(
2408 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2409 % (field, field_not, conversion))
2410
2411 def sanitize_string_field(info, string_field):
2412 field = info.get(string_field)
14f25df2 2413 if field is None or isinstance(field, str):
c9969434
S
2414 return
2415 report_force_conversion(string_field, 'a string', 'string')
14f25df2 2416 info[string_field] = str(field)
c9969434
S
2417
2418 def sanitize_numeric_fields(info):
2419 for numeric_field in self._NUMERIC_FIELDS:
2420 field = info.get(numeric_field)
f9934b96 2421 if field is None or isinstance(field, (int, float)):
c9969434
S
2422 continue
2423 report_force_conversion(numeric_field, 'numeric', 'int')
2424 info[numeric_field] = int_or_none(field)
2425
2426 sanitize_string_field(info_dict, 'id')
2427 sanitize_numeric_fields(info_dict)
3975b4d2 2428 if info_dict.get('section_end') and info_dict.get('section_start') is not None:
2429 info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
4c3f8c3f 2430 if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
50e93e03 2431 self.report_warning('"duration" field is negative, there is an error in extractor')
be6217b2 2432
9eef7c4e 2433 chapters = info_dict.get('chapters') or []
a3976e07 2434 if chapters and chapters[0].get('start_time'):
2435 chapters.insert(0, {'start_time': 0})
2436
9eef7c4e 2437 dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
a3976e07 2438 for idx, (prev, current, next_) in enumerate(zip(
2439 (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
9eef7c4e 2440 if current.get('start_time') is None:
2441 current['start_time'] = prev.get('end_time')
2442 if not current.get('end_time'):
2443 current['end_time'] = next_.get('start_time')
a3976e07 2444 if not current.get('title'):
2445 current['title'] = f'<Untitled Chapter {idx}>'
9eef7c4e 2446
dd82ffea
JMF
2447 if 'playlist' not in info_dict:
2448 # It isn't part of a playlist
2449 info_dict['playlist'] = None
2450 info_dict['playlist_index'] = None
2451
bc516a3f 2452 self._sanitize_thumbnails(info_dict)
d5519808 2453
536a55da 2454 thumbnail = info_dict.get('thumbnail')
bc516a3f 2455 thumbnails = info_dict.get('thumbnails')
536a55da
S
2456 if thumbnail:
2457 info_dict['thumbnail'] = sanitize_url(thumbnail)
2458 elif thumbnails:
d5519808
PH
2459 info_dict['thumbnail'] = thumbnails[-1]['url']
2460
ae30b840 2461 if info_dict.get('display_id') is None and 'id' in info_dict:
0afef30b
PH
2462 info_dict['display_id'] = info_dict['id']
2463
03f83004 2464 self._fill_common_fields(info_dict)
33d2fc2f 2465
05108a49
S
2466 for cc_kind in ('subtitles', 'automatic_captions'):
2467 cc = info_dict.get(cc_kind)
2468 if cc:
2469 for _, subtitle in cc.items():
2470 for subtitle_format in subtitle:
2471 if subtitle_format.get('url'):
2472 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
2473 if subtitle_format.get('ext') is None:
2474 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
2475
2476 automatic_captions = info_dict.get('automatic_captions')
4bba3716 2477 subtitles = info_dict.get('subtitles')
4bba3716 2478
360e1ca5 2479 info_dict['requested_subtitles'] = self.process_subtitles(
05108a49 2480 info_dict['id'], subtitles, automatic_captions)
a504ced0 2481
dd82ffea
JMF
2482 if info_dict.get('formats') is None:
2483 # There's only one format available
2484 formats = [info_dict]
2485 else:
2486 formats = info_dict['formats']
2487
0a5a191a 2488 # or None ensures --clean-infojson removes it
2489 info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
88acdbc2 2490 if not self.params.get('allow_unplayable_formats'):
2491 formats = [f for f in formats if not f.get('has_drm')]
7356a444 2492 if info_dict['_has_drm'] and formats and all(
c0b6e5c7 2493 f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
2494 self.report_warning(
2495 'This video is DRM protected and only images are available for download. '
2496 'Use --list-formats to see them')
88acdbc2 2497
319b6059 2498 get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
2499 if not get_from_start:
2500 info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
2501 if info_dict.get('is_live') and formats:
adbc4ec4 2502 formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
319b6059 2503 if get_from_start and not formats:
a44ca5a4 2504 self.raise_no_formats(info_dict, msg=(
2505 '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
2506 'If you want to download from the current time, use --no-live-from-start'))
adbc4ec4 2507
db95dc13 2508 if not formats:
1151c407 2509 self.raise_no_formats(info_dict)
db95dc13 2510
73af5cc8
S
2511 def is_wellformed(f):
2512 url = f.get('url')
a5ac0c47 2513 if not url:
73af5cc8
S
2514 self.report_warning(
2515 '"url" field is missing or empty - skipping format, '
2516 'there is an error in extractor')
a5ac0c47
S
2517 return False
2518 if isinstance(url, bytes):
2519 sanitize_string_field(f, 'url')
2520 return True
73af5cc8
S
2521
2522 # Filter out malformed formats for better extraction robustness
2523 formats = list(filter(is_wellformed, formats))
2524
181c7053
S
2525 formats_dict = {}
2526
dd82ffea 2527 # We check that all the formats have the format and format_id fields
db95dc13 2528 for i, format in enumerate(formats):
c9969434
S
2529 sanitize_string_field(format, 'format_id')
2530 sanitize_numeric_fields(format)
dcf77cf1 2531 format['url'] = sanitize_url(format['url'])
e74e3b63 2532 if not format.get('format_id'):
14f25df2 2533 format['format_id'] = str(i)
e2effb08
S
2534 else:
2535 # Sanitize format_id from characters used in format selector expression
ec85ded8 2536 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
181c7053
S
2537 format_id = format['format_id']
2538 if format_id not in formats_dict:
2539 formats_dict[format_id] = []
2540 formats_dict[format_id].append(format)
2541
2542 # Make sure all formats have unique format_id
03b4de72 2543 common_exts = set(itertools.chain(*self._format_selection_exts.values()))
181c7053 2544 for format_id, ambiguous_formats in formats_dict.items():
48ee10ee 2545 ambigious_id = len(ambiguous_formats) > 1
2546 for i, format in enumerate(ambiguous_formats):
2547 if ambigious_id:
181c7053 2548 format['format_id'] = '%s-%d' % (format_id, i)
48ee10ee 2549 if format.get('ext') is None:
2550 format['ext'] = determine_ext(format['url']).lower()
2551 # Ensure there is no conflict between id and ext in format selection
2552 # See https://github.com/yt-dlp/yt-dlp/issues/1282
2553 if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
2554 format['format_id'] = 'f%s' % format['format_id']
181c7053
S
2555
2556 for i, format in enumerate(formats):
8c51aa65 2557 if format.get('format') is None:
6febd1c1 2558 format['format'] = '{id} - {res}{note}'.format(
8c51aa65
JMF
2559 id=format['format_id'],
2560 res=self.format_resolution(format),
b868936c 2561 note=format_field(format, 'format_note', ' (%s)'),
8c51aa65 2562 )
6f0be937 2563 if format.get('protocol') is None:
b5559424 2564 format['protocol'] = determine_protocol(format)
239df021 2565 if format.get('resolution') is None:
2566 format['resolution'] = self.format_resolution(format, default=None)
176f1866 2567 if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
2568 format['dynamic_range'] = 'SDR'
f2fe69c7 2569 if (info_dict.get('duration') and format.get('tbr')
2570 and not format.get('filesize') and not format.get('filesize_approx')):
56ba69e4 2571 format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
f2fe69c7 2572
e5660ee6
JMF
2573 # Add HTTP headers, so that external programs can use them from the
2574 # json output
2575 full_format_info = info_dict.copy()
2576 full_format_info.update(format)
2577 format['http_headers'] = self._calc_headers(full_format_info)
0016b84e
S
2578 # Remove private housekeeping stuff
2579 if '__x_forwarded_for_ip' in info_dict:
2580 del info_dict['__x_forwarded_for_ip']
dd82ffea 2581
9f1a1c36 2582 if self.params.get('check_formats') is True:
282f5709 2583 formats = LazyList(self._check_formats(formats[::-1]), reverse=True)
9f1a1c36 2584
88acdbc2 2585 if not formats or formats[0] is not info_dict:
b3d9ef88
JMF
2586 # only set the 'formats' fields if the original info_dict list them
2587 # otherwise we end up with a circular reference, the first (and unique)
f89197d7 2588 # element in the 'formats' field in info_dict is info_dict itself,
dfb1b146 2589 # which can't be exported to json
b3d9ef88 2590 info_dict['formats'] = formats
4ec82a72 2591
2592 info_dict, _ = self.pre_process(info_dict)
2593
6db9c4d5 2594 if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
09b49e1f 2595 return info_dict
2596
2597 self.post_extract(info_dict)
2598 info_dict, _ = self.pre_process(info_dict, 'after_filter')
2599
093a1710 2600 # The pre-processors may have modified the formats
2601 formats = info_dict.get('formats', [info_dict])
2602
fa9f30b8 2603 list_only = self.params.get('simulate') is None and (
2604 self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
2605 interactive_format_selection = not list_only and self.format_selector == '-'
b7b04c78 2606 if self.params.get('list_thumbnails'):
2607 self.list_thumbnails(info_dict)
b7b04c78 2608 if self.params.get('listsubtitles'):
2609 if 'automatic_captions' in info_dict:
2610 self.list_subtitles(
2611 info_dict['id'], automatic_captions, 'automatic captions')
2612 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
fa9f30b8 2613 if self.params.get('listformats') or interactive_format_selection:
b69fd25c 2614 self.list_formats(info_dict)
169dbde9 2615 if list_only:
b7b04c78 2616 # Without this printing, -F --print-json will not work
169dbde9 2617 self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
c487cf00 2618 return info_dict
bfaae0a7 2619
187986a8 2620 format_selector = self.format_selector
2621 if format_selector is None:
0017d9ad 2622 req_format = self._default_format_spec(info_dict, download=download)
0760b0a7 2623 self.write_debug('Default format spec: %s' % req_format)
187986a8 2624 format_selector = self.build_format_selector(req_format)
317f7ab6 2625
fa9f30b8 2626 while True:
2627 if interactive_format_selection:
2628 req_format = input(
2629 self._format_screen('\nEnter format selector: ', self.Styles.EMPHASIS))
2630 try:
2631 format_selector = self.build_format_selector(req_format)
2632 except SyntaxError as err:
2633 self.report_error(err, tb=False, is_error=False)
2634 continue
2635
85e801a9 2636 formats_to_download = list(format_selector({
fa9f30b8 2637 'formats': formats,
85e801a9 2638 'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
2639 'incomplete_formats': (
2640 # All formats are video-only or
2641 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
2642 # all formats are audio-only
2643 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
2644 }))
fa9f30b8 2645 if interactive_format_selection and not formats_to_download:
2646 self.report_error('Requested format is not available', tb=False, is_error=False)
2647 continue
2648 break
317f7ab6 2649
dd82ffea 2650 if not formats_to_download:
b7da73eb 2651 if not self.params.get('ignore_no_formats_error'):
c0b6e5c7 2652 raise ExtractorError(
2653 'Requested format is not available. Use --list-formats for a list of available formats',
2654 expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
b62fa6d7 2655 self.report_warning('Requested format is not available')
2656 # Process what we can, even without any available formats.
2657 formats_to_download = [{}]
a13e6848 2658
5ec1b6b7 2659 requested_ranges = self.params.get('download_ranges')
2660 if requested_ranges:
2661 requested_ranges = tuple(requested_ranges(info_dict, self))
2662
2663 best_format, downloaded_formats = formats_to_download[-1], []
b62fa6d7 2664 if download:
2665 if best_format:
5ec1b6b7 2666 def to_screen(*msg):
2667 self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')
2668
2669 to_screen(f'Downloading {len(formats_to_download)} format(s):',
2670 (f['format_id'] for f in formats_to_download))
2671 if requested_ranges:
2672 to_screen(f'Downloading {len(requested_ranges)} time ranges:',
2673 (f'{int(c["start_time"])}-{int(c["end_time"])}' for c in requested_ranges))
a13e6848 2674 max_downloads_reached = False
5ec1b6b7 2675
2676 for fmt, chapter in itertools.product(formats_to_download, requested_ranges or [{}]):
2677 new_info = self._copy_infodict(info_dict)
b7da73eb 2678 new_info.update(fmt)
3975b4d2 2679 offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
2680 if chapter or offset:
5ec1b6b7 2681 new_info.update({
3975b4d2 2682 'section_start': offset + chapter.get('start_time', 0),
bc401608 2683 'section_end': offset + min(chapter.get('end_time', duration), duration),
5ec1b6b7 2684 'section_title': chapter.get('title'),
2685 'section_number': chapter.get('index'),
2686 })
2687 downloaded_formats.append(new_info)
a13e6848 2688 try:
2689 self.process_info(new_info)
2690 except MaxDownloadsReached:
2691 max_downloads_reached = True
415f8d51 2692 self._raise_pending_errors(new_info)
f46e2f9d 2693 # Remove copied info
2694 for key, val in tuple(new_info.items()):
2695 if info_dict.get(key) == val:
2696 new_info.pop(key)
a13e6848 2697 if max_downloads_reached:
2698 break
ebed8b37 2699
5ec1b6b7 2700 write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
a13e6848 2701 assert write_archive.issubset({True, False, 'ignore'})
2702 if True in write_archive and False not in write_archive:
2703 self.record_download_archive(info_dict)
be72c624 2704
5ec1b6b7 2705 info_dict['requested_downloads'] = downloaded_formats
ed5835b4 2706 info_dict = self.run_all_pps('after_video', info_dict)
a13e6848 2707 if max_downloads_reached:
2708 raise MaxDownloadsReached()
ebed8b37 2709
49a57e70 2710 # We update the info dict with the selected best quality format (backwards compatibility)
be72c624 2711 info_dict.update(best_format)
dd82ffea
JMF
2712 return info_dict
2713
98c70d6f 2714 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
a504ced0 2715 """Select the requested subtitles and their format"""
d8a58ddc 2716 available_subs, normal_sub_langs = {}, []
98c70d6f
JMF
2717 if normal_subtitles and self.params.get('writesubtitles'):
2718 available_subs.update(normal_subtitles)
d8a58ddc 2719 normal_sub_langs = tuple(normal_subtitles.keys())
98c70d6f
JMF
2720 if automatic_captions and self.params.get('writeautomaticsub'):
2721 for lang, cap_info in automatic_captions.items():
360e1ca5
JMF
2722 if lang not in available_subs:
2723 available_subs[lang] = cap_info
2724
4d171848
JMF
2725 if (not self.params.get('writesubtitles') and not
2726 self.params.get('writeautomaticsub') or not
2727 available_subs):
2728 return None
a504ced0 2729
d8a58ddc 2730 all_sub_langs = tuple(available_subs.keys())
a504ced0 2731 if self.params.get('allsubtitles', False):
c32b0aab 2732 requested_langs = all_sub_langs
2733 elif self.params.get('subtitleslangs', False):
77c4a9ef 2734 # A list is used so that the order of languages will be the same as
2735 # given in subtitleslangs. See https://github.com/yt-dlp/yt-dlp/issues/1041
2736 requested_langs = []
2737 for lang_re in self.params.get('subtitleslangs'):
77c4a9ef 2738 discard = lang_re[0] == '-'
c32b0aab 2739 if discard:
77c4a9ef 2740 lang_re = lang_re[1:]
3aa91540 2741 if lang_re == 'all':
2742 if discard:
2743 requested_langs = []
2744 else:
2745 requested_langs.extend(all_sub_langs)
2746 continue
77c4a9ef 2747 current_langs = filter(re.compile(lang_re + '$').match, all_sub_langs)
c32b0aab 2748 if discard:
2749 for lang in current_langs:
77c4a9ef 2750 while lang in requested_langs:
2751 requested_langs.remove(lang)
c32b0aab 2752 else:
77c4a9ef 2753 requested_langs.extend(current_langs)
2754 requested_langs = orderedSet(requested_langs)
d8a58ddc 2755 elif normal_sub_langs:
2756 requested_langs = ['en'] if 'en' in normal_sub_langs else normal_sub_langs[:1]
a504ced0 2757 else:
d8a58ddc 2758 requested_langs = ['en'] if 'en' in all_sub_langs else all_sub_langs[:1]
ad3dc496 2759 if requested_langs:
2760 self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))
a504ced0
JMF
2761
2762 formats_query = self.params.get('subtitlesformat', 'best')
2763 formats_preference = formats_query.split('/') if formats_query else []
2764 subs = {}
2765 for lang in requested_langs:
2766 formats = available_subs.get(lang)
2767 if formats is None:
86e5f3ed 2768 self.report_warning(f'{lang} subtitles not available for {video_id}')
a504ced0 2769 continue
a504ced0
JMF
2770 for ext in formats_preference:
2771 if ext == 'best':
2772 f = formats[-1]
2773 break
2774 matches = list(filter(lambda f: f['ext'] == ext, formats))
2775 if matches:
2776 f = matches[-1]
2777 break
2778 else:
2779 f = formats[-1]
2780 self.report_warning(
2781 'No subtitle format found matching "%s" for language %s, '
2782 'using %s' % (formats_query, lang, f['ext']))
2783 subs[lang] = f
2784 return subs
2785
bb66c247 2786 def _forceprint(self, key, info_dict):
2787 if info_dict is None:
2788 return
2789 info_copy = info_dict.copy()
2790 info_copy['formats_table'] = self.render_formats_table(info_dict)
2791 info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
2792 info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
2793 info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))
2794
2795 def format_tmpl(tmpl):
2796 mobj = re.match(r'\w+(=?)$', tmpl)
2797 if mobj and mobj.group(1):
2798 return f'{tmpl[:-1]} = %({tmpl[:-1]})r'
2799 elif mobj:
2800 return f'%({tmpl})s'
2801 return tmpl
8130779d 2802
bb66c247 2803 for tmpl in self.params['forceprint'].get(key, []):
2804 self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))
2805
2806 for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
5127e92a 2807 filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
bb66c247 2808 tmpl = format_tmpl(tmpl)
2809 self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
8d93e69d 2810 if self._ensure_dir_exists(filename):
86e5f3ed 2811 with open(filename, 'a', encoding='utf-8') as f:
8d93e69d 2812 f.write(self.evaluate_outtmpl(tmpl, info_copy) + '\n')
ca30f449 2813
d06daf23 2814 def __forced_printings(self, info_dict, filename, incomplete):
53c18592 2815 def print_mandatory(field, actual_field=None):
2816 if actual_field is None:
2817 actual_field = field
d06daf23 2818 if (self.params.get('force%s' % field, False)
53c18592 2819 and (not incomplete or info_dict.get(actual_field) is not None)):
2820 self.to_stdout(info_dict[actual_field])
d06daf23
S
2821
2822 def print_optional(field):
2823 if (self.params.get('force%s' % field, False)
2824 and info_dict.get(field) is not None):
2825 self.to_stdout(info_dict[field])
2826
53c18592 2827 info_dict = info_dict.copy()
2828 if filename is not None:
2829 info_dict['filename'] = filename
2830 if info_dict.get('requested_formats') is not None:
2831 # For RTMP URLs, also include the playpath
2832 info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
10331a26 2833 elif info_dict.get('url'):
53c18592 2834 info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')
2835
bb66c247 2836 if (self.params.get('forcejson')
2837 or self.params['forceprint'].get('video')
2838 or self.params['print_to_file'].get('video')):
2b8a2973 2839 self.post_extract(info_dict)
bb66c247 2840 self._forceprint('video', info_dict)
53c18592 2841
d06daf23
S
2842 print_mandatory('title')
2843 print_mandatory('id')
53c18592 2844 print_mandatory('url', 'urls')
d06daf23
S
2845 print_optional('thumbnail')
2846 print_optional('description')
53c18592 2847 print_optional('filename')
b868936c 2848 if self.params.get('forceduration') and info_dict.get('duration') is not None:
d06daf23
S
2849 self.to_stdout(formatSeconds(info_dict['duration']))
2850 print_mandatory('format')
53c18592 2851
2b8a2973 2852 if self.params.get('forcejson'):
6e84b215 2853 self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
d06daf23 2854
e8e73840 2855 def dl(self, name, info, subtitle=False, test=False):
88acdbc2 2856 if not info.get('url'):
1151c407 2857 self.raise_no_formats(info, True)
e8e73840 2858
2859 if test:
2860 verbose = self.params.get('verbose')
2861 params = {
2862 'test': True,
a169858f 2863 'quiet': self.params.get('quiet') or not verbose,
e8e73840 2864 'verbose': verbose,
2865 'noprogress': not verbose,
2866 'nopart': True,
2867 'skip_unavailable_fragments': False,
2868 'keep_fragments': False,
2869 'overwrites': True,
2870 '_no_ytdl_file': True,
2871 }
2872 else:
2873 params = self.params
96fccc10 2874 fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
e8e73840 2875 if not test:
2876 for ph in self._progress_hooks:
2877 fd.add_progress_hook(ph)
42676437
M
2878 urls = '", "'.join(
2879 (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
2880 for f in info.get('requested_formats', []) or [info])
3a408f9d 2881 self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')
03b4de72 2882
adbc4ec4
THD
2883 # Note: Ideally info should be a deep-copied so that hooks cannot modify it.
2884 # But it may contain objects that are not deep-copyable
2885 new_info = self._copy_infodict(info)
e8e73840 2886 if new_info.get('http_headers') is None:
2887 new_info['http_headers'] = self._calc_headers(new_info)
2888 return fd.download(name, new_info, subtitle)
2889
e04938ab 2890 def existing_file(self, filepaths, *, default_overwrite=True):
2891 existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
2892 if existing_files and not self.params.get('overwrites', default_overwrite):
2893 return existing_files[0]
2894
2895 for file in existing_files:
2896 self.report_file_delete(file)
2897 os.remove(file)
2898 return None
2899
8222d8de 2900 def process_info(self, info_dict):
09b49e1f 2901 """Process a single resolved IE result. (Modifies it in-place)"""
8222d8de
JMF
2902
2903 assert info_dict.get('_type', 'video') == 'video'
f46e2f9d 2904 original_infodict = info_dict
fd288278 2905
4513a41a 2906 if 'format' not in info_dict and 'ext' in info_dict:
8222d8de
JMF
2907 info_dict['format'] = info_dict['ext']
2908
09b49e1f 2909 # This is mostly just for backward compatibility of process_info
2910 # As a side-effect, this allows for format-specific filters
c77495e3 2911 if self._match_entry(info_dict) is not None:
9e907ebd 2912 info_dict['__write_download_archive'] = 'ignore'
8222d8de
JMF
2913 return
2914
09b49e1f 2915 # Does nothing under normal operation - for backward compatibility of process_info
277d6ff5 2916 self.post_extract(info_dict)
0c14d66a 2917 self._num_downloads += 1
8222d8de 2918
dcf64d43 2919 # info_dict['_filename'] needs to be set for backward compatibility
de6000d9 2920 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2921 temp_filename = self.prepare_filename(info_dict, 'temp')
0202b52a 2922 files_to_move = {}
8222d8de
JMF
2923
2924 # Forced printings
4513a41a 2925 self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
8222d8de 2926
ca6d59d2 2927 def check_max_downloads():
2928 if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
2929 raise MaxDownloadsReached()
2930
b7b04c78 2931 if self.params.get('simulate'):
9e907ebd 2932 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
ca6d59d2 2933 check_max_downloads()
8222d8de
JMF
2934 return
2935
de6000d9 2936 if full_filename is None:
8222d8de 2937 return
e92caff5 2938 if not self._ensure_dir_exists(encodeFilename(full_filename)):
0202b52a 2939 return
e92caff5 2940 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
8222d8de
JMF
2941 return
2942
80c03fa9 2943 if self._write_description('video', info_dict,
2944 self.prepare_filename(info_dict, 'description')) is None:
2945 return
2946
2947 sub_files = self._write_subtitles(info_dict, temp_filename)
2948 if sub_files is None:
2949 return
2950 files_to_move.update(dict(sub_files))
2951
2952 thumb_files = self._write_thumbnails(
2953 'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
2954 if thumb_files is None:
2955 return
2956 files_to_move.update(dict(thumb_files))
8222d8de 2957
80c03fa9 2958 infofn = self.prepare_filename(info_dict, 'infojson')
2959 _infojson_written = self._write_info_json('video', info_dict, infofn)
2960 if _infojson_written:
dac5df5a 2961 info_dict['infojson_filename'] = infofn
e75bb0d6 2962 # For backward compatibility, even though it was a private field
80c03fa9 2963 info_dict['__infojson_filename'] = infofn
2964 elif _infojson_written is None:
2965 return
2966
2967 # Note: Annotations are deprecated
2968 annofn = None
1fb07d10 2969 if self.params.get('writeannotations', False):
de6000d9 2970 annofn = self.prepare_filename(info_dict, 'annotation')
80c03fa9 2971 if annofn:
e92caff5 2972 if not self._ensure_dir_exists(encodeFilename(annofn)):
0202b52a 2973 return
0c3d0f51 2974 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
6febd1c1 2975 self.to_screen('[info] Video annotations are already present')
ffddb112
RA
2976 elif not info_dict.get('annotations'):
2977 self.report_warning('There are no annotations to write.')
7b6fefc9
PH
2978 else:
2979 try:
6febd1c1 2980 self.to_screen('[info] Writing video annotations to: ' + annofn)
86e5f3ed 2981 with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
7b6fefc9
PH
2982 annofile.write(info_dict['annotations'])
2983 except (KeyError, TypeError):
6febd1c1 2984 self.report_warning('There are no annotations to write.')
86e5f3ed 2985 except OSError:
6febd1c1 2986 self.report_error('Cannot write annotations file: ' + annofn)
7b6fefc9 2987 return
1fb07d10 2988
732044af 2989 # Write internet shortcut files
08438d2c 2990 def _write_link_file(link_type):
60f3e995 2991 url = try_get(info_dict['webpage_url'], iri_to_uri)
2992 if not url:
2993 self.report_warning(
2994 f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
2995 return True
08438d2c 2996 linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
0e6b018a
Z
2997 if not self._ensure_dir_exists(encodeFilename(linkfn)):
2998 return False
10e3742e 2999 if self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
08438d2c 3000 self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
3001 return True
3002 try:
3003 self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
86e5f3ed 3004 with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
3005 newline='\r\n' if link_type == 'url' else '\n') as linkfile:
60f3e995 3006 template_vars = {'url': url}
08438d2c 3007 if link_type == 'desktop':
3008 template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
3009 linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
86e5f3ed 3010 except OSError:
08438d2c 3011 self.report_error(f'Cannot write internet shortcut {linkfn}')
3012 return False
732044af 3013 return True
3014
08438d2c 3015 write_links = {
3016 'url': self.params.get('writeurllink'),
3017 'webloc': self.params.get('writewebloclink'),
3018 'desktop': self.params.get('writedesktoplink'),
3019 }
3020 if self.params.get('writelink'):
3021 link_type = ('webloc' if sys.platform == 'darwin'
3022 else 'desktop' if sys.platform.startswith('linux')
3023 else 'url')
3024 write_links[link_type] = True
3025
3026 if any(should_write and not _write_link_file(link_type)
3027 for link_type, should_write in write_links.items()):
3028 return
732044af 3029
f46e2f9d 3030 def replace_info_dict(new_info):
3031 nonlocal info_dict
3032 if new_info == info_dict:
3033 return
3034 info_dict.clear()
3035 info_dict.update(new_info)
3036
415f8d51 3037 new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
3038 replace_info_dict(new_info)
56d868db 3039
a13e6848 3040 if self.params.get('skip_download'):
56d868db 3041 info_dict['filepath'] = temp_filename
3042 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3043 info_dict['__files_to_move'] = files_to_move
f46e2f9d 3044 replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
9e907ebd 3045 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
56d868db 3046 else:
3047 # Download
b868936c 3048 info_dict.setdefault('__postprocessors', [])
4340deca 3049 try:
0202b52a 3050
e04938ab 3051 def existing_video_file(*filepaths):
6b591b29 3052 ext = info_dict.get('ext')
e04938ab 3053 converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
3054 file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
3055 default_overwrite=False)
3056 if file:
3057 info_dict['ext'] = os.path.splitext(file)[1][1:]
3058 return file
0202b52a 3059
7b2c3f47 3060 fd, success = None, True
fccf90e7 3061 if info_dict.get('protocol') or info_dict.get('url'):
56ba69e4 3062 fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
3063 if fd is not FFmpegFD and (
3064 info_dict.get('section_start') or info_dict.get('section_end')):
7b2c3f47 3065 msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
56ba69e4 3066 else 'You have requested downloading the video partially, but ffmpeg is not installed')
3067 self.report_error(f'{msg}. Aborting')
5ec1b6b7 3068 return
5ec1b6b7 3069
4340deca 3070 if info_dict.get('requested_formats') is not None:
81cd954a 3071 requested_formats = info_dict['requested_formats']
0202b52a 3072 old_ext = info_dict['ext']
4e3b637d 3073 if self.params.get('merge_output_format') is None:
4e3b637d 3074 if (info_dict['ext'] == 'webm'
3075 and info_dict.get('thumbnails')
3076 # check with type instead of pp_key, __name__, or isinstance
3077 # since we dont want any custom PPs to trigger this
c487cf00 3078 and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])): # noqa: E721
4e3b637d 3079 info_dict['ext'] = 'mkv'
3080 self.report_warning(
3081 'webm doesn\'t support embedding a thumbnail, mkv will be used')
124bc071 3082 new_ext = info_dict['ext']
0202b52a 3083
124bc071 3084 def correct_ext(filename, ext=new_ext):
96fccc10 3085 if filename == '-':
3086 return filename
0202b52a 3087 filename_real_ext = os.path.splitext(filename)[1][1:]
3088 filename_wo_ext = (
3089 os.path.splitext(filename)[0]
124bc071 3090 if filename_real_ext in (old_ext, new_ext)
0202b52a 3091 else filename)
86e5f3ed 3092 return f'{filename_wo_ext}.{ext}'
0202b52a 3093
38c6902b 3094 # Ensure filename always has a correct extension for successful merge
0202b52a 3095 full_filename = correct_ext(full_filename)
3096 temp_filename = correct_ext(temp_filename)
e04938ab 3097 dl_filename = existing_video_file(full_filename, temp_filename)
1ea24129 3098 info_dict['__real_download'] = False
18e674b4 3099
7b2c3f47 3100 merger = FFmpegMergerPP(self)
adbc4ec4 3101 downloaded = []
dbf5416a 3102 if dl_filename is not None:
6c7274ec 3103 self.report_file_already_downloaded(dl_filename)
adbc4ec4
THD
3104 elif fd:
3105 for f in requested_formats if fd != FFmpegFD else []:
3106 f['filepath'] = fname = prepend_extension(
3107 correct_ext(temp_filename, info_dict['ext']),
3108 'f%s' % f['format_id'], info_dict['ext'])
3109 downloaded.append(fname)
dbf5416a 3110 info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
3111 success, real_download = self.dl(temp_filename, info_dict)
3112 info_dict['__real_download'] = real_download
18e674b4 3113 else:
18e674b4 3114 if self.params.get('allow_unplayable_formats'):
3115 self.report_warning(
3116 'You have requested merging of multiple formats '
3117 'while also allowing unplayable formats to be downloaded. '
3118 'The formats won\'t be merged to prevent data corruption.')
3119 elif not merger.available:
e8969bda 3120 msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
3121 if not self.params.get('ignoreerrors'):
3122 self.report_error(f'{msg}. Aborting due to --abort-on-error')
3123 return
3124 self.report_warning(f'{msg}. The formats won\'t be merged')
18e674b4 3125
96fccc10 3126 if temp_filename == '-':
adbc4ec4 3127 reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
96fccc10 3128 else 'but the formats are incompatible for simultaneous download' if merger.available
3129 else 'but ffmpeg is not installed')
3130 self.report_warning(
3131 f'You have requested downloading multiple formats to stdout {reason}. '
3132 'The formats will be streamed one after the other')
3133 fname = temp_filename
dbf5416a 3134 for f in requested_formats:
3135 new_info = dict(info_dict)
3136 del new_info['requested_formats']
3137 new_info.update(f)
96fccc10 3138 if temp_filename != '-':
124bc071 3139 fname = prepend_extension(
3140 correct_ext(temp_filename, new_info['ext']),
3141 'f%s' % f['format_id'], new_info['ext'])
96fccc10 3142 if not self._ensure_dir_exists(fname):
3143 return
a21e0ab1 3144 f['filepath'] = fname
96fccc10 3145 downloaded.append(fname)
dbf5416a 3146 partial_success, real_download = self.dl(fname, new_info)
3147 info_dict['__real_download'] = info_dict['__real_download'] or real_download
3148 success = success and partial_success
adbc4ec4
THD
3149
3150 if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
3151 info_dict['__postprocessors'].append(merger)
3152 info_dict['__files_to_merge'] = downloaded
3153 # Even if there were no downloads, it is being merged only now
3154 info_dict['__real_download'] = True
3155 else:
3156 for file in downloaded:
3157 files_to_move[file] = None
4340deca
P
3158 else:
3159 # Just a single file
e04938ab 3160 dl_filename = existing_video_file(full_filename, temp_filename)
6c7274ec 3161 if dl_filename is None or dl_filename == temp_filename:
3162 # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
3163 # So we should try to resume the download
e8e73840 3164 success, real_download = self.dl(temp_filename, info_dict)
0202b52a 3165 info_dict['__real_download'] = real_download
6c7274ec 3166 else:
3167 self.report_file_already_downloaded(dl_filename)
0202b52a 3168
0202b52a 3169 dl_filename = dl_filename or temp_filename
c571435f 3170 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
0202b52a 3171
3158150c 3172 except network_exceptions as err:
7960b056 3173 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
4340deca 3174 return
86e5f3ed 3175 except OSError as err:
4340deca
P
3176 raise UnavailableVideoError(err)
3177 except (ContentTooShortError, ) as err:
86e5f3ed 3178 self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
4340deca 3179 return
8222d8de 3180
415f8d51 3181 self._raise_pending_errors(info_dict)
de6000d9 3182 if success and full_filename != '-':
f17f8651 3183
fd7cfb64 3184 def fixup():
3185 do_fixup = True
3186 fixup_policy = self.params.get('fixup')
3187 vid = info_dict['id']
3188
3189 if fixup_policy in ('ignore', 'never'):
3190 return
3191 elif fixup_policy == 'warn':
3fe75fdc 3192 do_fixup = 'warn'
f89b3e2d 3193 elif fixup_policy != 'force':
3194 assert fixup_policy in ('detect_or_warn', None)
3195 if not info_dict.get('__real_download'):
3196 do_fixup = False
fd7cfb64 3197
3198 def ffmpeg_fixup(cndn, msg, cls):
3fe75fdc 3199 if not (do_fixup and cndn):
fd7cfb64 3200 return
3fe75fdc 3201 elif do_fixup == 'warn':
fd7cfb64 3202 self.report_warning(f'{vid}: {msg}')
3203 return
3204 pp = cls(self)
3205 if pp.available:
3206 info_dict['__postprocessors'].append(pp)
3207 else:
3208 self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
3209
3210 stretched_ratio = info_dict.get('stretched_ratio')
ca9def71
LNO
3211 ffmpeg_fixup(stretched_ratio not in (1, None),
3212 f'Non-uniform pixel ratio {stretched_ratio}',
3213 FFmpegFixupStretchedPP)
fd7cfb64 3214
993191c0 3215 downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
24146491 3216 downloader = downloader.FD_NAME if downloader else None
adbc4ec4 3217
ca9def71
LNO
3218 ext = info_dict.get('ext')
3219 postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
3220 isinstance(pp, FFmpegVideoConvertorPP)
3221 and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
3222 ) for pp in self._pps['post_process'])
3223
3224 if not postprocessed_by_ffmpeg:
3225 ffmpeg_fixup(ext == 'm4a' and info_dict.get('container') == 'm4a_dash',
f2df4071 3226 'writing DASH m4a. Only some players support this container',
3227 FFmpegFixupM4aPP)
24146491 3228 ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
494f5230 3229 or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
adbc4ec4
THD
3230 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
3231 FFmpegFixupM3u8PP)
3232 ffmpeg_fixup(info_dict.get('is_live') and downloader == 'DashSegmentsFD',
3233 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)
3234
24146491 3235 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
3236 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)
fd7cfb64 3237
3238 fixup()
8222d8de 3239 try:
f46e2f9d 3240 replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
af819c21 3241 except PostProcessingError as err:
3242 self.report_error('Postprocessing: %s' % str(err))
8222d8de 3243 return
ab8e5e51
AM
3244 try:
3245 for ph in self._post_hooks:
23c1a667 3246 ph(info_dict['filepath'])
ab8e5e51
AM
3247 except Exception as err:
3248 self.report_error('post hooks: %s' % str(err))
3249 return
9e907ebd 3250 info_dict['__write_download_archive'] = True
2d30509f 3251
c487cf00 3252 assert info_dict is original_infodict # Make sure the info_dict was modified in-place
a13e6848 3253 if self.params.get('force_write_download_archive'):
9e907ebd 3254 info_dict['__write_download_archive'] = True
ca6d59d2 3255 check_max_downloads()
8222d8de 3256
aa9369a2 3257 def __download_wrapper(self, func):
3258 @functools.wraps(func)
3259 def wrapper(*args, **kwargs):
3260 try:
3261 res = func(*args, **kwargs)
3262 except UnavailableVideoError as e:
3263 self.report_error(e)
b222c271 3264 except DownloadCancelled as e:
3265 self.to_screen(f'[info] {e}')
3266 if not self.params.get('break_per_url'):
3267 raise
aa9369a2 3268 else:
3269 if self.params.get('dump_single_json', False):
3270 self.post_extract(res)
3271 self.to_stdout(json.dumps(self.sanitize_info(res)))
3272 return wrapper
3273
8222d8de
JMF
3274 def download(self, url_list):
3275 """Download a given list of URLs."""
aa9369a2 3276 url_list = variadic(url_list) # Passing a single URL is a common mistake
bf1824b3 3277 outtmpl = self.params['outtmpl']['default']
3089bc74
S
3278 if (len(url_list) > 1
3279 and outtmpl != '-'
3280 and '%' not in outtmpl
3281 and self.params.get('max_downloads') != 1):
acd69589 3282 raise SameFileError(outtmpl)
8222d8de
JMF
3283
3284 for url in url_list:
aa9369a2 3285 self.__download_wrapper(self.extract_info)(
3286 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
8222d8de
JMF
3287
3288 return self._download_retcode
3289
1dcc4c0c 3290 def download_with_info_file(self, info_filename):
31bd3925
JMF
3291 with contextlib.closing(fileinput.FileInput(
3292 [info_filename], mode='r',
3293 openhook=fileinput.hook_encoded('utf-8'))) as f:
3294 # FileInput doesn't have a read method, we can't call json.load
8012d892 3295 info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
d4943898 3296 try:
aa9369a2 3297 self.__download_wrapper(self.process_ie_result)(info, download=True)
f2ebc5c7 3298 except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
bf5f605e 3299 if not isinstance(e, EntryNotInPlaylist):
3300 self.to_stderr('\r')
d4943898
JMF
3301 webpage_url = info.get('webpage_url')
3302 if webpage_url is not None:
aa9369a2 3303 self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
d4943898
JMF
3304 return self.download([webpage_url])
3305 else:
3306 raise
3307 return self._download_retcode
1dcc4c0c 3308
cb202fd2 3309 @staticmethod
8012d892 3310 def sanitize_info(info_dict, remove_private_keys=False):
3311 ''' Sanitize the infodict for converting to json '''
3ad56b42 3312 if info_dict is None:
3313 return info_dict
6e84b215 3314 info_dict.setdefault('epoch', int(time.time()))
6a5a30f9 3315 info_dict.setdefault('_type', 'video')
09b49e1f 3316
8012d892 3317 if remove_private_keys:
0a5a191a 3318 reject = lambda k, v: v is None or k.startswith('__') or k in {
f46e2f9d 3319 'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
0a5a191a 3320 'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber',
6e84b215 3321 }
ae8f99e6 3322 else:
09b49e1f 3323 reject = lambda k, v: False
adbc4ec4
THD
3324
3325 def filter_fn(obj):
3326 if isinstance(obj, dict):
3327 return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
3328 elif isinstance(obj, (list, tuple, set, LazyList)):
3329 return list(map(filter_fn, obj))
3330 elif obj is None or isinstance(obj, (str, int, float, bool)):
3331 return obj
3332 else:
3333 return repr(obj)
3334
5226731e 3335 return filter_fn(info_dict)
cb202fd2 3336
8012d892 3337 @staticmethod
3338 def filter_requested_info(info_dict, actually_filter=True):
3339 ''' Alias of sanitize_info for backward compatibility '''
3340 return YoutubeDL.sanitize_info(info_dict, actually_filter)
3341
43d7f5a5 3342 def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
3343 for filename in set(filter(None, files_to_delete)):
3344 if msg:
3345 self.to_screen(msg % filename)
3346 try:
3347 os.remove(filename)
3348 except OSError:
3349 self.report_warning(f'Unable to delete file {filename}')
3350 if filename in info.get('__files_to_move', []): # NB: Delete even if None
3351 del info['__files_to_move'][filename]
3352
ed5835b4 3353 @staticmethod
3354 def post_extract(info_dict):
3355 def actual_post_extract(info_dict):
3356 if info_dict.get('_type') in ('playlist', 'multi_video'):
3357 for video_dict in info_dict.get('entries', {}):
3358 actual_post_extract(video_dict or {})
3359 return
3360
09b49e1f 3361 post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
3362 info_dict.update(post_extractor())
ed5835b4 3363
3364 actual_post_extract(info_dict or {})
3365
dcf64d43 3366 def run_pp(self, pp, infodict):
5bfa4862 3367 files_to_delete = []
dcf64d43 3368 if '__files_to_move' not in infodict:
3369 infodict['__files_to_move'] = {}
b1940459 3370 try:
3371 files_to_delete, infodict = pp.run(infodict)
3372 except PostProcessingError as e:
3373 # Must be True and not 'only_download'
3374 if self.params.get('ignoreerrors') is True:
3375 self.report_error(e)
3376 return infodict
3377 raise
3378
5bfa4862 3379 if not files_to_delete:
dcf64d43 3380 return infodict
5bfa4862 3381 if self.params.get('keepvideo', False):
3382 for f in files_to_delete:
dcf64d43 3383 infodict['__files_to_move'].setdefault(f, '')
5bfa4862 3384 else:
43d7f5a5 3385 self._delete_downloaded_files(
3386 *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
dcf64d43 3387 return infodict
5bfa4862 3388
ed5835b4 3389 def run_all_pps(self, key, info, *, additional_pps=None):
bb66c247 3390 self._forceprint(key, info)
ed5835b4 3391 for pp in (additional_pps or []) + self._pps[key]:
dc5f409c 3392 info = self.run_pp(pp, info)
ed5835b4 3393 return info
277d6ff5 3394
56d868db 3395 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
5bfa4862 3396 info = dict(ie_info)
56d868db 3397 info['__files_to_move'] = files_to_move or {}
415f8d51 3398 try:
3399 info = self.run_all_pps(key, info)
3400 except PostProcessingError as err:
3401 msg = f'Preprocessing: {err}'
3402 info.setdefault('__pending_error', msg)
3403 self.report_error(msg, is_error=False)
56d868db 3404 return info, info.pop('__files_to_move', None)
5bfa4862 3405
f46e2f9d 3406 def post_process(self, filename, info, files_to_move=None):
8222d8de 3407 """Run all the postprocessors on the given file."""
8222d8de 3408 info['filepath'] = filename
dcf64d43 3409 info['__files_to_move'] = files_to_move or {}
ed5835b4 3410 info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
dcf64d43 3411 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
3412 del info['__files_to_move']
ed5835b4 3413 return self.run_all_pps('after_move', info)
c1c9a79c 3414
5db07df6 3415 def _make_archive_id(self, info_dict):
e9fef7ee
S
3416 video_id = info_dict.get('id')
3417 if not video_id:
3418 return
5db07df6
PH
3419 # Future-proof against any change in case
3420 # and backwards compatibility with prior versions
e9fef7ee 3421 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
7012b23c 3422 if extractor is None:
1211bb6d
S
3423 url = str_or_none(info_dict.get('url'))
3424 if not url:
3425 return
e9fef7ee 3426 # Try to find matching extractor for the URL and take its ie_key
8b7491c8 3427 for ie_key, ie in self._ies.items():
1211bb6d 3428 if ie.suitable(url):
8b7491c8 3429 extractor = ie_key
e9fef7ee
S
3430 break
3431 else:
3432 return
0647d925 3433 return make_archive_id(extractor, video_id)
5db07df6
PH
3434
3435 def in_download_archive(self, info_dict):
3436 fn = self.params.get('download_archive')
3437 if fn is None:
3438 return False
3439
1e8fe57e 3440 vid_ids = [self._make_archive_id(info_dict)]
3441 vid_ids.extend(info_dict.get('_old_archive_ids', []))
3442 return any(id_ in self.archive for id_ in vid_ids)
c1c9a79c
PH
3443
3444 def record_download_archive(self, info_dict):
3445 fn = self.params.get('download_archive')
3446 if fn is None:
3447 return
5db07df6
PH
3448 vid_id = self._make_archive_id(info_dict)
3449 assert vid_id
a13e6848 3450 self.write_debug(f'Adding to archive: {vid_id}')
c1c9a79c 3451 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
6febd1c1 3452 archive_file.write(vid_id + '\n')
a45e8619 3453 self.archive.add(vid_id)
dd82ffea 3454
8c51aa65 3455 @staticmethod
8abeeb94 3456 def format_resolution(format, default='unknown'):
9359f3d4 3457 if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
fb04e403 3458 return 'audio only'
f49d89ee
PH
3459 if format.get('resolution') is not None:
3460 return format['resolution']
35615307 3461 if format.get('width') and format.get('height'):
ff51ed58 3462 return '%dx%d' % (format['width'], format['height'])
35615307 3463 elif format.get('height'):
ff51ed58 3464 return '%sp' % format['height']
35615307 3465 elif format.get('width'):
ff51ed58 3466 return '%dx?' % format['width']
3467 return default
8c51aa65 3468
8130779d 3469 def _list_format_headers(self, *headers):
3470 if self.params.get('listformats_table', True) is not False:
591bb9d3 3471 return [self._format_out(header, self.Styles.HEADERS) for header in headers]
8130779d 3472 return headers
3473
c57f7757
PH
3474 def _format_note(self, fdict):
3475 res = ''
3476 if fdict.get('ext') in ['f4f', 'f4m']:
f304da8a 3477 res += '(unsupported)'
32f90364
PH
3478 if fdict.get('language'):
3479 if res:
3480 res += ' '
f304da8a 3481 res += '[%s]' % fdict['language']
c57f7757 3482 if fdict.get('format_note') is not None:
f304da8a 3483 if res:
3484 res += ' '
3485 res += fdict['format_note']
c57f7757 3486 if fdict.get('tbr') is not None:
f304da8a 3487 if res:
3488 res += ', '
3489 res += '%4dk' % fdict['tbr']
c57f7757
PH
3490 if fdict.get('container') is not None:
3491 if res:
3492 res += ', '
3493 res += '%s container' % fdict['container']
3089bc74
S
3494 if (fdict.get('vcodec') is not None
3495 and fdict.get('vcodec') != 'none'):
c57f7757
PH
3496 if res:
3497 res += ', '
3498 res += fdict['vcodec']
91c7271a 3499 if fdict.get('vbr') is not None:
c57f7757
PH
3500 res += '@'
3501 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
3502 res += 'video@'
3503 if fdict.get('vbr') is not None:
3504 res += '%4dk' % fdict['vbr']
fbb21cf5 3505 if fdict.get('fps') is not None:
5d583bdf
S
3506 if res:
3507 res += ', '
3508 res += '%sfps' % fdict['fps']
c57f7757
PH
3509 if fdict.get('acodec') is not None:
3510 if res:
3511 res += ', '
3512 if fdict['acodec'] == 'none':
3513 res += 'video only'
3514 else:
3515 res += '%-5s' % fdict['acodec']
3516 elif fdict.get('abr') is not None:
3517 if res:
3518 res += ', '
3519 res += 'audio'
3520 if fdict.get('abr') is not None:
3521 res += '@%3dk' % fdict['abr']
3522 if fdict.get('asr') is not None:
3523 res += ' (%5dHz)' % fdict['asr']
3524 if fdict.get('filesize') is not None:
3525 if res:
3526 res += ', '
3527 res += format_bytes(fdict['filesize'])
9732d77e
PH
3528 elif fdict.get('filesize_approx') is not None:
3529 if res:
3530 res += ', '
3531 res += '~' + format_bytes(fdict['filesize_approx'])
c57f7757 3532 return res
91c7271a 3533
8130779d 3534 def render_formats_table(self, info_dict):
b69fd25c 3535 if not info_dict.get('formats') and not info_dict.get('url'):
8130779d 3536 return None
b69fd25c 3537
94badb25 3538 formats = info_dict.get('formats', [info_dict])
8130779d 3539 if not self.params.get('listformats_table', True) is not False:
76d321f6 3540 table = [
3541 [
3542 format_field(f, 'format_id'),
3543 format_field(f, 'ext'),
3544 self.format_resolution(f),
8130779d 3545 self._format_note(f)
3546 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3547 return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
3548
d816f61f 3549 def simplified_codec(f, field):
3550 assert field in ('acodec', 'vcodec')
3551 codec = f.get(field, 'unknown')
f5ea4748 3552 if not codec:
3553 return 'unknown'
3554 elif codec != 'none':
d816f61f 3555 return '.'.join(codec.split('.')[:4])
3556
3557 if field == 'vcodec' and f.get('acodec') == 'none':
3558 return 'images'
3559 elif field == 'acodec' and f.get('vcodec') == 'none':
3560 return ''
3561 return self._format_out('audio only' if field == 'vcodec' else 'video only',
3562 self.Styles.SUPPRESS)
3563
591bb9d3 3564 delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
8130779d 3565 table = [
3566 [
591bb9d3 3567 self._format_out(format_field(f, 'format_id'), self.Styles.ID),
8130779d 3568 format_field(f, 'ext'),
3569 format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
563e0bf8 3570 format_field(f, 'fps', '\t%d', func=round),
8130779d 3571 format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
3572 delim,
3573 format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
563e0bf8 3574 format_field(f, 'tbr', '\t%dk', func=round),
8130779d 3575 shorten_protocol_name(f.get('protocol', '')),
3576 delim,
d816f61f 3577 simplified_codec(f, 'vcodec'),
563e0bf8 3578 format_field(f, 'vbr', '\t%dk', func=round),
d816f61f 3579 simplified_codec(f, 'acodec'),
563e0bf8 3580 format_field(f, 'abr', '\t%dk', func=round),
ae61d108 3581 format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
8130779d 3582 join_nonempty(
591bb9d3 3583 self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
8130779d 3584 format_field(f, 'language', '[%s]'),
3585 join_nonempty(format_field(f, 'format_note'),
3586 format_field(f, 'container', ignore=(None, f.get('ext'))),
3587 delim=', '),
3588 delim=' '),
3589 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3590 header_line = self._list_format_headers(
3591 'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', delim, '\tFILESIZE', '\tTBR', 'PROTO',
3592 delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')
3593
3594 return render_table(
3595 header_line, table, hide_empty=True,
591bb9d3 3596 delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
8130779d 3597
3598 def render_thumbnails_table(self, info_dict):
88f23a18 3599 thumbnails = list(info_dict.get('thumbnails') or [])
cfb56d1a 3600 if not thumbnails:
8130779d 3601 return None
3602 return render_table(
ec11a9f4 3603 self._list_format_headers('ID', 'Width', 'Height', 'URL'),
6970b600 3604 [[t.get('id'), t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])
2412044c 3605
8130779d 3606 def render_subtitles_table(self, video_id, subtitles):
2412044c 3607 def _row(lang, formats):
49c258e1 3608 exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
2412044c 3609 if len(set(names)) == 1:
7aee40c1 3610 names = [] if names[0] == 'unknown' else names[:1]
2412044c 3611 return [lang, ', '.join(names), ', '.join(exts)]
3612
8130779d 3613 if not subtitles:
3614 return None
3615 return render_table(
ec11a9f4 3616 self._list_format_headers('Language', 'Name', 'Formats'),
2412044c 3617 [_row(lang, formats) for lang, formats in subtitles.items()],
8130779d 3618 hide_empty=True)
3619
3620 def __list_table(self, video_id, name, func, *args):
3621 table = func(*args)
3622 if not table:
3623 self.to_screen(f'{video_id} has no {name}')
3624 return
3625 self.to_screen(f'[info] Available {name} for {video_id}:')
3626 self.to_stdout(table)
3627
3628 def list_formats(self, info_dict):
3629 self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)
3630
3631 def list_thumbnails(self, info_dict):
3632 self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)
3633
3634 def list_subtitles(self, video_id, subtitles, name='subtitles'):
3635 self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
a504ced0 3636
dca08720
PH
3637 def urlopen(self, req):
3638 """ Start an HTTP download """
f9934b96 3639 if isinstance(req, str):
67dda517 3640 req = sanitized_Request(req)
19a41fc6 3641 return self._opener.open(req, timeout=self._socket_timeout)
dca08720
PH
3642
3643 def print_debug_header(self):
3644 if not self.params.get('verbose'):
3645 return
49a57e70 3646
560738f3 3647 # These imports can be slow. So import them only as needed
3648 from .extractor.extractors import _LAZY_LOADER
3649 from .extractor.extractors import _PLUGIN_CLASSES as plugin_extractors
3650
49a57e70 3651 def get_encoding(stream):
2a938746 3652 ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
49a57e70 3653 if not supports_terminal_sequences(stream):
53973b4d 3654 from .utils import WINDOWS_VT_MODE # Must be imported locally
e3c7d495 3655 ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
49a57e70 3656 return ret
3657
591bb9d3 3658 encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
49a57e70 3659 locale.getpreferredencoding(),
3660 sys.getfilesystemencoding(),
591bb9d3 3661 self.get_encoding(),
3662 ', '.join(
64fa820c 3663 f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
591bb9d3 3664 if stream is not None and key != 'console')
3665 )
883d4b1e 3666
3667 logger = self.params.get('logger')
3668 if logger:
3669 write_debug = lambda msg: logger.debug(f'[debug] {msg}')
3670 write_debug(encoding_str)
3671 else:
96565c7e 3672 write_string(f'[debug] {encoding_str}\n', encoding=None)
49a57e70 3673 write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')
734f90bb 3674
4c88ff87 3675 source = detect_variant()
36eaf303 3676 write_debug(join_nonempty(
3677 'yt-dlp version', __version__,
3678 f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
3679 '' if source == 'unknown' else f'({source})',
3680 delim=' '))
6e21fdd2 3681 if not _LAZY_LOADER:
3682 if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
49a57e70 3683 write_debug('Lazy loading extractors is forcibly disabled')
6e21fdd2 3684 else:
49a57e70 3685 write_debug('Lazy loading extractors is disabled')
3ae5e797 3686 if plugin_extractors or plugin_postprocessors:
49a57e70 3687 write_debug('Plugins: %s' % [
3ae5e797 3688 '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
3689 for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
8a82af35 3690 if self.params['compat_opts']:
3691 write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
36eaf303 3692
3693 if source == 'source':
dca08720 3694 try:
f0c9fb96 3695 stdout, _, _ = Popen.run(
36eaf303 3696 ['git', 'rev-parse', '--short', 'HEAD'],
f0c9fb96 3697 text=True, cwd=os.path.dirname(os.path.abspath(__file__)),
3698 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
3699 if re.fullmatch('[0-9a-f]+', stdout.strip()):
3700 write_debug(f'Git HEAD: {stdout.strip()}')
70a1165b 3701 except Exception:
19a03940 3702 with contextlib.suppress(Exception):
36eaf303 3703 sys.exc_clear()
b300cda4 3704
b1f94422 3705 write_debug(system_identifier())
d28b5171 3706
8913ef74 3707 exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
3708 ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
3709 if ffmpeg_features:
19a03940 3710 exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))
8913ef74 3711
4c83c967 3712 exe_versions['rtmpdump'] = rtmpdump_version()
feee8d32 3713 exe_versions['phantomjs'] = PhantomJSwrapper._version()
d28b5171 3714 exe_str = ', '.join(
2831b468 3715 f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
3716 ) or 'none'
49a57e70 3717 write_debug('exe versions: %s' % exe_str)
dca08720 3718
1d485a1a 3719 from .compat.compat_utils import get_package_info
9b8ee23b 3720 from .dependencies import available_dependencies
3721
3722 write_debug('Optional libraries: %s' % (', '.join(sorted({
1d485a1a 3723 join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
9b8ee23b 3724 })) or 'none'))
2831b468 3725
97ec5bc5 3726 self._setup_opener()
dca08720
PH
3727 proxy_map = {}
3728 for handler in self._opener.handlers:
3729 if hasattr(handler, 'proxies'):
3730 proxy_map.update(handler.proxies)
49a57e70 3731 write_debug(f'Proxy map: {proxy_map}')
dca08720 3732
49a57e70 3733 # Not implemented
3734 if False and self.params.get('call_home'):
0f06bcd7 3735 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
49a57e70 3736 write_debug('Public IP address: %s' % ipaddr)
58b1f00d 3737 latest_version = self.urlopen(
0f06bcd7 3738 'https://yt-dl.org/latest/version').read().decode()
58b1f00d
PH
3739 if version_tuple(latest_version) > version_tuple(__version__):
3740 self.report_warning(
3741 'You are using an outdated version (newest version: %s)! '
3742 'See https://yt-dl.org/update if you need help updating.' %
3743 latest_version)
3744
e344693b 3745 def _setup_opener(self):
97ec5bc5 3746 if hasattr(self, '_opener'):
3747 return
6ad14cab 3748 timeout_val = self.params.get('socket_timeout')
17bddf3e 3749 self._socket_timeout = 20 if timeout_val is None else float(timeout_val)
6ad14cab 3750
982ee69a 3751 opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
dca08720
PH
3752 opts_cookiefile = self.params.get('cookiefile')
3753 opts_proxy = self.params.get('proxy')
3754
982ee69a 3755 self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
dca08720 3756
6a3f4c3f 3757 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
dca08720
PH
3758 if opts_proxy is not None:
3759 if opts_proxy == '':
3760 proxies = {}
3761 else:
3762 proxies = {'http': opts_proxy, 'https': opts_proxy}
3763 else:
ac668111 3764 proxies = urllib.request.getproxies()
067aa17e 3765 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
dca08720
PH
3766 if 'http' in proxies and 'https' not in proxies:
3767 proxies['https'] = proxies['http']
91410c9b 3768 proxy_handler = PerRequestProxyHandler(proxies)
a0ddb8a2
PH
3769
3770 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
be4a824d
PH
3771 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3772 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
fca6dba8 3773 redirect_handler = YoutubeDLRedirectHandler()
f9934b96 3774 data_handler = urllib.request.DataHandler()
6240b0a2
JMF
3775
3776 # When passing our own FileHandler instance, build_opener won't add the
3777 # default FileHandler and allows us to disable the file protocol, which
3778 # can be used for malicious purposes (see
067aa17e 3779 # https://github.com/ytdl-org/youtube-dl/issues/8227)
ac668111 3780 file_handler = urllib.request.FileHandler()
6240b0a2
JMF
3781
3782 def file_open(*args, **kwargs):
ac668111 3783 raise urllib.error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
6240b0a2
JMF
3784 file_handler.file_open = file_open
3785
ac668111 3786 opener = urllib.request.build_opener(
fca6dba8 3787 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
2461f79d 3788
dca08720
PH
3789 # Delete the default user-agent header, which would otherwise apply in
3790 # cases where our custom HTTP handler doesn't come into play
067aa17e 3791 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
dca08720
PH
3792 opener.addheaders = []
3793 self._opener = opener
62fec3b2
PH
3794
3795 def encode(self, s):
3796 if isinstance(s, bytes):
3797 return s # Already encoded
3798
3799 try:
3800 return s.encode(self.get_encoding())
3801 except UnicodeEncodeError as err:
3802 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3803 raise
3804
3805 def get_encoding(self):
3806 encoding = self.params.get('encoding')
3807 if encoding is None:
3808 encoding = preferredencoding()
3809 return encoding
ec82d85a 3810
e08a85d8 3811 def _write_info_json(self, label, ie_result, infofn, overwrite=None):
cb96c5be 3812 ''' Write infojson and returns True = written, 'exists' = Already exists, False = skip, None = error '''
e08a85d8 3813 if overwrite is None:
3814 overwrite = self.params.get('overwrites', True)
80c03fa9 3815 if not self.params.get('writeinfojson'):
3816 return False
3817 elif not infofn:
3818 self.write_debug(f'Skipping writing {label} infojson')
3819 return False
3820 elif not self._ensure_dir_exists(infofn):
3821 return None
e08a85d8 3822 elif not overwrite and os.path.exists(infofn):
80c03fa9 3823 self.to_screen(f'[info] {label.title()} metadata is already present')
cb96c5be 3824 return 'exists'
3825
3826 self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
3827 try:
3828 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
3829 return True
86e5f3ed 3830 except OSError:
cb96c5be 3831 self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
3832 return None
80c03fa9 3833
3834 def _write_description(self, label, ie_result, descfn):
3835 ''' Write description and returns True = written, False = skip, None = error '''
3836 if not self.params.get('writedescription'):
3837 return False
3838 elif not descfn:
3839 self.write_debug(f'Skipping writing {label} description')
3840 return False
3841 elif not self._ensure_dir_exists(descfn):
3842 return None
3843 elif not self.params.get('overwrites', True) and os.path.exists(descfn):
3844 self.to_screen(f'[info] {label.title()} description is already present')
3845 elif ie_result.get('description') is None:
3846 self.report_warning(f'There\'s no {label} description to write')
3847 return False
3848 else:
3849 try:
3850 self.to_screen(f'[info] Writing {label} description to: {descfn}')
86e5f3ed 3851 with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
80c03fa9 3852 descfile.write(ie_result['description'])
86e5f3ed 3853 except OSError:
80c03fa9 3854 self.report_error(f'Cannot write {label} description file {descfn}')
3855 return None
3856 return True
3857
3858 def _write_subtitles(self, info_dict, filename):
3859 ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
3860 ret = []
3861 subtitles = info_dict.get('requested_subtitles')
3862 if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
3863 # subtitles download errors are already managed as troubles in relevant IE
3864 # that way it will silently go on when used with unsupporting IE
3865 return ret
3866
3867 sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
3868 if not sub_filename_base:
3869 self.to_screen('[info] Skipping writing video subtitles')
3870 return ret
3871 for sub_lang, sub_info in subtitles.items():
3872 sub_format = sub_info['ext']
3873 sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
3874 sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
e04938ab 3875 existing_sub = self.existing_file((sub_filename_final, sub_filename))
3876 if existing_sub:
80c03fa9 3877 self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
e04938ab 3878 sub_info['filepath'] = existing_sub
3879 ret.append((existing_sub, sub_filename_final))
80c03fa9 3880 continue
3881
3882 self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
3883 if sub_info.get('data') is not None:
3884 try:
3885 # Use newline='' to prevent conversion of newline characters
3886 # See https://github.com/ytdl-org/youtube-dl/issues/10268
86e5f3ed 3887 with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
80c03fa9 3888 subfile.write(sub_info['data'])
3889 sub_info['filepath'] = sub_filename
3890 ret.append((sub_filename, sub_filename_final))
3891 continue
86e5f3ed 3892 except OSError:
80c03fa9 3893 self.report_error(f'Cannot write video subtitles file {sub_filename}')
3894 return None
3895
3896 try:
3897 sub_copy = sub_info.copy()
3898 sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
3899 self.dl(sub_filename, sub_copy, subtitle=True)
3900 sub_info['filepath'] = sub_filename
3901 ret.append((sub_filename, sub_filename_final))
6020e05d 3902 except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
c70c418d 3903 msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
6020e05d 3904 if self.params.get('ignoreerrors') is not True: # False or 'only_download'
c70c418d 3905 if not self.params.get('ignoreerrors'):
3906 self.report_error(msg)
3907 raise DownloadError(msg)
3908 self.report_warning(msg)
519804a9 3909 return ret
80c03fa9 3910
3911 def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
3912 ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
6c4fd172 3913 write_all = self.params.get('write_all_thumbnails', False)
80c03fa9 3914 thumbnails, ret = [], []
6c4fd172 3915 if write_all or self.params.get('writethumbnail', False):
0202b52a 3916 thumbnails = info_dict.get('thumbnails') or []
6c4fd172 3917 multiple = write_all and len(thumbnails) > 1
ec82d85a 3918
80c03fa9 3919 if thumb_filename_base is None:
3920 thumb_filename_base = filename
3921 if thumbnails and not thumb_filename_base:
3922 self.write_debug(f'Skipping writing {label} thumbnail')
3923 return ret
3924
dd0228ce 3925 for idx, t in list(enumerate(thumbnails))[::-1]:
80c03fa9 3926 thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
aa9369a2 3927 thumb_display_id = f'{label} thumbnail {t["id"]}'
80c03fa9 3928 thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
3929 thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
ec82d85a 3930
e04938ab 3931 existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
3932 if existing_thumb:
aa9369a2 3933 self.to_screen('[info] %s is already present' % (
3934 thumb_display_id if multiple else f'{label} thumbnail').capitalize())
e04938ab 3935 t['filepath'] = existing_thumb
3936 ret.append((existing_thumb, thumb_filename_final))
ec82d85a 3937 else:
80c03fa9 3938 self.to_screen(f'[info] Downloading {thumb_display_id} ...')
ec82d85a 3939 try:
297e9952 3940 uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
80c03fa9 3941 self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
d3d89c32 3942 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
ec82d85a 3943 shutil.copyfileobj(uf, thumbf)
80c03fa9 3944 ret.append((thumb_filename, thumb_filename_final))
885cc0b7 3945 t['filepath'] = thumb_filename
3158150c 3946 except network_exceptions as err:
dd0228ce 3947 thumbnails.pop(idx)
80c03fa9 3948 self.report_warning(f'Unable to download {thumb_display_id}: {err}')
6c4fd172 3949 if ret and not write_all:
3950 break
0202b52a 3951 return ret