yt_dlp/YoutubeDL.py (git blame)
26e63931 1import collections
31bd3925 2import contextlib
9d2ecdbc 3import datetime
c1c9a79c 4import errno
31bd3925 5import fileinput
b5ae35ee 6import functools
8222d8de 7import io
b82f815f 8import itertools
8694c600 9import json
62fec3b2 10import locale
083c9df9 11import operator
8222d8de 12import os
f8271158 13import random
8222d8de 14import re
15import shutil
dca08720 16import subprocess
8222d8de 17import sys
21cd8fae 18import tempfile
8222d8de 19import time
67134eab 20import tokenize
8222d8de 21import traceback
524e2e4f 22import unicodedata
f9934b96 23import urllib.request
961ea474 24from string import ascii_letters
25
f8271158 26from .cache import Cache
14f25df2 27from .compat import compat_os_name, compat_shlex_quote
982ee69a 28from .cookies import load_cookies
f8271158 29from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
30from .downloader.rtmp import rtmpdump_version
f8271158 31from .extractor import gen_extractor_classes, get_info_extractor
32from .extractor.openload import PhantomJSwrapper
33from .minicurses import format_text
34from .postprocessor import _PLUGIN_CLASSES as plugin_postprocessors
35from .postprocessor import (
36 EmbedThumbnailPP,
37 FFmpegFixupDuplicateMoovPP,
38 FFmpegFixupDurationPP,
39 FFmpegFixupM3u8PP,
40 FFmpegFixupM4aPP,
41 FFmpegFixupStretchedPP,
42 FFmpegFixupTimestampPP,
43 FFmpegMergerPP,
44 FFmpegPostProcessor,
ca9def71 45 FFmpegVideoConvertorPP,
f8271158 46 MoveFilesAfterDownloadPP,
47 get_postprocessor,
48)
ca9def71 49from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
f8271158 50from .update import detect_variant
8c25f81b 51from .utils import (
f8271158 52 DEFAULT_OUTTMPL,
7b2c3f47 53 IDENTITY,
f8271158 54 LINK_TEMPLATES,
8dc59305 55 MEDIA_EXTENSIONS,
f8271158 56 NO_DEFAULT,
1d485a1a 57 NUMBER_RE,
f8271158 58 OUTTMPL_TYPES,
59 POSTPROCESS_WHEN,
60 STR_FORMAT_RE_TMPL,
61 STR_FORMAT_TYPES,
62 ContentTooShortError,
63 DateRange,
64 DownloadCancelled,
65 DownloadError,
66 EntryNotInPlaylist,
67 ExistingVideoReached,
68 ExtractorError,
69 GeoRestrictedError,
70 HEADRequest,
f8271158 71 ISO3166Utils,
72 LazyList,
73 MaxDownloadsReached,
19a03940 74 Namespace,
f8271158 75 PagedList,
76 PerRequestProxyHandler,
7e88d7d7 77 PlaylistEntries,
f8271158 78 Popen,
79 PostProcessingError,
80 ReExtractInfo,
81 RejectedVideoReached,
82 SameFileError,
83 UnavailableVideoError,
693f0600 84 UserNotLive,
f8271158 85 YoutubeDLCookieProcessor,
86 YoutubeDLHandler,
87 YoutubeDLRedirectHandler,
eedb7ba5 88 age_restricted,
89 args_to_str,
cb794ee0 90 bug_reports_message,
ce02ed60 91 date_from_str,
ce02ed60 92 determine_ext,
b5559424 93 determine_protocol,
c0384f22 94 encode_compat_str,
ce02ed60 95 encodeFilename,
a06916d9 96 error_to_compat_str,
47cdc68e 97 escapeHTML,
590bc6f6 98 expand_path,
90137ca4 99 filter_dict,
e29663c6 100 float_or_none,
02dbf93f 101 format_bytes,
e0fd9573 102 format_decimal_suffix,
f8271158 103 format_field,
525ef922 104 formatSeconds,
0bb322b9 105 get_domain,
c9969434 106 int_or_none,
732044af 107 iri_to_uri,
34921b43 108 join_nonempty,
ce02ed60 109 locked_file,
0647d925 110 make_archive_id,
0202b52a 111 make_dir,
dca08720 112 make_HTTPS_handler,
8b7539d2 113 merge_headers,
3158150c 114 network_exceptions,
ec11a9f4 115 number_of_digits,
cd6fc19e 116 orderedSet,
083c9df9 117 parse_filesize,
ce02ed60 118 preferredencoding,
eedb7ba5 119 prepend_extension,
51fb4995 120 register_socks_protocols,
3efb96a6 121 remove_terminal_sequences,
cfb56d1a 122 render_table,
eedb7ba5 123 replace_extension,
ce02ed60 124 sanitize_filename,
1bb5c511 125 sanitize_path,
dcf77cf1 126 sanitize_url,
67dda517 127 sanitized_Request,
e5660ee6 128 std_headers,
1211bb6d 129 str_or_none,
e29663c6 130 strftime_or_none,
ce02ed60 131 subtitles_filename,
819e0531 132 supports_terminal_sequences,
b1f94422 133 system_identifier,
f2ebc5c7 134 timetuple_from_msec,
732044af 135 to_high_limit_path,
324ad820 136 traverse_obj,
6033d980 137 try_get,
29eb5174 138 url_basename,
7d1eb38a 139 variadic,
58b1f00d 140 version_tuple,
53973b4d 141 windows_enable_vt_mode,
ce02ed60 142 write_json_file,
143 write_string,
4f026faf 144)
f8271158 145from .version import RELEASE_GIT_HEAD, __version__
8222d8de 146
e9c0cdd3 147if compat_os_name == 'nt':
148 import ctypes
149
2459b6e1 150
86e5f3ed 151class YoutubeDL:
8222d8de 152 """YoutubeDL class.
153
154 YoutubeDL objects are the ones responsible for downloading the
155 actual video file and writing it to disk if the user has requested
156 it, among some other tasks. In most cases there should be one per
157 program. Since the downloader, given a video URL, doesn't know how to
158 extract all the needed information (a task that InfoExtractors do), it
159 has to pass the URL to one of them.
160
161 For this, YoutubeDL objects have a method that allows
162 InfoExtractors to be registered in a given order. When it is passed
163 a URL, the YoutubeDL object hands it to the first InfoExtractor it
164 finds that reports being able to handle it. The InfoExtractor extracts
165 all the information about the video or videos the URL refers to, and
166 YoutubeDL processes the extracted information, possibly using a File
167 Downloader to download the video.
168
169 YoutubeDL objects accept a lot of parameters. In order not to saturate
170 the object constructor with arguments, it receives a dictionary of
171 options instead. These options are available through the params
172 attribute for the InfoExtractors to use. The YoutubeDL also
173 registers itself as the downloader in charge of the InfoExtractors
174 that are added to it, so this is a "mutual registration".
175
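    A minimal sketch of this embedding pattern (the URL and the option values
    here are placeholders, not recommendations):

        from yt_dlp import YoutubeDL

        ydl_opts = {'format': 'best', 'quiet': True}
        with YoutubeDL(ydl_opts) as ydl:
            ydl.download(['https://example.com/some-video'])
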
176 Available options:
177
178 username: Username for authentication purposes.
179 password: Password for authentication purposes.
180940e0 180 videopassword: Password for accessing a video.
1da50aa3 181 ap_mso: Adobe Pass multiple-system operator identifier.
182 ap_username: Multiple-system operator account username.
183 ap_password: Multiple-system operator account password.
8222d8de 184 usenetrc: Use netrc for authentication instead.
185 verbose: Print additional info to stdout.
186 quiet: Do not print messages to stdout.
ad8915b7 187 no_warnings: Do not print out anything for warnings.
bb66c247 188 forceprint: A dict with keys WHEN mapped to a list of templates to
189 print to stdout. The allowed keys are video or any of the
190 items in utils.POSTPROCESS_WHEN.
ca30f449 191 For compatibility, a single list is also accepted
bb66c247 192 print_to_file: A dict with keys WHEN (same as forceprint) mapped to
193 a list of tuples with (template, filename)
8694c600 194 forcejson: Force printing info_dict as JSON.
63e0be34 195 dump_single_json: Force printing the info_dict of the whole playlist
196 (or video) as a single JSON line.
c25228e5 197 force_write_download_archive: Force writing download archive regardless
198 of 'skip_download' or 'simulate'.
b7b04c78 199 simulate: Do not download the video files. If unset (or None),
200 simulate only if listsubtitles, listformats or list_thumbnails is used
eb8a4433 201 format: Video format code. see "FORMAT SELECTION" for more details.
093a1710 202 You can also pass a function. The function takes 'ctx' as
203 argument and returns the formats to download.
204 See "build_format_selector" for an implementation
63ad4d43 205 allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
b7da73eb 206 ignore_no_formats_error: Ignore "No video formats" error. Useful for
207 extracting metadata even if the video is not actually
208 available for download (experimental)
0930b11f 209 format_sort: A list of fields by which to sort the video formats.
210 See "Sorting Formats" for more details.
c25228e5 211 format_sort_force: Force the given format_sort. see "Sorting Formats"
212 for more details.
08d30158 213 prefer_free_formats: Whether to prefer video formats with free containers
214 over non-free ones of same quality.
c25228e5 215 allow_multiple_video_streams: Allow multiple video streams to be merged
216 into a single file
217 allow_multiple_audio_streams: Allow multiple audio streams to be merged
218 into a single file
0ba692ac 219 check_formats Whether to test if the formats are downloadable.
9f1a1c36 220 Can be True (check all), False (check none),
221 'selected' (check selected formats),
0ba692ac 222 or None (check only if requested by extractor)
4524baf0 223 paths: Dictionary of output paths. The allowed keys are 'home'
224 'temp' and the keys of OUTTMPL_TYPES (in utils.py)
de6000d9 225 outtmpl: Dictionary of templates for output names. Allowed keys
4524baf0 226 are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
34488702 227 For compatibility with youtube-dl, a single string can also be used
a820dc72 228 outtmpl_na_placeholder: Placeholder for unavailable meta fields.
229 restrictfilenames: Do not allow "&" and spaces in file names
230 trim_file_name: Limit length of filename (extension excluded)
4524baf0 231 windowsfilenames: Force the filenames to be windows compatible
b1940459 232 ignoreerrors: Do not stop on download/postprocessing errors.
233 Can be 'only_download' to ignore only download errors.
234 Default is 'only_download' for CLI, but False for API
26e2805c 235 skip_playlist_after_errors: Number of allowed failures until the rest of
236 the playlist is skipped
d22dec74 237 force_generic_extractor: Force downloader to use the generic extractor
0c3d0f51 238 overwrites: Overwrite all video and metadata files if True,
239 overwrite only non-video files if None
240 and don't overwrite any file if False
34488702 241 For compatibility with youtube-dl,
242 "nooverwrites" may also be used instead
c14e88f0 243 playlist_items: Specific indices of playlist to download.
75822ca7 244 playlistrandom: Download playlist items in random order.
7e9a6125 245 lazy_playlist: Process playlist entries as they are received.
8222d8de 246 matchtitle: Download only matching titles.
247 rejecttitle: Reject downloads for matching titles.
8bf9319e 248 logger: Log messages to a logging.Logger instance.
8222d8de 249 logtostderr: Log messages to stderr instead of stdout.
819e0531 250 consoletitle: Display progress in console window's titlebar.
8222d8de 251 writedescription: Write the video description to a .description file
252 writeinfojson: Write the video description to a .info.json file
75d43ca0 253 clean_infojson: Remove private fields from the infojson
34488702 254 getcomments: Extract video comments. This will not be written to disk
06167fbb 255 unless writeinfojson is also given
1fb07d10 256 writeannotations: Write the video annotations to a .annotations.xml file
8222d8de 257 writethumbnail: Write the thumbnail image to a file
c25228e5 258 allow_playlist_files: Whether to write playlists' description, infojson etc
259 also to disk when using the 'write*' options
ec82d85a 260 write_all_thumbnails: Write all thumbnail formats to files
732044af 261 writelink: Write an internet shortcut file, depending on the
262 current platform (.url/.webloc/.desktop)
263 writeurllink: Write a Windows internet shortcut file (.url)
264 writewebloclink: Write a macOS internet shortcut file (.webloc)
265 writedesktoplink: Write a Linux internet shortcut file (.desktop)
8222d8de 266 writesubtitles: Write the video subtitles to a file
741dd8ea 267 writeautomaticsub: Write the automatically generated subtitles to a file
8222d8de 268 listsubtitles: Lists all available subtitles for the video
a504ced0 269 subtitlesformat: The format code for subtitles
c32b0aab 270 subtitleslangs: List of languages of the subtitles to download (can be regex).
271 The list may contain "all" to refer to all the available
272 subtitles. The language can be prefixed with a "-" to
273 exclude it from the requested languages. Eg: ['all', '-live_chat']
8222d8de 274 keepvideo: Keep the video file after post-processing
275 daterange: A DateRange object, download only if the upload_date is in the range.
276 skip_download: Skip the actual download of the video file
c35f9e72 277 cachedir: Location of the cache files in the filesystem.
a0e07d31 278 False to disable filesystem cache.
47192f92 279 noplaylist: Download single video instead of a playlist if in doubt.
8dbe9899 280 age_limit: An integer representing the user's age in years.
281 Unsuitable videos for the given age are skipped.
5fe18bdb 282 min_views: An integer representing the minimum view count the video
283 must have in order to not be skipped.
284 Videos without view count information are always
285 downloaded. None for no limit.
286 max_views: An integer representing the maximum view count.
287 Videos that are more popular than that are not
288 downloaded.
289 Videos without view count information are always
290 downloaded. None for no limit.
291 download_archive: File name of a file where all downloads are recorded.
c1c9a79c 292 Videos already present in the file are not downloaded
293 again.
8a51f564 294 break_on_existing: Stop the download process after attempting to download a
295 file that is in the archive.
296 break_on_reject: Stop the download process when encountering a video that
297 has been filtered out.
b222c271 298 break_per_url: Whether break_on_reject and break_on_existing
299 should act on each input URL as opposed to for the entire queue
d76fa1f3 300 cookiefile: File name or text stream from where cookies should be read and dumped to
f59f5ef8 301 cookiesfrombrowser: A tuple containing the name of the browser, the profile
302 name/path from where cookies are loaded, and the name of the
303 keyring. Eg: ('chrome', ) or ('vivaldi', 'default', 'BASICTEXT')
f81c62a6 304 legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
305 support RFC 5746 secure renegotiation
f59f5ef8 306 nocheckcertificate: Do not verify SSL certificates
bb58c9ed 307 client_certificate: Path to client certificate file in PEM format. May include the private key
308 client_certificate_key: Path to private key file for client certificate
309 client_certificate_password: Password for client certificate private key, if encrypted.
310 If not provided and the key is encrypted, yt-dlp will ask interactively
7e8c0af0 311 prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
c6e07cf1 312 (Only supported by some extractors)
8b7539d2 313 http_headers: A dictionary of custom headers to be used for all requests
a1ee09e8 314 proxy: URL of the proxy server to use
38cce791 315 geo_verification_proxy: URL of the proxy to use for IP address verification
504f20dd 316 on geo-restricted sites.
e344693b 317 socket_timeout: Time to wait for unresponsive hosts, in seconds
0783b09b 318 bidi_workaround: Work around buggy terminals without bidirectional text
319 support, using fribidi
a0ddb8a2 320 debug_printtraffic:Print out sent and received HTTP traffic
04b4d394 321 default_search: Prepend this string if an input url is not valid.
322 'auto' for elaborate guessing
62fec3b2 323 encoding: Use this encoding instead of the system-specified.
134c913c 324 extract_flat: Whether to resolve and process url_results further
325 * False: Always process (default)
326 * True: Never process
327 * 'in_playlist': Do not process inside playlist/multi_video
328 * 'discard': Always process, but don't return the result
329 from inside playlist/multi_video
330 * 'discard_in_playlist': Same as "discard", but only for
331 playlists (not multi_video)
f2ebc5c7 332 wait_for_video: If given, wait for scheduled streams to become available.
333 The value should be a tuple containing the range
334 (min_secs, max_secs) to wait between retries
4f026faf 335 postprocessors: A list of dictionaries, each with an entry
71b640cc 336 * key: The name of the postprocessor. See
7a5c1cfe 337 yt_dlp/postprocessor/__init__.py for a list.
bb66c247 338 * when: When to run the postprocessor. Allowed values are
339 the entries of utils.POSTPROCESS_WHEN
56d868db 340 Assumed to be 'post_process' if not given
71b640cc 341 progress_hooks: A list of functions that get called on download
342 progress, with a dictionary with the entries
5cda4eda 343 * status: One of "downloading", "error", or "finished".
ee69b99a 344 Check this first and ignore unknown values.
3ba7740d 345 * info_dict: The extracted info_dict
71b640cc 346
5cda4eda 347 If status is one of "downloading", or "finished", the
ee69b99a 348 following properties may also be present:
349 * filename: The final filename (always present)
5cda4eda 350 * tmpfilename: The filename we're currently writing to
71b640cc 351 * downloaded_bytes: Bytes on disk
352 * total_bytes: Size of the whole file, None if unknown
5cda4eda 353 * total_bytes_estimate: Guess of the eventual file size,
354 None if unavailable.
355 * elapsed: The number of seconds since download started.
71b640cc 356 * eta: The estimated time in seconds, None if unknown
357 * speed: The download speed in bytes/second, None if
358 unknown
5cda4eda 359 * fragment_index: The counter of the currently
360 downloaded video fragment.
361 * fragment_count: The number of fragments (= individual
362 files that will be merged)
71b640cc 363
364 Progress hooks are guaranteed to be called at least once
365 (with status "finished") if the download is successful.
819e0531 366 postprocessor_hooks: A list of functions that get called on postprocessing
367 progress, with a dictionary with the entries
368 * status: One of "started", "processing", or "finished".
369 Check this first and ignore unknown values.
370 * postprocessor: Name of the postprocessor
371 * info_dict: The extracted info_dict
372
373 Progress hooks are guaranteed to be called at least twice
374 (with status "started" and "finished") if the processing is successful.
45598f15 375 merge_output_format: Extension to use when merging formats.
6b591b29 376 final_ext: Expected final extension; used to detect when the file was
59a7a13e 377 already downloaded and converted
6271f1ca 378 fixup: Automatically correct known faults of the file.
379 One of:
380 - "never": do nothing
381 - "warn": only emit a warning
382 - "detect_or_warn": check whether we can do anything
62cd676c 383 about it, warn otherwise (default)
504f20dd 384 source_address: Client-side IP address to bind to.
1cf376f5 385 sleep_interval_requests: Number of seconds to sleep between requests
386 during extraction
7aa589a5 387 sleep_interval: Number of seconds to sleep before each download when
388 used alone or a lower bound of a range for randomized
389 sleep before each download (minimum possible number
390 of seconds to sleep) when used along with
391 max_sleep_interval.
392 max_sleep_interval:Upper bound of a range for randomized sleep before each
393 download (maximum possible number of seconds to sleep).
394 Must only be used along with sleep_interval.
395 Actual sleep time will be a random float from range
396 [sleep_interval; max_sleep_interval].
1cf376f5 397 sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
cfb56d1a 398 listformats: Print an overview of available video formats and exit.
399 list_thumbnails: Print a table of all thumbnails and exit.
0a41f331 400 match_filter: A function that gets called for every video with the signature
401 (info_dict, *, incomplete: bool) -> Optional[str]
402 For backward compatibility with youtube-dl, the signature
403 (info_dict) -> Optional[str] is also allowed.
404 - If it returns a message, the video is ignored.
405 - If it returns None, the video is downloaded.
406 - If it returns utils.NO_DEFAULT, the user is interactively
407 asked whether to download the video.
347de493 408 match_filter_func in utils.py is one example for this.
7e5db8c9 409 no_color: Do not emit color codes in output.
0a840f58 410 geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
504f20dd 411 HTTP header
0a840f58 412 geo_bypass_country:
773f291d 413 Two-letter ISO 3166-2 country code that will be used for
414 explicit geographic restriction bypassing via faking
504f20dd 415 X-Forwarded-For HTTP header
5f95927a 416 geo_bypass_ip_block:
417 IP range in CIDR notation that will be used similarly to
504f20dd 418 geo_bypass_country
52a8a1e1 419 external_downloader: A dictionary of protocol keys and the executable of the
420 external downloader to use for it. The allowed protocols
421 are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
422 Set the value to 'native' to use the native downloader
53ed7066 423 compat_opts: Compatibility options. See "Differences in default behavior".
3acf6d38 424 The following options do not work when used through the API:
b5ae35ee 425 filename, abort-on-error, multistreams, no-live-chat, format-sort
dac5df5a 426 no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
e4f02757 427 Refer to __init__.py for their implementation
819e0531 428 progress_template: Dictionary of templates for progress outputs.
429 Allowed keys are 'download', 'postprocess',
430 'download-title' (console title) and 'postprocess-title'.
431 The template is mapped on a dictionary with keys 'progress' and 'info'
23326151 432 retry_sleep_functions: Dictionary of functions that takes the number of attempts
433 as argument and returns the time to sleep in seconds.
434 Allowed keys are 'http', 'fragment', 'file_access'
0f446365 435 download_ranges: A callback function that gets called for every video with
436 the signature (info_dict, ydl) -> Iterable[Section].
437 Only the returned sections will be downloaded.
438 Each Section is a dict with the following keys:
5ec1b6b7 439 * start_time: Start time of the section in seconds
440 * end_time: End time of the section in seconds
441 * title: Section title (Optional)
442 * index: Section number (Optional)
0f446365 443 force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
a7dc6a89 444 noprogress: Do not print the progress bar
fe7e0c98 445
8222d8de 446 The following parameters are not used by YoutubeDL itself, they are used by
7a5c1cfe 447 the downloader (see yt_dlp/downloader/common.py):
51d9739f 448 nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
205a0654 449 max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
a7dc6a89 450 continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
59a7a13e 451 external_downloader_args, concurrent_fragment_downloads.
76b1bd67 452
453 The following options are used by the post processors:
c0b7d117 454 ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
455 to the binary or its containing directory.
43820c03 456 postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
34488702 457 and a list of additional command-line arguments for the
458 postprocessor/executable. The dict can also have "PP+EXE" keys
459 which are used when the given exe is used by the given PP.
460 Use 'default' as the name for arguments to be passed to all PP
461 For compatibility with youtube-dl, a single list of args
462 can also be used
e409895f 463
464 The following options are used by the extractors:
62bff2c1 465 extractor_retries: Number of times to retry for known errors
466 dynamic_mpd: Whether to process dynamic DASH manifests (default: True)
e409895f 467 hls_split_discontinuity: Split HLS playlists to different formats at
62bff2c1 468 discontinuities such as ad breaks (default: False)
5d3a0e79 469 extractor_args: A dictionary of arguments to be passed to the extractors.
470 See "EXTRACTOR ARGUMENTS" for details.
471 Eg: {'youtube': {'skip': ['dash', 'hls']}}
88f23a18 472 mark_watched: Mark videos watched (even with --simulate). Only for YouTube
1890fc63 473
474 The following options are deprecated and may be removed in the future:
475
7e9a6125 476 playliststart: - Use playlist_items
477 Playlist item to start at.
478 playlistend: - Use playlist_items
479 Playlist item to end at.
480 playlistreverse: - Use playlist_items
481 Download playlist items in reverse order.
1890fc63 482 forceurl: - Use forceprint
483 Force printing final URL.
484 forcetitle: - Use forceprint
485 Force printing title.
486 forceid: - Use forceprint
487 Force printing ID.
488 forcethumbnail: - Use forceprint
489 Force printing thumbnail URL.
490 forcedescription: - Use forceprint
491 Force printing description.
492 forcefilename: - Use forceprint
493 Force printing final filename.
494 forceduration: - Use forceprint
495 Force printing duration.
496 allsubtitles: - Use subtitleslangs = ['all']
497 Downloads all the subtitles of the video
498 (requires writesubtitles or writeautomaticsub)
499 include_ads: - Doesn't work
500 Download ads as well
501 call_home: - Not implemented
502 Boolean, true iff we are allowed to contact the
503 yt-dlp servers for debugging.
504 post_hooks: - Register a custom postprocessor
505 A list of functions that get called as the final step
506 for each video file, after all postprocessors have been
507 called. The filename will be passed as the only argument.
508 hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
509 Use the native HLS downloader instead of ffmpeg/avconv
510 if True, otherwise use ffmpeg/avconv if False, otherwise
511 use downloader suggested by extractor if None.
512 prefer_ffmpeg: - avconv support is deprecated
513 If False, use avconv instead of ffmpeg if both are available,
514 otherwise prefer ffmpeg.
515 youtube_include_dash_manifest: - Use extractor_args
5d3a0e79 516 If True (default), DASH manifests and related
62bff2c1 517 data will be downloaded and processed by extractor.
518 You can reduce network I/O by disabling it if you don't
519 care about DASH. (only for youtube)
1890fc63 520 youtube_include_hls_manifest: - Use extractor_args
5d3a0e79 521 If True (default), HLS manifests and related
62bff2c1 522 data will be downloaded and processed by extractor.
523 You can reduce network I/O by disabling it if you don't
524 care about HLS. (only for youtube)
8222d8de 525 """
526
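    # A rough sketch of how several of the documented options combine when embedding
    # (all values below are illustrative placeholders, not defaults):
    #
    #     ydl_opts = {
    #         'paths': {'home': '~/Videos', 'temp': '/tmp/yt-dlp'},
    #         'outtmpl': {'default': '%(title)s [%(id)s].%(ext)s'},
    #         'writesubtitles': True,
    #         'subtitleslangs': ['en', '-live_chat'],
    #         'postprocessors': [{'key': 'FFmpegMetadata', 'when': 'post_process'}],
    #         'progress_hooks': [lambda d: print(d['status'], d.get('filename'))],
    #     }
    #     with YoutubeDL(ydl_opts) as ydl:
    #         ydl.download(['https://example.com/watch?v=placeholder'])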
86e5f3ed 527 _NUMERIC_FIELDS = {
c9969434 528 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
e6f21b3d 529 'timestamp', 'release_timestamp',
c9969434 530 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
531 'average_rating', 'comment_count', 'age_limit',
532 'start_time', 'end_time',
533 'chapter_number', 'season_number', 'episode_number',
534 'track_number', 'disc_number', 'release_year',
86e5f3ed 535 }
c9969434 536
6db9c4d5 537 _format_fields = {
538 # NB: Keep in sync with the docstring of extractor/common.py
a44ca5a4 539 'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
6db9c4d5 540 'width', 'height', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr',
541 'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx',
542 'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
543 'preference', 'language', 'language_preference', 'quality', 'source_preference',
544 'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'downloader_options',
545 'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
546 }
48ee10ee 547 _format_selection_exts = {
8dc59305 548 'audio': set(MEDIA_EXTENSIONS.common_audio),
549 'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
550 'storyboards': set(MEDIA_EXTENSIONS.storyboards),
48ee10ee 551 }
552
3511266b 553 def __init__(self, params=None, auto_init=True):
883d4b1e 554 """Create a FileDownloader object with the given options.
555 @param auto_init Whether to load the default extractors and print header (if verbose).
49a57e70 556 Set to 'no_verbose_header' to not print the header
883d4b1e 557 """
e9f9a10f 558 if params is None:
559 params = {}
592b7485 560 self.params = params
8b7491c8 561 self._ies = {}
56c73665 562 self._ies_instances = {}
1e43a6f7 563 self._pps = {k: [] for k in POSTPROCESS_WHEN}
b35496d8 564 self._printed_messages = set()
1cf376f5 565 self._first_webpage_request = True
ab8e5e51 566 self._post_hooks = []
933605d7 567 self._progress_hooks = []
819e0531 568 self._postprocessor_hooks = []
8222d8de 569 self._download_retcode = 0
570 self._num_downloads = 0
9c906919 571 self._num_videos = 0
592b7485 572 self._playlist_level = 0
573 self._playlist_urls = set()
a0e07d31 574 self.cache = Cache(self)
34308b30 575
819e0531 576 windows_enable_vt_mode()
591bb9d3 577 stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
578 self._out_files = Namespace(
579 out=stdout,
580 error=sys.stderr,
581 screen=sys.stderr if self.params.get('quiet') else stdout,
582 console=None if compat_os_name == 'nt' else next(
cf4f42cb 583 filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
591bb9d3 584 )
585 self._allow_colors = Namespace(**{
586 type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
64fa820c 587 for type_, stream in self._out_files.items_ if type_ != 'console'
591bb9d3 588 })
819e0531 589
6929b41a 590 # The code is left like this to be reused for future deprecations
591 MIN_SUPPORTED, MIN_RECOMMENDED = (3, 7), (3, 7)
eff42759 592 current_version = sys.version_info[:2]
593 if current_version < MIN_RECOMMENDED:
9d339c41 594 msg = ('Support for Python version %d.%d has been deprecated. '
24093d52 595 'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details.'
c6e07cf1 596 '\n You will no longer receive updates on this version')
eff42759 597 if current_version < MIN_SUPPORTED:
598 msg = 'Python version %d.%d is no longer supported'
599 self.deprecation_warning(
600 f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))
a61f4b28 601
88acdbc2 602 if self.params.get('allow_unplayable_formats'):
603 self.report_warning(
ec11a9f4 604 f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
819e0531 605 'This is a developer option intended for debugging. \n'
606 ' If you experience any issues while using this option, '
ec11a9f4 607 f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')
88acdbc2 608
be5df5ee 609 def check_deprecated(param, option, suggestion):
610 if self.params.get(param) is not None:
86e5f3ed 611 self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
be5df5ee 612 return True
613 return False
614
615 if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
38cce791 616 if self.params.get('geo_verification_proxy') is None:
617 self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']
618
0d1bb027 619 check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
620 check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
53ed7066 621 check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')
0d1bb027 622
49a57e70 623 for msg in self.params.get('_warnings', []):
0d1bb027 624 self.report_warning(msg)
ee8dd27a 625 for msg in self.params.get('_deprecation_warnings', []):
626 self.deprecation_warning(msg)
0d1bb027 627
8a82af35 628 self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
8a82af35 629 if 'list-formats' in self.params['compat_opts']:
ec11a9f4 630 self.params['listformats_table'] = False
631
b5ae35ee 632 if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
b868936c 633 # nooverwrites was unnecessarily changed to overwrites
634 # in 0c3d0f51778b153f65c21906031c2e091fcfb641
635 # This ensures compatibility with both keys
636 self.params['overwrites'] = not self.params['nooverwrites']
b5ae35ee 637 elif self.params.get('overwrites') is None:
638 self.params.pop('overwrites', None)
b868936c 639 else:
640 self.params['nooverwrites'] = not self.params['overwrites']
b9d973be 641
455a15e2 642 self.params.setdefault('forceprint', {})
643 self.params.setdefault('print_to_file', {})
bb66c247 644
645 # Compatibility with older syntax
ca30f449 646 if not isinstance(params['forceprint'], dict):
455a15e2 647 self.params['forceprint'] = {'video': params['forceprint']}
ca30f449 648
455a15e2 649 if self.params.get('bidi_workaround', False):
1c088fa8 650 try:
651 import pty
652 master, slave = pty.openpty()
ac668111 653 width = shutil.get_terminal_size().columns
591bb9d3 654 width_args = [] if width is None else ['-w', str(width)]
655 sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
5d681e96 656 try:
d3c93ec2 657 self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
5d681e96 658 except OSError:
d3c93ec2 659 self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
5d681e96 660 self._output_channel = os.fdopen(master, 'rb')
1c088fa8 661 except OSError as ose:
66e7ace1 662 if ose.errno == errno.ENOENT:
49a57e70 663 self.report_warning(
664 'Could not find fribidi executable, ignoring --bidi-workaround. '
665 'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
1c088fa8 666 else:
667 raise
0783b09b 668
97ec5bc5 669 if auto_init:
670 if auto_init != 'no_verbose_header':
671 self.print_debug_header()
672 self.add_default_info_extractors()
673
3089bc74 674 if (sys.platform != 'win32'
675 and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
455a15e2 676 and not self.params.get('restrictfilenames', False)):
e9137224 677 # Unicode filesystem API will throw errors (#1474, #13027)
34308b30 678 self.report_warning(
6febd1c1 679 'Assuming --restrict-filenames since file system encoding '
1b725173 680 'cannot encode all characters. '
6febd1c1 681 'Set the LC_ALL environment variable to fix this.')
4a98cdbf 682 self.params['restrictfilenames'] = True
34308b30 683
bf1824b3 684 self._parse_outtmpl()
486dd09e 685
187986a8 686 # Creating format selector here allows us to catch syntax errors before the extraction
687 self.format_selector = (
fa9f30b8 688 self.params.get('format') if self.params.get('format') in (None, '-')
093a1710 689 else self.params['format'] if callable(self.params['format'])
187986a8 690 else self.build_format_selector(self.params['format']))
691
8b7539d2 692 # Set http_headers defaults according to std_headers
693 self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))
694
013b50b7 695 hooks = {
696 'post_hooks': self.add_post_hook,
697 'progress_hooks': self.add_progress_hook,
698 'postprocessor_hooks': self.add_postprocessor_hook,
699 }
700 for opt, fn in hooks.items():
701 for ph in self.params.get(opt, []):
702 fn(ph)
71b640cc 703
5bfc8bee 704 for pp_def_raw in self.params.get('postprocessors', []):
705 pp_def = dict(pp_def_raw)
706 when = pp_def.pop('when', 'post_process')
707 self.add_post_processor(
f9934b96 708 get_postprocessor(pp_def.pop('key'))(self, **pp_def),
5bfc8bee 709 when=when)
710
97ec5bc5 711 self._setup_opener()
51fb4995
YCH
712 register_socks_protocols()
713
ed39cac5 714 def preload_download_archive(fn):
715 """Preload the archive, if any is specified"""
716 if fn is None:
717 return False
49a57e70 718 self.write_debug(f'Loading archive file {fn!r}')
ed39cac5 719 try:
720 with locked_file(fn, 'r', encoding='utf-8') as archive_file:
721 for line in archive_file:
722 self.archive.add(line.strip())
86e5f3ed 723 except OSError as ioe:
ed39cac5 724 if ioe.errno != errno.ENOENT:
725 raise
726 return False
727 return True
728
729 self.archive = set()
730 preload_download_archive(self.params.get('download_archive'))
731
7d4111ed 732 def warn_if_short_id(self, argv):
733 # short YouTube ID starting with dash?
734 idxs = [
735 i for i, a in enumerate(argv)
736 if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
737 if idxs:
738 correct_argv = (
7a5c1cfe 739 ['yt-dlp']
3089bc74 740 + [a for i, a in enumerate(argv) if i not in idxs]
741 + ['--'] + [argv[i] for i in idxs]
7d4111ed 742 )
743 self.report_warning(
744 'Long argument string detected. '
49a57e70 745 'Use -- to separate parameters and URLs, like this:\n%s' %
7d4111ed 746 args_to_str(correct_argv))
747
8222d8de 748 def add_info_extractor(self, ie):
749 """Add an InfoExtractor object to the end of the list."""
8b7491c8 750 ie_key = ie.ie_key()
751 self._ies[ie_key] = ie
e52d7f85 752 if not isinstance(ie, type):
8b7491c8 753 self._ies_instances[ie_key] = ie
e52d7f85 754 ie.set_downloader(self)
8222d8de 755
8b7491c8 756 def _get_info_extractor_class(self, ie_key):
757 ie = self._ies.get(ie_key)
758 if ie is None:
759 ie = get_info_extractor(ie_key)
760 self.add_info_extractor(ie)
761 return ie
762
56c73665 763 def get_info_extractor(self, ie_key):
764 """
765 Get an instance of an IE with name ie_key. It will try to get one from
766 the _ies list; if there's no instance, it will create a new one and add
767 it to the extractor list.
768 """
769 ie = self._ies_instances.get(ie_key)
770 if ie is None:
771 ie = get_info_extractor(ie_key)()
772 self.add_info_extractor(ie)
773 return ie
774
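    # e.g. (sketch): ydl.get_info_extractor('Youtube') returns a cached YoutubeIE
    # instance, creating and registering it on first use ('Youtube' is the IE key).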
023fa8c4 775 def add_default_info_extractors(self):
776 """
777 Add the InfoExtractors returned by gen_extractors to the end of the list
778 """
e52d7f85 779 for ie in gen_extractor_classes():
023fa8c4 780 self.add_info_extractor(ie)
781
56d868db 782 def add_post_processor(self, pp, when='post_process'):
8222d8de 783 """Add a PostProcessor object to the end of the chain."""
8aa0e7cd 784 assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
5bfa4862 785 self._pps[when].append(pp)
8222d8de
JMF
786 pp.set_downloader(self)
787
ab8e5e51 788 def add_post_hook(self, ph):
789 """Add the post hook"""
790 self._post_hooks.append(ph)
791
933605d7 792 def add_progress_hook(self, ph):
819e0531 793 """Add the download progress hook"""
933605d7 794 self._progress_hooks.append(ph)
8ab470f1 795
819e0531 796 def add_postprocessor_hook(self, ph):
797 """Add the postprocessing progress hook"""
798 self._postprocessor_hooks.append(ph)
5bfc8bee 799 for pps in self._pps.values():
800 for pp in pps:
801 pp.add_progress_hook(ph)
819e0531 802
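    # Sketch of attaching hooks and postprocessors to an existing instance
    # (my_hook is a hypothetical callback; FFmpegMetadataPP ships with yt-dlp):
    #
    #     from yt_dlp.postprocessor import FFmpegMetadataPP
    #
    #     def my_hook(d):
    #         if d['status'] == 'finished':
    #             print('Downloaded', d['filename'])
    #
    #     ydl.add_progress_hook(my_hook)
    #     ydl.add_post_processor(FFmpegMetadataPP(ydl), when='post_process')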
1c088fa8 803 def _bidi_workaround(self, message):
5d681e96 804 if not hasattr(self, '_output_channel'):
1c088fa8 805 return message
806
5d681e96 807 assert hasattr(self, '_output_process')
14f25df2 808 assert isinstance(message, str)
6febd1c1 809 line_count = message.count('\n') + 1
0f06bcd7 810 self._output_process.stdin.write((message + '\n').encode())
5d681e96 811 self._output_process.stdin.flush()
0f06bcd7 812 res = ''.join(self._output_channel.readline().decode()
9e1a5b84 813 for _ in range(line_count))
6febd1c1 814 return res[:-len('\n')]
1c088fa8 815
b35496d8 816 def _write_string(self, message, out=None, only_once=False):
817 if only_once:
818 if message in self._printed_messages:
819 return
820 self._printed_messages.add(message)
821 write_string(message, out=out, encoding=self.params.get('encoding'))
734f90bb 822
cf4f42cb 823 def to_stdout(self, message, skip_eol=False, quiet=None):
0760b0a7 824 """Print message to stdout"""
cf4f42cb 825 if quiet is not None:
ae6a1b95 826 self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. Use "YoutubeDL.to_screen" instead')
8a82af35 827 if skip_eol is not False:
828 self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. Use "YoutubeDL.to_screen" instead')
0bf9dc1e 829 self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)
cf4f42cb 830
831 def to_screen(self, message, skip_eol=False, quiet=None):
832 """Print message to screen if not in quiet mode"""
8bf9319e 833 if self.params.get('logger'):
43afe285 834 self.params['logger'].debug(message)
cf4f42cb 835 return
836 if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
837 return
838 self._write_string(
839 '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
591bb9d3 840 self._out_files.screen)
8222d8de 841
b35496d8 842 def to_stderr(self, message, only_once=False):
0760b0a7 843 """Print message to stderr"""
14f25df2 844 assert isinstance(message, str)
8bf9319e 845 if self.params.get('logger'):
43afe285 846 self.params['logger'].error(message)
847 else:
5792c950 848 self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)
cf4f42cb 849
850 def _send_console_code(self, code):
591bb9d3 851 if compat_os_name == 'nt' or not self._out_files.console:
cf4f42cb 852 return
591bb9d3 853 self._write_string(code, self._out_files.console)
8222d8de 854
1e5b9a95 855 def to_console_title(self, message):
856 if not self.params.get('consoletitle', False):
857 return
3efb96a6 858 message = remove_terminal_sequences(message)
4bede0d8 859 if compat_os_name == 'nt':
860 if ctypes.windll.kernel32.GetConsoleWindow():
861 # c_wchar_p() might not be necessary if `message` is
862 # already of type unicode()
863 ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
cf4f42cb 864 else:
865 self._send_console_code(f'\033]0;{message}\007')
1e5b9a95 866
bdde425c 867 def save_console_title(self):
cf4f42cb 868 if not self.params.get('consoletitle') or self.params.get('simulate'):
bdde425c 869 return
592b7485 870 self._send_console_code('\033[22;0t') # Save the title on stack
bdde425c 871
872 def restore_console_title(self):
cf4f42cb 873 if not self.params.get('consoletitle') or self.params.get('simulate'):
bdde425c 874 return
592b7485 875 self._send_console_code('\033[23;0t') # Restore the title from stack
bdde425c 876
877 def __enter__(self):
878 self.save_console_title()
879 return self
880
881 def __exit__(self, *args):
882 self.restore_console_title()
f89197d7 883
dca08720 884 if self.params.get('cookiefile') is not None:
1bab3437 885 self.cookiejar.save(ignore_discard=True, ignore_expires=True)
bdde425c 886
fa9f30b8 887 def trouble(self, message=None, tb=None, is_error=True):
8222d8de 888 """Determine action to take when a download problem appears.
889
890 Depending on if the downloader has been configured to ignore
891 download errors or not, this method may throw an exception or
892 not when errors are found, after printing the message.
893
fa9f30b8 894 @param tb If given, is additional traceback information
895 @param is_error Whether to raise error according to ignorerrors
8222d8de 896 """
897 if message is not None:
898 self.to_stderr(message)
899 if self.params.get('verbose'):
900 if tb is None:
901 if sys.exc_info()[0]: # if .trouble has been called from an except block
6febd1c1 902 tb = ''
8222d8de 903 if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
6febd1c1 904 tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
c0384f22 905 tb += encode_compat_str(traceback.format_exc())
8222d8de 906 else:
907 tb_data = traceback.format_list(traceback.extract_stack())
6febd1c1 908 tb = ''.join(tb_data)
c19bc311 909 if tb:
910 self.to_stderr(tb)
fa9f30b8 911 if not is_error:
912 return
b1940459 913 if not self.params.get('ignoreerrors'):
8222d8de 914 if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
915 exc_info = sys.exc_info()[1].exc_info
916 else:
917 exc_info = sys.exc_info()
918 raise DownloadError(message, exc_info)
919 self._download_retcode = 1
920
19a03940 921 Styles = Namespace(
922 HEADERS='yellow',
923 EMPHASIS='light blue',
492272fe 924 FILENAME='green',
19a03940 925 ID='green',
926 DELIM='blue',
927 ERROR='red',
928 WARNING='yellow',
929 SUPPRESS='light black',
930 )
ec11a9f4 931
7578d77d 932 def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
e5a998f3 933 text = str(text)
ec11a9f4 934 if test_encoding:
935 original_text = text
5c104538 936 # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
937 encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
ec11a9f4 938 text = text.encode(encoding, 'ignore').decode(encoding)
939 if fallback is not None and text != original_text:
940 text = fallback
7578d77d 941 return format_text(text, f) if allow_colors else text if fallback is None else fallback
ec11a9f4 942
591bb9d3 943 def _format_out(self, *args, **kwargs):
944 return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)
945
ec11a9f4 946 def _format_screen(self, *args, **kwargs):
591bb9d3 947 return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)
ec11a9f4 948
949 def _format_err(self, *args, **kwargs):
591bb9d3 950 return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)
819e0531 951
c84aeac6 952 def report_warning(self, message, only_once=False):
8222d8de 953 '''
954 Print the message to stderr; it will be prefixed with 'WARNING:'.
955 If stderr is a tty file, the 'WARNING:' will be colored.
956 '''
6d07ce01 957 if self.params.get('logger') is not None:
958 self.params['logger'].warning(message)
8222d8de 959 else:
ad8915b7 960 if self.params.get('no_warnings'):
961 return
ec11a9f4 962 self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)
8222d8de 963
ee8dd27a 964 def deprecation_warning(self, message):
965 if self.params.get('logger') is not None:
a44ca5a4 966 self.params['logger'].warning(f'DeprecationWarning: {message}')
ee8dd27a 967 else:
968 self.to_stderr(f'{self._format_err("DeprecationWarning:", self.Styles.ERROR)} {message}', True)
969
fa9f30b8 970 def report_error(self, message, *args, **kwargs):
8222d8de 971 '''
972 Do the same as trouble, but prefixes the message with 'ERROR:', colored
973 in red if stderr is a tty file.
974 '''
fa9f30b8 975 self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)
8222d8de 976
b35496d8 977 def write_debug(self, message, only_once=False):
0760b0a7 978 '''Log debug message or Print message to stderr'''
979 if not self.params.get('verbose', False):
980 return
8a82af35 981 message = f'[debug] {message}'
0760b0a7 982 if self.params.get('logger'):
983 self.params['logger'].debug(message)
984 else:
b35496d8 985 self.to_stderr(message, only_once)
0760b0a7 986
8222d8de 987 def report_file_already_downloaded(self, file_name):
988 """Report file has already been fully downloaded."""
989 try:
6febd1c1 990 self.to_screen('[download] %s has already been downloaded' % file_name)
ce02ed60 991 except UnicodeEncodeError:
6febd1c1 992 self.to_screen('[download] The file has already been downloaded')
8222d8de 993
0c3d0f51 994 def report_file_delete(self, file_name):
995 """Report that existing file will be deleted."""
996 try:
c25228e5 997 self.to_screen('Deleting existing file %s' % file_name)
0c3d0f51 998 except UnicodeEncodeError:
c25228e5 999 self.to_screen('Deleting existing file')
0c3d0f51 1000
319b6059 1001 def raise_no_formats(self, info, forced=False, *, msg=None):
0a5a191a 1002 has_drm = info.get('_has_drm')
319b6059 1003 ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
1004 msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
1005 if forced or not ignored:
1151c407 1006 raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
319b6059 1007 expected=has_drm or ignored or expected)
88acdbc2 1008 else:
1009 self.report_warning(msg)
1010
de6000d9 1011 def parse_outtmpl(self):
bf1824b3 1012 self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
1013 self._parse_outtmpl()
1014 return self.params['outtmpl']
1015
1016 def _parse_outtmpl(self):
7b2c3f47 1017 sanitize = IDENTITY
bf1824b3 1018 if self.params.get('restrictfilenames'): # Remove spaces in the default template
71ce444a 1019 sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')
bf1824b3 1020
1021 outtmpl = self.params.setdefault('outtmpl', {})
1022 if not isinstance(outtmpl, dict):
1023 self.params['outtmpl'] = outtmpl = {'default': outtmpl}
1024 outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})
de6000d9 1025
21cd8fae 1026 def get_output_path(self, dir_type='', filename=None):
1027 paths = self.params.get('paths', {})
1028 assert isinstance(paths, dict)
1029 path = os.path.join(
1030 expand_path(paths.get('home', '').strip()),
1031 expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
1032 filename or '')
21cd8fae 1033 return sanitize_path(path, force=self.params.get('windowsfilenames'))
1034
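    # Rough example (POSIX paths assumed; values are placeholders): with
    # params = {'paths': {'home': '/data/videos', 'thumbnail': 'thumbs'}},
    # get_output_path('thumbnail', 'clip.webp') resolves to
    # '/data/videos/thumbs/clip.webp' after sanitize_path.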
76a264ac 1035 @staticmethod
901130bb 1036 def _outtmpl_expandpath(outtmpl):
1037 # expand_path translates '%%' into '%' and '$$' into '$'
1038 # correspondingly that is not what we want since we need to keep
1039 # '%%' intact for template dict substitution step. Working around
1040 # with boundary-alike separator hack.
1041 sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
86e5f3ed 1042 outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')
901130bb 1043
1044 # outtmpl should be expand_path'ed before template dict substitution
1045 # because meta fields may contain env variables we don't want to
1046 # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
1047 # title "Hello $PATH", we don't want `$PATH` to be expanded.
1048 return expand_path(outtmpl).replace(sep, '')
1049
1050 @staticmethod
1051 def escape_outtmpl(outtmpl):
1052 ''' Escape any remaining strings like %s, %abc% etc. '''
1053 return re.sub(
1054 STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
1055 lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
1056 outtmpl)
1057
1058 @classmethod
1059 def validate_outtmpl(cls, outtmpl):
76a264ac 1060 ''' @return None or Exception object '''
7d1eb38a 1061 outtmpl = re.sub(
47cdc68e 1062 STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
7d1eb38a 1063 lambda mobj: f'{mobj.group(0)[:-1]}s',
1064 cls._outtmpl_expandpath(outtmpl))
76a264ac 1065 try:
7d1eb38a 1066 cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
76a264ac 1067 return None
1068 except ValueError as err:
1069 return err
1070
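    # Illustrative behaviour (sketch): a well-formed template yields None, while a
    # malformed one should return the exception object instead of raising, e.g.:
    #
    #     YoutubeDL.validate_outtmpl('%(title)s - %(id)s.%(ext)s')  # -> None
    #     YoutubeDL.validate_outtmpl('%(title)z')                   # -> ValueError instance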
03b4de72 1071 @staticmethod
1072 def _copy_infodict(info_dict):
1073 info_dict = dict(info_dict)
09b49e1f 1074 info_dict.pop('__postprocessors', None)
415f8d51 1075 info_dict.pop('__pending_error', None)
03b4de72 1076 return info_dict
1077
e0fd9573 1078 def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
1079 """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
1080 @param sanitize Whether to sanitize the output as a filename.
1081 For backward compatibility, a function can also be passed
1082 """
1083
6e84b215 1084 info_dict.setdefault('epoch', int(time.time())) # keep epoch consistent once set
143db31d 1085
03b4de72 1086 info_dict = self._copy_infodict(info_dict)
752cda38 1087 info_dict['duration_string'] = ( # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
53c18592 1088 formatSeconds(info_dict['duration'], '-' if sanitize else ':')
143db31d 1089 if info_dict.get('duration', None) is not None
1090 else None)
1d485a1a 1091 info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
9c906919 1092 info_dict['video_autonumber'] = self._num_videos
752cda38 1093 if info_dict.get('resolution') is None:
1094 info_dict['resolution'] = self.format_resolution(info_dict, default=None)
143db31d 1095
e6f21b3d 1096 # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
143db31d 1097 # of %(field)s to %(field)0Nd for backward compatibility
1098 field_size_compat_map = {
0a5a191a 1099 'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
ec11a9f4 1100 'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
752cda38 1101 'autonumber': self.params.get('autonumber_size') or 5,
143db31d 1102 }
752cda38 1103
385a27fa 1104 TMPL_DICT = {}
47cdc68e 1105 EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
385a27fa 1106 MATH_FUNCTIONS = {
1107 '+': float.__add__,
1108 '-': float.__sub__,
1109 }
e625be0d 1110 # Field is of the form key1.key2...
1111 # where keys (except first) can be string, int or slice
2b8a2973 1112 FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
1d485a1a 1113 MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
385a27fa 1114 MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
1d485a1a 1115 INTERNAL_FORMAT_RE = re.compile(rf'''(?x)
e625be0d 1116 (?P<negate>-)?
1d485a1a 1117 (?P<fields>{FIELD_RE})
1118 (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
e625be0d 1119 (?:>(?P<strf_format>.+?))?
34baa9fd 1120 (?P<remaining>
1121 (?P<alternate>(?<!\\),[^|&)]+)?
1122 (?:&(?P<replacement>.*?))?
1123 (?:\|(?P<default>.*?))?
1d485a1a 1124 )$''')
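        # A few documented template forms the pattern above is meant to parse
        # (illustrative examples only, not an exhaustive list):
        #   %(title)s                                    plain field
        #   %(tags.0)s, %(subtitles.en.-1.ext)s          object traversal
        #   %(playlist_index+10)03d                      arithmetic offset
        #   %(duration>%H-%M-%S)s                        strftime-style formatting
        #   %(release_date>%Y,upload_date>%Y|Unknown)s   alternates with a default
        #   %(chapters&has chapters|no chapters)s        replacement text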
752cda38 1125
2b8a2973 1126 def _traverse_infodict(k):
1127 k = k.split('.')
1128 if k[0] == '':
1129 k.pop(0)
1130 return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)
76a264ac 1131
752cda38 1132 def get_value(mdict):
1133 # Object traversal
2b8a2973 1134 value = _traverse_infodict(mdict['fields'])
752cda38 1135 # Negative
1136 if mdict['negate']:
1137 value = float_or_none(value)
1138 if value is not None:
1139 value *= -1
1140 # Do maths
385a27fa 1141 offset_key = mdict['maths']
1142 if offset_key:
752cda38 1143 value = float_or_none(value)
1144 operator = None
385a27fa 1145 while offset_key:
1146 item = re.match(
1147 MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
1148 offset_key).group(0)
1149 offset_key = offset_key[len(item):]
1150 if operator is None:
752cda38 1151 operator = MATH_FUNCTIONS[item]
385a27fa 1152 continue
1153 item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
1154 offset = float_or_none(item)
1155 if offset is None:
2b8a2973 1156 offset = float_or_none(_traverse_infodict(item))
385a27fa 1157 try:
1158 value = operator(value, multiplier * offset)
1159 except (TypeError, ZeroDivisionError):
1160 return None
1161 operator = None
752cda38 1162 # Datetime formatting
1163 if mdict['strf_format']:
7c37ff97 1164 value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))
752cda38 1165
a6bcaf71 1166 # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
1167 if sanitize and value == '':
1168 value = None
752cda38 1169 return value
1170
b868936c 1171 na = self.params.get('outtmpl_na_placeholder', 'NA')
1172
e0fd9573 1173 def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
5c3895ff 1174 return sanitize_filename(str(value), restricted=restricted, is_id=(
1175 bool(re.search(r'(^|[_.])id(\.|$)', key))
8a82af35 1176 if 'filename-sanitization' in self.params['compat_opts']
5c3895ff 1177 else NO_DEFAULT))
e0fd9573 1178
1179 sanitizer = sanitize if callable(sanitize) else filename_sanitizer
1180 sanitize = bool(sanitize)
1181
6e84b215 1182 def _dumpjson_default(obj):
1183 if isinstance(obj, (set, LazyList)):
1184 return list(obj)
adbc4ec4 1185 return repr(obj)
6e84b215 1186
752cda38 1187 def create_key(outer_mobj):
1188 if not outer_mobj.group('has_key'):
b836dc94 1189 return outer_mobj.group(0)
752cda38 1190 key = outer_mobj.group('key')
752cda38 1191 mobj = re.match(INTERNAL_FORMAT_RE, key)
e0fd9573 1192 initial_field = mobj.group('fields') if mobj else ''
e978789f 1193 value, replacement, default = None, None, na
7c37ff97 1194 while mobj:
e625be0d 1195 mobj = mobj.groupdict()
7c37ff97 1196 default = mobj['default'] if mobj['default'] is not None else default
752cda38 1197 value = get_value(mobj)
e978789f 1198 replacement = mobj['replacement']
7c37ff97 1199 if value is None and mobj['alternate']:
34baa9fd 1200 mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
7c37ff97 1201 else:
1202 break
752cda38 1203
b868936c 1204 fmt = outer_mobj.group('format')
752cda38 1205 if fmt == 's' and value is not None and key in field_size_compat_map.keys():
86e5f3ed 1206 fmt = f'0{field_size_compat_map[key]:d}d'
752cda38 1207
e978789f 1208 value = default if value is None else value if replacement is None else replacement
752cda38 1209
4476d2c7 1210 flags = outer_mobj.group('conversion') or ''
7d1eb38a 1211 str_fmt = f'{fmt[:-1]}s'
524e2e4f 1212 if fmt[-1] == 'l': # list
4476d2c7 1213 delim = '\n' if '#' in flags else ', '
9e907ebd 1214 value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
524e2e4f 1215 elif fmt[-1] == 'j': # json
4476d2c7 1216 value, fmt = json.dumps(value, default=_dumpjson_default, indent=4 if '#' in flags else None), str_fmt
47cdc68e 1217 elif fmt[-1] == 'h': # html
1218 value, fmt = escapeHTML(value), str_fmt
524e2e4f 1219 elif fmt[-1] == 'q': # quoted
4476d2c7 1220 value = map(str, variadic(value) if '#' in flags else [value])
1221 value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
524e2e4f 1222 elif fmt[-1] == 'B': # bytes
0f06bcd7 1223 value = f'%{str_fmt}'.encode() % str(value).encode()
f5aa5cfb 1224 value, fmt = value.decode('utf-8', 'ignore'), 's'
524e2e4f 1225 elif fmt[-1] == 'U': # unicode normalized
524e2e4f 1226 value, fmt = unicodedata.normalize(
1227 # "+" = compatibility equivalence, "#" = NFD
4476d2c7 1228 'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
524e2e4f 1229 value), str_fmt
e0fd9573 1230 elif fmt[-1] == 'D': # decimal suffix
abbeeebc 1231 num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
1232 value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
1233 factor=1024 if '#' in flags else 1000)
37893bb0 1234 elif fmt[-1] == 'S': # filename sanitization
e0fd9573 1235 value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
7d1eb38a 1236 elif fmt[-1] == 'c':
524e2e4f 1237 if value:
1238 value = str(value)[0]
76a264ac 1239 else:
524e2e4f 1240 fmt = str_fmt
76a264ac 1241 elif fmt[-1] not in 'rs': # numeric
a439a3a4 1242 value = float_or_none(value)
752cda38 1243 if value is None:
1244 value, fmt = default, 's'
901130bb 1245
752cda38 1246 if sanitize:
1247 if fmt[-1] == 'r':
1248 # If value is an object, sanitize might convert it to a string
1249 # So we convert it to repr first
7d1eb38a 1250 value, fmt = repr(value), str_fmt
639f1cea 1251 if fmt[-1] in 'csr':
e0fd9573 1252 value = sanitizer(initial_field, value)
901130bb 1253
b868936c 1254 key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
385a27fa 1255 TMPL_DICT[key] = value
b868936c 1256 return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))
752cda38 1257
385a27fa 1258 return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT
143db31d 1259
819e0531 1260 def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
1261 outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
1262 return self.escape_outtmpl(outtmpl) % info_dict
1263
5127e92a 1264 def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
1265 assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
1266 if outtmpl is None:
bf1824b3 1267 outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
8222d8de 1268 try:
5127e92a 1269 outtmpl = self._outtmpl_expandpath(outtmpl)
e0fd9573 1270 filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
6a0546e3 1271 if not filename:
1272 return None
15da37c7 1273
5127e92a 1274 if tmpl_type in ('', 'temp'):
6a0546e3 1275 final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
1276 if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
1277 filename = replace_extension(filename, ext, final_ext)
5127e92a 1278 elif tmpl_type:
6a0546e3 1279 force_ext = OUTTMPL_TYPES[tmpl_type]
1280 if force_ext:
1281 filename = replace_extension(filename, force_ext, info_dict.get('ext'))
de6000d9 1282
bdc3fd2f
U
1283 # https://github.com/blackjack4494/youtube-dlc/issues/85
1284 trim_file_name = self.params.get('trim_file_name', False)
1285 if trim_file_name:
5c22c63d 1286 no_ext, *ext = filename.rsplit('.', 2)
1287 filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')
bdc3fd2f 1288
0202b52a 1289 return filename
8222d8de 1290 except ValueError as err:
6febd1c1 1291 self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
8222d8de
JMF
1292 return None
1293
5127e92a 1294 def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
1295 """Generate the output filename"""
1296 if outtmpl:
1297 assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
1298 dir_type = None
1299 filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
80c03fa9 1300 if not filename and dir_type not in ('', 'temp'):
1301 return ''
de6000d9 1302
c84aeac6 1303 if warn:
21cd8fae 1304 if not self.params.get('paths'):
de6000d9 1305 pass
1306 elif filename == '-':
c84aeac6 1307 self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
de6000d9 1308 elif os.path.isabs(filename):
c84aeac6 1309 self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
de6000d9 1310 if filename == '-' or not filename:
1311 return filename
1312
21cd8fae 1313 return self.get_output_path(dir_type, filename)
0202b52a 1314
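# A hypothetical sketch of how the rendered template is combined with the
# configured paths (values and POSIX-style path layout are assumed):
#
#   ydl = YoutubeDL({'outtmpl': '%(title)s.%(ext)s',
#                    'paths': {'home': '/tmp/downloads'}})
#   ydl.prepare_filename({'id': 'x1', 'title': 'Clip', 'ext': 'mp4'})
#   # -> '/tmp/downloads/Clip.mp4', joined via get_output_path()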
120fe513 1315 def _match_entry(self, info_dict, incomplete=False, silent=False):
ecdec191 1316 """ Returns None if the file should be downloaded """
8222d8de 1317
3bec830a 1318 video_title = info_dict.get('title', info_dict.get('id', 'entry'))
c77495e3 1319
8b0d7497 1320 def check_filter():
8b0d7497 1321 if 'title' in info_dict:
1322 # This can happen when we're just evaluating the playlist
1323 title = info_dict['title']
1324 matchtitle = self.params.get('matchtitle', False)
1325 if matchtitle:
1326 if not re.search(matchtitle, title, re.IGNORECASE):
1327 return '"' + title + '" title did not match pattern "' + matchtitle + '"'
1328 rejecttitle = self.params.get('rejecttitle', False)
1329 if rejecttitle:
1330 if re.search(rejecttitle, title, re.IGNORECASE):
1331 return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
1332 date = info_dict.get('upload_date')
1333 if date is not None:
1334 dateRange = self.params.get('daterange', DateRange())
1335 if date not in dateRange:
86e5f3ed 1336 return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
8b0d7497 1337 view_count = info_dict.get('view_count')
1338 if view_count is not None:
1339 min_views = self.params.get('min_views')
1340 if min_views is not None and view_count < min_views:
1341 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
1342 max_views = self.params.get('max_views')
1343 if max_views is not None and view_count > max_views:
1344 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
1345 if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
1346 return 'Skipping "%s" because it is age restricted' % video_title
8b0d7497 1347
8f18aca8 1348 match_filter = self.params.get('match_filter')
1349 if match_filter is not None:
1350 try:
1351 ret = match_filter(info_dict, incomplete=incomplete)
1352 except TypeError:
1353 # For backward compatibility
1354 ret = None if incomplete else match_filter(info_dict)
492272fe 1355 if ret is NO_DEFAULT:
1356 while True:
1357 filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
1358 reply = input(self._format_screen(
1359 f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
1360 if reply in {'y', ''}:
1361 return None
1362 elif reply == 'n':
1363 return f'Skipping {video_title}'
492272fe 1364 elif ret is not None:
8f18aca8 1365 return ret
8b0d7497 1366 return None
1367
c77495e3 1368 if self.in_download_archive(info_dict):
1369 reason = '%s has already been recorded in the archive' % video_title
1370 break_opt, break_err = 'break_on_existing', ExistingVideoReached
1371 else:
1372 reason = check_filter()
1373 break_opt, break_err = 'break_on_reject', RejectedVideoReached
8b0d7497 1374 if reason is not None:
120fe513 1375 if not silent:
1376 self.to_screen('[download] ' + reason)
c77495e3 1377 if self.params.get(break_opt, False):
1378 raise break_err()
8b0d7497 1379 return reason
fe7e0c98 1380
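# A sketch of the match_filter contract relied on above (names are
# hypothetical): the callable returns None to accept an entry, a string
# reason to skip it, or NO_DEFAULT to prompt interactively.
#
#   def longer_than_an_hour(info_dict, *, incomplete=False):
#       if (info_dict.get('duration') or 0) > 3600:
#           return 'Skipping: longer than an hour'
#       return None  # None means "download"
#
#   ydl = YoutubeDL({'match_filter': longer_than_an_hour})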
b6c45014
JMF
1381 @staticmethod
1382 def add_extra_info(info_dict, extra_info):
1383 '''Set the keys from extra_info in info dict if they are missing'''
1384 for key, value in extra_info.items():
1385 info_dict.setdefault(key, value)
1386
409e1828 1387 def extract_info(self, url, download=True, ie_key=None, extra_info=None,
61aa5ba3 1388 process=True, force_generic_extractor=False):
41d1cca3 1389 """
1390 Return a list with a dictionary for each video extracted.
1391
1392 Arguments:
1393 url -- URL to extract
1394
1395 Keyword arguments:
1396 download -- whether to download videos during extraction
1397 ie_key -- extractor key hint
1398 extra_info -- dictionary containing the extra values to add to each result
1399 process -- whether to resolve all unresolved references (URLs, playlist items),
1400 must be True for download to work.
1401 force_generic_extractor -- force using the generic extractor
1402 """
fe7e0c98 1403
409e1828 1404 if extra_info is None:
1405 extra_info = {}
1406
61aa5ba3 1407 if not ie_key and force_generic_extractor:
d22dec74
S
1408 ie_key = 'Generic'
1409
8222d8de 1410 if ie_key:
8b7491c8 1411 ies = {ie_key: self._get_info_extractor_class(ie_key)}
8222d8de
JMF
1412 else:
1413 ies = self._ies
1414
8b7491c8 1415 for ie_key, ie in ies.items():
8222d8de
JMF
1416 if not ie.suitable(url):
1417 continue
1418
1419 if not ie.working():
6febd1c1
PH
1420 self.report_warning('The program functionality for this site has been marked as broken, '
1421 'and will probably not work.')
8222d8de 1422
1151c407 1423 temp_id = ie.get_temp_id(url)
a0566bbf 1424 if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
5e5be0c0 1425 self.to_screen(f'[{ie_key}] {temp_id}: has already been recorded in the archive')
1426 if self.params.get('break_on_existing', False):
1427 raise ExistingVideoReached()
a0566bbf 1428 break
8b7491c8 1429 return self.__extract_info(url, self.get_info_extractor(ie_key), download, extra_info, process)
a0566bbf 1430 else:
1431 self.report_error('no suitable InfoExtractor for URL %s' % url)
1432
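# A hypothetical call, with a placeholder URL and minimal options:
#
#   ydl = YoutubeDL({'skip_download': True})
#   info = ydl.extract_info('https://example.com/video', download=False)
#   info.get('title')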
7e88d7d7 1433 def _handle_extraction_exceptions(func):
b5ae35ee 1434 @functools.wraps(func)
a0566bbf 1435 def wrapper(self, *args, **kwargs):
6da22e7d 1436 while True:
1437 try:
1438 return func(self, *args, **kwargs)
1439 except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
8222d8de 1440 raise
6da22e7d 1441 except ReExtractInfo as e:
1442 if e.expected:
1443 self.to_screen(f'{e}; Re-extracting data')
1444 else:
1445 self.to_stderr('\r')
1446 self.report_warning(f'{e}; Re-extracting data')
1447 continue
1448 except GeoRestrictedError as e:
1449 msg = e.msg
1450 if e.countries:
1451 msg += '\nThis video is available in %s.' % ', '.join(
1452 map(ISO3166Utils.short2full, e.countries))
1453 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around this.'
1454 self.report_error(msg)
1455 except ExtractorError as e: # An error we somewhat expected
1456 self.report_error(str(e), e.format_traceback())
1457 except Exception as e:
1458 if self.params.get('ignoreerrors'):
1459 self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
1460 else:
1461 raise
1462 break
a0566bbf 1463 return wrapper
1464
693f0600 1465 def _wait_for_video(self, ie_result={}):
f2ebc5c7 1466 if (not self.params.get('wait_for_video')
1467 or ie_result.get('_type', 'video') != 'video'
1468 or ie_result.get('formats') or ie_result.get('url')):
1469 return
1470
1471 format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
1472 last_msg = ''
1473
1474 def progress(msg):
1475 nonlocal last_msg
a7dc6a89 1476 full_msg = f'{msg}\n'
1477 if not self.params.get('noprogress'):
1478 full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
1479 elif last_msg:
1480 return
1481 self.to_screen(full_msg, skip_eol=True)
f2ebc5c7 1482 last_msg = msg
1483
1484 min_wait, max_wait = self.params.get('wait_for_video')
1485 diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
1486 if diff is None and ie_result.get('live_status') == 'is_upcoming':
16c620bc 1487 diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
f2ebc5c7 1488 self.report_warning('Release time of video is not known')
693f0600 1489 elif ie_result and (diff or 0) <= 0:
f2ebc5c7 1490 self.report_warning('Video should already be available according to extracted info')
38d79fd1 1491 diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
f2ebc5c7 1492 self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')
1493
1494 wait_till = time.time() + diff
1495 try:
1496 while True:
1497 diff = wait_till - time.time()
1498 if diff <= 0:
1499 progress('')
1500 raise ReExtractInfo('[wait] Wait period ended', expected=True)
1501 progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
1502 time.sleep(1)
1503 except KeyboardInterrupt:
1504 progress('')
1505 raise ReExtractInfo('[wait] Interrupted by user', expected=True)
1506 except BaseException as e:
1507 if not isinstance(e, ReExtractInfo):
1508 self.to_screen('')
1509 raise
1510
7e88d7d7 1511 @_handle_extraction_exceptions
58f197b7 1512 def __extract_info(self, url, ie, download, extra_info, process):
693f0600 1513 try:
1514 ie_result = ie.extract(url)
1515 except UserNotLive as e:
1516 if process:
1517 if self.params.get('wait_for_video'):
1518 self.report_warning(e)
1519 self._wait_for_video()
1520 raise
a0566bbf 1521 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
cb794ee0 1522 self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
a0566bbf 1523 return
1524 if isinstance(ie_result, list):
1525 # Backwards compatibility: old IE result format
1526 ie_result = {
1527 '_type': 'compat_list',
1528 'entries': ie_result,
1529 }
e37d0efb 1530 if extra_info.get('original_url'):
1531 ie_result.setdefault('original_url', extra_info['original_url'])
a0566bbf 1532 self.add_default_extra_info(ie_result, ie, url)
1533 if process:
f2ebc5c7 1534 self._wait_for_video(ie_result)
a0566bbf 1535 return self.process_ie_result(ie_result, download, extra_info)
8222d8de 1536 else:
a0566bbf 1537 return ie_result
fe7e0c98 1538
ea38e55f 1539 def add_default_extra_info(self, ie_result, ie, url):
6033d980 1540 if url is not None:
1541 self.add_extra_info(ie_result, {
1542 'webpage_url': url,
1543 'original_url': url,
57ebfca3 1544 })
1545 webpage_url = ie_result.get('webpage_url')
1546 if webpage_url:
1547 self.add_extra_info(ie_result, {
1548 'webpage_url_basename': url_basename(webpage_url),
1549 'webpage_url_domain': get_domain(webpage_url),
6033d980 1550 })
1551 if ie is not None:
1552 self.add_extra_info(ie_result, {
1553 'extractor': ie.IE_NAME,
1554 'extractor_key': ie.ie_key(),
1555 })
ea38e55f 1556
58adec46 1557 def process_ie_result(self, ie_result, download=True, extra_info=None):
8222d8de
JMF
1558 """
1559 Take the result of the ie (may be modified) and resolve all unresolved
1560 references (URLs, playlist items).
1561
1562 It will also download the videos if 'download' is True.
1563 Returns the resolved ie_result.
1564 """
58adec46 1565 if extra_info is None:
1566 extra_info = {}
e8ee972c
PH
1567 result_type = ie_result.get('_type', 'video')
1568
057a5206 1569 if result_type in ('url', 'url_transparent'):
8f97a15d 1570 ie_result['url'] = sanitize_url(
1571 ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
e37d0efb 1572 if ie_result.get('original_url'):
1573 extra_info.setdefault('original_url', ie_result['original_url'])
1574
057a5206 1575 extract_flat = self.params.get('extract_flat', False)
3089bc74
S
1576 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1577 or extract_flat is True):
ecb54191 1578 info_copy = ie_result.copy()
6033d980 1579 ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
360167b9 1580 if ie and not ie_result.get('id'):
4614bc22 1581 info_copy['id'] = ie.get_temp_id(ie_result['url'])
6033d980 1582 self.add_default_extra_info(info_copy, ie, ie_result['url'])
4614bc22 1583 self.add_extra_info(info_copy, extra_info)
b5475f11 1584 info_copy, _ = self.pre_process(info_copy)
ecb54191 1585 self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
415f8d51 1586 self._raise_pending_errors(info_copy)
4614bc22 1587 if self.params.get('force_write_download_archive', False):
1588 self.record_download_archive(info_copy)
e8ee972c
PH
1589 return ie_result
1590
8222d8de 1591 if result_type == 'video':
b6c45014 1592 self.add_extra_info(ie_result, extra_info)
9c2b75b5 1593 ie_result = self.process_video_result(ie_result, download=download)
415f8d51 1594 self._raise_pending_errors(ie_result)
28b0eb0f 1595 additional_urls = (ie_result or {}).get('additional_urls')
9c2b75b5 1596 if additional_urls:
e9f4ccd1 1597 # TODO: Improve MetadataParserPP to allow setting a list
14f25df2 1598 if isinstance(additional_urls, str):
9c2b75b5 1599 additional_urls = [additional_urls]
1600 self.to_screen(
1601 '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
1602 self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
1603 ie_result['additional_entries'] = [
1604 self.extract_info(
b69fd25c 1605 url, download, extra_info=extra_info,
9c2b75b5 1606 force_generic_extractor=self.params.get('force_generic_extractor'))
1607 for url in additional_urls
1608 ]
1609 return ie_result
8222d8de
JMF
1610 elif result_type == 'url':
1611 # We have to add extra_info to the results because it may be
1612 # contained in a playlist
07cce701 1613 return self.extract_info(
1614 ie_result['url'], download,
1615 ie_key=ie_result.get('ie_key'),
1616 extra_info=extra_info)
7fc3fa05
PH
1617 elif result_type == 'url_transparent':
1618 # Use the information from the embedding page
1619 info = self.extract_info(
1620 ie_result['url'], ie_key=ie_result.get('ie_key'),
1621 extra_info=extra_info, download=False, process=False)
1622
1640eb09
S
1623 # extract_info may return None when ignoreerrors is enabled and
1624 # extraction failed with an error, don't crash and return early
1625 # in this case
1626 if not info:
1627 return info
1628
3975b4d2 1629 exempted_fields = {'_type', 'url', 'ie_key'}
1630 if not ie_result.get('section_end') and ie_result.get('section_start') is None:
1631 # For video clips, the id etc of the clip extractor should be used
1632 exempted_fields |= {'id', 'extractor', 'extractor_key'}
1633
412c617d 1634 new_result = info.copy()
3975b4d2 1635 new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))
7fc3fa05 1636
0563f7ac
S
1637 # Extracted info may not be a video result (i.e.
1638 # info.get('_type', 'video') != video) but rather an url or
1639 # url_transparent. In such cases outer metadata (from ie_result)
1640 # should be propagated to inner one (info). For this to happen
1641 # _type of info should be overridden with url_transparent. This
067aa17e 1642 # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
0563f7ac
S
1643 if new_result.get('_type') == 'url':
1644 new_result['_type'] = 'url_transparent'
7fc3fa05
PH
1645
1646 return self.process_ie_result(
1647 new_result, download=download, extra_info=extra_info)
40fcba5e 1648 elif result_type in ('playlist', 'multi_video'):
30a074c2 1649 # Protect from infinite recursion due to recursively nested playlists
1650 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
1651 webpage_url = ie_result['webpage_url']
1652 if webpage_url in self._playlist_urls:
7e85e872 1653 self.to_screen(
30a074c2 1654 '[download] Skipping already downloaded playlist: %s'
1655 % (ie_result.get('title') or ie_result.get('id')))
1656 return
7e85e872 1657
30a074c2 1658 self._playlist_level += 1
1659 self._playlist_urls.add(webpage_url)
03f83004 1660 self._fill_common_fields(ie_result, False)
bc516a3f 1661 self._sanitize_thumbnails(ie_result)
30a074c2 1662 try:
1663 return self.__process_playlist(ie_result, download)
1664 finally:
1665 self._playlist_level -= 1
1666 if not self._playlist_level:
1667 self._playlist_urls.clear()
8222d8de 1668 elif result_type == 'compat_list':
c9bf4114
PH
1669 self.report_warning(
1670 'Extractor %s returned a compat_list result. '
1671 'It needs to be updated.' % ie_result.get('extractor'))
5f6a1245 1672
8222d8de 1673 def _fixup(r):
b868936c 1674 self.add_extra_info(r, {
1675 'extractor': ie_result['extractor'],
1676 'webpage_url': ie_result['webpage_url'],
1677 'webpage_url_basename': url_basename(ie_result['webpage_url']),
0bb322b9 1678 'webpage_url_domain': get_domain(ie_result['webpage_url']),
b868936c 1679 'extractor_key': ie_result['extractor_key'],
1680 })
8222d8de
JMF
1681 return r
1682 ie_result['entries'] = [
b6c45014 1683 self.process_ie_result(_fixup(r), download, extra_info)
8222d8de
JMF
1684 for r in ie_result['entries']
1685 ]
1686 return ie_result
1687 else:
1688 raise Exception('Invalid result type: %s' % result_type)
1689
e92caff5 1690 def _ensure_dir_exists(self, path):
1691 return make_dir(path, self.report_error)
1692
3b603dbd 1693 @staticmethod
3bec830a 1694 def _playlist_infodict(ie_result, strict=False, **kwargs):
1695 info = {
1696 'playlist_count': ie_result.get('playlist_count'),
3b603dbd 1697 'playlist': ie_result.get('title') or ie_result.get('id'),
1698 'playlist_id': ie_result.get('id'),
1699 'playlist_title': ie_result.get('title'),
1700 'playlist_uploader': ie_result.get('uploader'),
1701 'playlist_uploader_id': ie_result.get('uploader_id'),
3b603dbd 1702 **kwargs,
1703 }
3bec830a 1704 if strict:
1705 return info
1706 return {
1707 **info,
1708 'playlist_index': 0,
1709 '__last_playlist_index': max(ie_result['requested_entries'] or (0, 0)),
1710 'extractor': ie_result['extractor'],
1711 'webpage_url': ie_result['webpage_url'],
1712 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1713 'webpage_url_domain': get_domain(ie_result['webpage_url']),
1714 'extractor_key': ie_result['extractor_key'],
1715 }
3b603dbd 1716
30a074c2 1717 def __process_playlist(self, ie_result, download):
7e88d7d7 1718 """Process each entry in the playlist"""
f5ea4748 1719 assert ie_result['_type'] in ('playlist', 'multi_video')
1720
3bec830a 1721 common_info = self._playlist_infodict(ie_result, strict=True)
3955b207 1722 title = common_info.get('playlist') or '<Untitled>'
3bec830a 1723 if self._match_entry(common_info, incomplete=True) is not None:
1724 return
c6e07cf1 1725 self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')
f0d785d3 1726
7e88d7d7 1727 all_entries = PlaylistEntries(self, ie_result)
7e9a6125 1728 entries = orderedSet(all_entries.get_requested_items(), lazy=True)
1729
1730 lazy = self.params.get('lazy_playlist')
1731 if lazy:
1732 resolved_entries, n_entries = [], 'N/A'
1733 ie_result['requested_entries'], ie_result['entries'] = None, None
1734 else:
1735 entries = resolved_entries = list(entries)
1736 n_entries = len(resolved_entries)
1737 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
1738 if not ie_result.get('playlist_count'):
1739 # Better to do this after potentially exhausting entries
1740 ie_result['playlist_count'] = all_entries.get_full_count()
498f5606 1741
0647d925 1742 extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
1743 ie_copy = collections.ChainMap(ie_result, extra)
3bec830a 1744
e08a85d8 1745 _infojson_written = False
0bfc53d0 1746 write_playlist_files = self.params.get('allow_playlist_files', True)
1747 if write_playlist_files and self.params.get('list_thumbnails'):
1748 self.list_thumbnails(ie_result)
1749 if write_playlist_files and not self.params.get('simulate'):
e08a85d8 1750 _infojson_written = self._write_info_json(
1751 'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
1752 if _infojson_written is None:
80c03fa9 1753 return
1754 if self._write_description('playlist', ie_result,
1755 self.prepare_filename(ie_copy, 'pl_description')) is None:
1756 return
681de68e 1757 # TODO: This should be passed to ThumbnailsConvertor if necessary
3bec830a 1758 self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))
30a074c2 1759
7e9a6125 1760 if lazy:
1761 if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
1762 self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
1763 elif self.params.get('playlistreverse'):
1764 entries.reverse()
1765 elif self.params.get('playlistrandom'):
30a074c2 1766 random.shuffle(entries)
1767
7e88d7d7 1768 self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} videos'
1769 f'{format_field(ie_result, "playlist_count", " of %s")}')
30a074c2 1770
134c913c 1771 keep_resolved_entries = self.params.get('extract_flat') != 'discard'
1772 if self.params.get('extract_flat') == 'discard_in_playlist':
1773 keep_resolved_entries = ie_result['_type'] != 'playlist'
1774 if keep_resolved_entries:
1775 self.write_debug('The information of all playlist entries will be held in memory')
1776
26e2805c 1777 failures = 0
1778 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
7e9a6125 1779 for i, (playlist_index, entry) in enumerate(entries):
1780 if lazy:
1781 resolved_entries.append((playlist_index, entry))
3bec830a 1782 if not entry:
7e88d7d7 1783 continue
1784
7e88d7d7 1785 entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
7e9a6125 1786 if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
1787 playlist_index = ie_result['requested_entries'][i]
1788
0647d925 1789 entry_copy = collections.ChainMap(entry, {
3bec830a 1790 **common_info,
3955b207 1791 'n_entries': int_or_none(n_entries),
71729754 1792 'playlist_index': playlist_index,
7e9a6125 1793 'playlist_autonumber': i + 1,
0647d925 1794 })
3bec830a 1795
0647d925 1796 if self._match_entry(entry_copy, incomplete=True) is not None:
3bec830a 1797 continue
1798
1799 self.to_screen('[download] Downloading video %s of %s' % (
1800 self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
1801
a6ca61d4 1802 extra.update({
1803 'playlist_index': playlist_index,
1804 'playlist_autonumber': i + 1,
1805 })
3bec830a 1806 entry_result = self.__process_iterable_entry(entry, download, extra)
26e2805c 1807 if not entry_result:
1808 failures += 1
1809 if failures >= max_failures:
1810 self.report_error(
7e88d7d7 1811 f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
26e2805c 1812 break
134c913c 1813 if keep_resolved_entries:
1814 resolved_entries[i] = (playlist_index, entry_result)
7e88d7d7 1815
1816 # Update with processed data
7e9a6125 1817 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
e08a85d8 1818
1819 # Write the updated info to json
cb96c5be 1820 if _infojson_written is True and self._write_info_json(
e08a85d8 1821 'updated playlist', ie_result,
1822 self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
1823 return
ca30f449 1824
ed5835b4 1825 ie_result = self.run_all_pps('playlist', ie_result)
7e88d7d7 1826 self.to_screen(f'[download] Finished downloading playlist: {title}')
30a074c2 1827 return ie_result
1828
7e88d7d7 1829 @_handle_extraction_exceptions
a0566bbf 1830 def __process_iterable_entry(self, entry, download, extra_info):
1831 return self.process_ie_result(
1832 entry, download=download, extra_info=extra_info)
1833
67134eab
JMF
1834 def _build_format_filter(self, filter_spec):
1835 " Returns a function to filter the formats according to the filter_spec "
083c9df9
PH
1836
1837 OPERATORS = {
1838 '<': operator.lt,
1839 '<=': operator.le,
1840 '>': operator.gt,
1841 '>=': operator.ge,
1842 '=': operator.eq,
1843 '!=': operator.ne,
1844 }
67134eab 1845 operator_rex = re.compile(r'''(?x)\s*
187986a8 1846 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
1847 (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1848 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
083c9df9 1849 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
187986a8 1850 m = operator_rex.fullmatch(filter_spec)
9ddb6925
S
1851 if m:
1852 try:
1853 comparison_value = int(m.group('value'))
1854 except ValueError:
1855 comparison_value = parse_filesize(m.group('value'))
1856 if comparison_value is None:
1857 comparison_value = parse_filesize(m.group('value') + 'B')
1858 if comparison_value is None:
1859 raise ValueError(
1860 'Invalid value %r in format specification %r' % (
67134eab 1861 m.group('value'), filter_spec))
9ddb6925
S
1862 op = OPERATORS[m.group('op')]
1863
083c9df9 1864 if not m:
9ddb6925
S
1865 STR_OPERATORS = {
1866 '=': operator.eq,
10d33b34
YCH
1867 '^=': lambda attr, value: attr.startswith(value),
1868 '$=': lambda attr, value: attr.endswith(value),
1869 '*=': lambda attr, value: value in attr,
1ce9a3cb 1870 '~=': lambda attr, value: value.search(attr) is not None
9ddb6925 1871 }
187986a8 1872 str_operator_rex = re.compile(r'''(?x)\s*
1873 (?P<key>[a-zA-Z0-9._-]+)\s*
1ce9a3cb
LF
1874 (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
1875 (?P<quote>["'])?
1876 (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
1877 (?(quote)(?P=quote))\s*
9ddb6925 1878 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
187986a8 1879 m = str_operator_rex.fullmatch(filter_spec)
9ddb6925 1880 if m:
1ce9a3cb
LF
1881 if m.group('op') == '~=':
1882 comparison_value = re.compile(m.group('value'))
1883 else:
1884 comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
2cc779f4
S
1885 str_op = STR_OPERATORS[m.group('op')]
1886 if m.group('negation'):
e118a879 1887 op = lambda attr, value: not str_op(attr, value)
2cc779f4
S
1888 else:
1889 op = str_op
083c9df9 1890
9ddb6925 1891 if not m:
187986a8 1892 raise SyntaxError('Invalid filter specification %r' % filter_spec)
083c9df9
PH
1893
1894 def _filter(f):
1895 actual_value = f.get(m.group('key'))
1896 if actual_value is None:
1897 return m.group('none_inclusive')
1898 return op(actual_value, comparison_value)
67134eab
JMF
1899 return _filter
1900
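# A small sketch of the filter callable produced here, over made-up formats:
#
#   ydl = YoutubeDL()
#   only_hd = ydl._build_format_filter('height>=720')
#   [f for f in [{'height': 1080}, {'height': 360}] if only_hd(f)]
#   # -> [{'height': 1080}]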
9f1a1c36 1901 def _check_formats(self, formats):
1902 for f in formats:
1903 self.to_screen('[info] Testing format %s' % f['format_id'])
75689fe5 1904 path = self.get_output_path('temp')
1905 if not self._ensure_dir_exists(f'{path}/'):
1906 continue
1907 temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
9f1a1c36 1908 temp_file.close()
1909 try:
1910 success, _ = self.dl(temp_file.name, f, test=True)
8a82af35 1911 except (DownloadError, OSError, ValueError) + network_exceptions:
9f1a1c36 1912 success = False
1913 finally:
1914 if os.path.exists(temp_file.name):
1915 try:
1916 os.remove(temp_file.name)
1917 except OSError:
1918 self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
1919 if success:
1920 yield f
1921 else:
1922 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
1923
0017d9ad 1924 def _default_format_spec(self, info_dict, download=True):
0017d9ad 1925
af0f7428
S
1926 def can_merge():
1927 merger = FFmpegMergerPP(self)
1928 return merger.available and merger.can_merge()
1929
91ebc640 1930 prefer_best = (
b7b04c78 1931 not self.params.get('simulate')
91ebc640 1932 and download
1933 and (
1934 not can_merge()
21633673 1935 or info_dict.get('is_live') and not self.params.get('live_from_start')
bf1824b3 1936 or self.params['outtmpl']['default'] == '-'))
53ed7066 1937 compat = (
1938 prefer_best
1939 or self.params.get('allow_multiple_audio_streams', False)
8a82af35 1940 or 'format-spec' in self.params['compat_opts'])
91ebc640 1941
1942 return (
53ed7066 1943 'best/bestvideo+bestaudio' if prefer_best
1944 else 'bestvideo*+bestaudio/best' if not compat
91ebc640 1945 else 'bestvideo+bestaudio/best')
0017d9ad 1946
67134eab
JMF
1947 def build_format_selector(self, format_spec):
1948 def syntax_error(note, start):
1949 message = (
1950 'Invalid format specification: '
86e5f3ed 1951 '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
67134eab
JMF
1952 return SyntaxError(message)
1953
1954 PICKFIRST = 'PICKFIRST'
1955 MERGE = 'MERGE'
1956 SINGLE = 'SINGLE'
0130afb7 1957 GROUP = 'GROUP'
67134eab
JMF
1958 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
1959
91ebc640 1960 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
1961 'video': self.params.get('allow_multiple_video_streams', False)}
909d24dd 1962
9f1a1c36 1963 check_formats = self.params.get('check_formats') == 'selected'
e8e73840 1964
67134eab
JMF
1965 def _parse_filter(tokens):
1966 filter_parts = []
1967 for type, string, start, _, _ in tokens:
1968 if type == tokenize.OP and string == ']':
1969 return ''.join(filter_parts)
1970 else:
1971 filter_parts.append(string)
1972
232541df 1973 def _remove_unused_ops(tokens):
17cc1534 1974 # Remove operators that we don't use and join them with the surrounding strings
232541df
JMF
1975 # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
1976 ALLOWED_OPS = ('/', '+', ',', '(', ')')
1977 last_string, last_start, last_end, last_line = None, None, None, None
1978 for type, string, start, end, line in tokens:
1979 if type == tokenize.OP and string == '[':
1980 if last_string:
1981 yield tokenize.NAME, last_string, last_start, last_end, last_line
1982 last_string = None
1983 yield type, string, start, end, line
1984 # everything inside brackets will be handled by _parse_filter
1985 for type, string, start, end, line in tokens:
1986 yield type, string, start, end, line
1987 if type == tokenize.OP and string == ']':
1988 break
1989 elif type == tokenize.OP and string in ALLOWED_OPS:
1990 if last_string:
1991 yield tokenize.NAME, last_string, last_start, last_end, last_line
1992 last_string = None
1993 yield type, string, start, end, line
1994 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
1995 if not last_string:
1996 last_string = string
1997 last_start = start
1998 last_end = end
1999 else:
2000 last_string += string
2001 if last_string:
2002 yield tokenize.NAME, last_string, last_start, last_end, last_line
2003
cf2ac6df 2004 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
67134eab
JMF
2005 selectors = []
2006 current_selector = None
2007 for type, string, start, _, _ in tokens:
2008 # ENCODING is only defined in python 3.x
2009 if type == getattr(tokenize, 'ENCODING', None):
2010 continue
2011 elif type in [tokenize.NAME, tokenize.NUMBER]:
2012 current_selector = FormatSelector(SINGLE, string, [])
2013 elif type == tokenize.OP:
cf2ac6df
JMF
2014 if string == ')':
2015 if not inside_group:
2016 # ')' will be handled by the parentheses group
2017 tokens.restore_last_token()
67134eab 2018 break
cf2ac6df 2019 elif inside_merge and string in ['/', ',']:
0130afb7
JMF
2020 tokens.restore_last_token()
2021 break
cf2ac6df
JMF
2022 elif inside_choice and string == ',':
2023 tokens.restore_last_token()
2024 break
2025 elif string == ',':
0a31a350
JMF
2026 if not current_selector:
2027 raise syntax_error('"," must follow a format selector', start)
67134eab
JMF
2028 selectors.append(current_selector)
2029 current_selector = None
2030 elif string == '/':
d96d604e
JMF
2031 if not current_selector:
2032 raise syntax_error('"/" must follow a format selector', start)
67134eab 2033 first_choice = current_selector
cf2ac6df 2034 second_choice = _parse_format_selection(tokens, inside_choice=True)
f5f4a27a 2035 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
67134eab
JMF
2036 elif string == '[':
2037 if not current_selector:
2038 current_selector = FormatSelector(SINGLE, 'best', [])
2039 format_filter = _parse_filter(tokens)
2040 current_selector.filters.append(format_filter)
0130afb7
JMF
2041 elif string == '(':
2042 if current_selector:
2043 raise syntax_error('Unexpected "("', start)
cf2ac6df
JMF
2044 group = _parse_format_selection(tokens, inside_group=True)
2045 current_selector = FormatSelector(GROUP, group, [])
67134eab 2046 elif string == '+':
d03cfdce 2047 if not current_selector:
2048 raise syntax_error('Unexpected "+"', start)
2049 selector_1 = current_selector
2050 selector_2 = _parse_format_selection(tokens, inside_merge=True)
2051 if not selector_2:
2052 raise syntax_error('Expected a selector', start)
2053 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
67134eab 2054 else:
86e5f3ed 2055 raise syntax_error(f'Operator not recognized: "{string}"', start)
67134eab
JMF
2056 elif type == tokenize.ENDMARKER:
2057 break
2058 if current_selector:
2059 selectors.append(current_selector)
2060 return selectors
2061
f8d4ad9a 2062 def _merge(formats_pair):
2063 format_1, format_2 = formats_pair
2064
2065 formats_info = []
2066 formats_info.extend(format_1.get('requested_formats', (format_1,)))
2067 formats_info.extend(format_2.get('requested_formats', (format_2,)))
2068
2069 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
551f9388 2070 get_no_more = {'video': False, 'audio': False}
f8d4ad9a 2071 for (i, fmt_info) in enumerate(formats_info):
551f9388 2072 if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
2073 formats_info.pop(i)
2074 continue
2075 for aud_vid in ['audio', 'video']:
f8d4ad9a 2076 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
2077 if get_no_more[aud_vid]:
2078 formats_info.pop(i)
f5510afe 2079 break
f8d4ad9a 2080 get_no_more[aud_vid] = True
2081
2082 if len(formats_info) == 1:
2083 return formats_info[0]
2084
2085 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
2086 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
2087
2088 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
2089 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
2090
2091 output_ext = self.params.get('merge_output_format')
2092 if not output_ext:
2093 if the_only_video:
2094 output_ext = the_only_video['ext']
2095 elif the_only_audio and not video_fmts:
2096 output_ext = the_only_audio['ext']
2097 else:
2098 output_ext = 'mkv'
2099
975a0d0d 2100 filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
2101
f8d4ad9a 2102 new_dict = {
2103 'requested_formats': formats_info,
975a0d0d 2104 'format': '+'.join(filtered('format')),
2105 'format_id': '+'.join(filtered('format_id')),
f8d4ad9a 2106 'ext': output_ext,
975a0d0d 2107 'protocol': '+'.join(map(determine_protocol, formats_info)),
093a1710 2108 'language': '+'.join(orderedSet(filtered('language'))) or None,
2109 'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
2110 'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
975a0d0d 2111 'tbr': sum(filtered('tbr', 'vbr', 'abr')),
f8d4ad9a 2112 }
2113
2114 if the_only_video:
2115 new_dict.update({
2116 'width': the_only_video.get('width'),
2117 'height': the_only_video.get('height'),
2118 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
2119 'fps': the_only_video.get('fps'),
49a57e70 2120 'dynamic_range': the_only_video.get('dynamic_range'),
f8d4ad9a 2121 'vcodec': the_only_video.get('vcodec'),
2122 'vbr': the_only_video.get('vbr'),
2123 'stretched_ratio': the_only_video.get('stretched_ratio'),
2124 })
2125
2126 if the_only_audio:
2127 new_dict.update({
2128 'acodec': the_only_audio.get('acodec'),
2129 'abr': the_only_audio.get('abr'),
975a0d0d 2130 'asr': the_only_audio.get('asr'),
f8d4ad9a 2131 })
2132
2133 return new_dict
2134
e8e73840 2135 def _check_formats(formats):
981052c9 2136 if not check_formats:
2137 yield from formats
b5ac45b1 2138 return
9f1a1c36 2139 yield from self._check_formats(formats)
e8e73840 2140
67134eab 2141 def _build_selector_function(selector):
909d24dd 2142 if isinstance(selector, list): # ,
67134eab
JMF
2143 fs = [_build_selector_function(s) for s in selector]
2144
317f7ab6 2145 def selector_function(ctx):
67134eab 2146 for f in fs:
981052c9 2147 yield from f(ctx)
67134eab 2148 return selector_function
909d24dd 2149
2150 elif selector.type == GROUP: # ()
0130afb7 2151 selector_function = _build_selector_function(selector.selector)
909d24dd 2152
2153 elif selector.type == PICKFIRST: # /
67134eab
JMF
2154 fs = [_build_selector_function(s) for s in selector.selector]
2155
317f7ab6 2156 def selector_function(ctx):
67134eab 2157 for f in fs:
317f7ab6 2158 picked_formats = list(f(ctx))
67134eab
JMF
2159 if picked_formats:
2160 return picked_formats
2161 return []
67134eab 2162
981052c9 2163 elif selector.type == MERGE: # +
2164 selector_1, selector_2 = map(_build_selector_function, selector.selector)
2165
2166 def selector_function(ctx):
adbc4ec4 2167 for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
981052c9 2168 yield _merge(pair)
2169
909d24dd 2170 elif selector.type == SINGLE: # atom
598d185d 2171 format_spec = selector.selector or 'best'
909d24dd 2172
f8d4ad9a 2173 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
909d24dd 2174 if format_spec == 'all':
2175 def selector_function(ctx):
9222c381 2176 yield from _check_formats(ctx['formats'][::-1])
f8d4ad9a 2177 elif format_spec == 'mergeall':
2178 def selector_function(ctx):
316f2650 2179 formats = list(_check_formats(
2180 f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
e01d6aa4 2181 if not formats:
2182 return
921b76ca 2183 merged_format = formats[-1]
2184 for f in formats[-2::-1]:
f8d4ad9a 2185 merged_format = _merge((merged_format, f))
2186 yield merged_format
909d24dd 2187
2188 else:
85e801a9 2189 format_fallback, separate_fallback, format_reverse, format_idx = False, None, True, 1
eff63539 2190 mobj = re.match(
2191 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
2192 format_spec)
2193 if mobj is not None:
2194 format_idx = int_or_none(mobj.group('n'), default=1)
e8e73840 2195 format_reverse = mobj.group('bw')[0] == 'b'
eff63539 2196 format_type = (mobj.group('type') or [None])[0]
2197 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
2198 format_modified = mobj.group('mod') is not None
909d24dd 2199
2200 format_fallback = not format_type and not format_modified # for b, w
8326b00a 2201 _filter_f = (
eff63539 2202 (lambda f: f.get('%scodec' % format_type) != 'none')
2203 if format_type and format_modified # bv*, ba*, wv*, wa*
2204 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
2205 if format_type # bv, ba, wv, wa
2206 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
2207 if not format_modified # b, w
8326b00a 2208 else lambda f: True) # b*, w*
2209 filter_f = lambda f: _filter_f(f) and (
2210 f.get('vcodec') != 'none' or f.get('acodec') != 'none')
67134eab 2211 else:
48ee10ee 2212 if format_spec in self._format_selection_exts['audio']:
b11c04a8 2213 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
48ee10ee 2214 elif format_spec in self._format_selection_exts['video']:
b11c04a8 2215 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
85e801a9 2216 separate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
48ee10ee 2217 elif format_spec in self._format_selection_exts['storyboards']:
b11c04a8 2218 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
2219 else:
b5ae35ee 2220 filter_f = lambda f: f.get('format_id') == format_spec # id
909d24dd 2221
2222 def selector_function(ctx):
2223 formats = list(ctx['formats'])
909d24dd 2224 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
85e801a9 2225 if not matches:
2226 if format_fallback and ctx['incomplete_formats']:
2227 # for extractors with incomplete formats (audio only (soundcloud)
2228 # or video only (imgur)) best/worst will fall back to
2229 # best/worst {video,audio}-only format
2230 matches = formats
2231 elif separate_fallback and not ctx['has_merged_format']:
2232 # for compatibility with youtube-dl when there is no pre-merged format
2233 matches = list(filter(separate_fallback, formats))
981052c9 2234 matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
2235 try:
e8e73840 2236 yield matches[format_idx - 1]
4abea8ca 2237 except LazyList.IndexError:
981052c9 2238 return
083c9df9 2239
67134eab 2240 filters = [self._build_format_filter(f) for f in selector.filters]
083c9df9 2241
317f7ab6 2242 def final_selector(ctx):
adbc4ec4 2243 ctx_copy = dict(ctx)
67134eab 2244 for _filter in filters:
317f7ab6
S
2245 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
2246 return selector_function(ctx_copy)
67134eab 2247 return final_selector
083c9df9 2248
0f06bcd7 2249 stream = io.BytesIO(format_spec.encode())
0130afb7 2250 try:
f9934b96 2251 tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
0130afb7
JMF
2252 except tokenize.TokenError:
2253 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
2254
86e5f3ed 2255 class TokenIterator:
0130afb7
JMF
2256 def __init__(self, tokens):
2257 self.tokens = tokens
2258 self.counter = 0
2259
2260 def __iter__(self):
2261 return self
2262
2263 def __next__(self):
2264 if self.counter >= len(self.tokens):
2265 raise StopIteration()
2266 value = self.tokens[self.counter]
2267 self.counter += 1
2268 return value
2269
2270 next = __next__
2271
2272 def restore_last_token(self):
2273 self.counter -= 1
2274
2275 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
67134eab 2276 return _build_selector_function(parsed_selector)
a9c58ad9 2277
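# A hedged sketch of driving the returned selector directly. The ctx keys
# mirror those built in process_video_result; the formats are made up and
# assumed to be sorted worst-to-best, as the selectors expect:
#
#   ydl = YoutubeDL()
#   selector = ydl.build_format_selector('bestvideo+bestaudio/best')
#   formats = [
#       {'format_id': '140', 'vcodec': 'none', 'acodec': 'mp4a.40.2',
#        'ext': 'm4a', 'url': 'https://example.com/a.m4a'},
#       {'format_id': '137', 'vcodec': 'avc1', 'acodec': 'none',
#        'ext': 'mp4', 'url': 'https://example.com/v.mp4'},
#   ]
#   ctx = {'formats': formats, 'incomplete_formats': False,
#          'has_merged_format': False}
#   list(selector(ctx))  # -> one merged video+audio entry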
e5660ee6 2278 def _calc_headers(self, info_dict):
8b7539d2 2279 res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})
e5660ee6 2280
c487cf00 2281 cookies = self._calc_cookies(info_dict['url'])
e5660ee6
JMF
2282 if cookies:
2283 res['Cookie'] = cookies
2284
0016b84e
S
2285 if 'X-Forwarded-For' not in res:
2286 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
2287 if x_forwarded_for_ip:
2288 res['X-Forwarded-For'] = x_forwarded_for_ip
2289
e5660ee6
JMF
2290 return res
2291
c487cf00 2292 def _calc_cookies(self, url):
2293 pr = sanitized_Request(url)
e5660ee6 2294 self.cookiejar.add_cookie_header(pr)
662435f7 2295 return pr.get_header('Cookie')
e5660ee6 2296
9f1a1c36 2297 def _sort_thumbnails(self, thumbnails):
2298 thumbnails.sort(key=lambda t: (
2299 t.get('preference') if t.get('preference') is not None else -1,
2300 t.get('width') if t.get('width') is not None else -1,
2301 t.get('height') if t.get('height') is not None else -1,
2302 t.get('id') if t.get('id') is not None else '',
2303 t.get('url')))
2304
b0249bca 2305 def _sanitize_thumbnails(self, info_dict):
bc516a3f 2306 thumbnails = info_dict.get('thumbnails')
2307 if thumbnails is None:
2308 thumbnail = info_dict.get('thumbnail')
2309 if thumbnail:
2310 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
9f1a1c36 2311 if not thumbnails:
2312 return
2313
2314 def check_thumbnails(thumbnails):
2315 for t in thumbnails:
2316 self.to_screen(f'[info] Testing thumbnail {t["id"]}')
2317 try:
2318 self.urlopen(HEADRequest(t['url']))
2319 except network_exceptions as err:
2320 self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
2321 continue
2322 yield t
2323
2324 self._sort_thumbnails(thumbnails)
2325 for i, t in enumerate(thumbnails):
2326 if t.get('id') is None:
2327 t['id'] = '%d' % i
2328 if t.get('width') and t.get('height'):
2329 t['resolution'] = '%dx%d' % (t['width'], t['height'])
2330 t['url'] = sanitize_url(t['url'])
2331
2332 if self.params.get('check_formats') is True:
282f5709 2333 info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
9f1a1c36 2334 else:
2335 info_dict['thumbnails'] = thumbnails
bc516a3f 2336
03f83004
LNO
2337 def _fill_common_fields(self, info_dict, is_video=True):
2338 # TODO: move sanitization here
2339 if is_video:
2340 # playlists are allowed to lack "title"
d4736fdb 2341 title = info_dict.get('title', NO_DEFAULT)
2342 if title is NO_DEFAULT:
03f83004
LNO
2343 raise ExtractorError('Missing "title" field in extractor result',
2344 video_id=info_dict['id'], ie=info_dict['extractor'])
d4736fdb 2345 info_dict['fulltitle'] = title
2346 if not title:
2347 if title == '':
2348 self.write_debug('Extractor gave empty title. Creating a generic title')
2349 else:
2350 self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
1d485a1a 2351 info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'
03f83004
LNO
2352
2353 if info_dict.get('duration') is not None:
2354 info_dict['duration_string'] = formatSeconds(info_dict['duration'])
2355
2356 for ts_key, date_key in (
2357 ('timestamp', 'upload_date'),
2358 ('release_timestamp', 'release_date'),
2359 ('modified_timestamp', 'modified_date'),
2360 ):
2361 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
2362 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2363 # see http://bugs.python.org/issue1646728)
19a03940 2364 with contextlib.suppress(ValueError, OverflowError, OSError):
03f83004
LNO
2365 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
2366 info_dict[date_key] = upload_date.strftime('%Y%m%d')
03f83004
LNO
2367
2368 live_keys = ('is_live', 'was_live')
2369 live_status = info_dict.get('live_status')
2370 if live_status is None:
2371 for key in live_keys:
2372 if info_dict.get(key) is False:
2373 continue
2374 if info_dict.get(key):
2375 live_status = key
2376 break
2377 if all(info_dict.get(key) is False for key in live_keys):
2378 live_status = 'not_live'
2379 if live_status:
2380 info_dict['live_status'] = live_status
2381 for key in live_keys:
2382 if info_dict.get(key) is None:
2383 info_dict[key] = (live_status == key)
2384
2385 # Auto generate title fields corresponding to the *_number fields when missing
2386 # in order to always have clean titles. This is very common for TV series.
2387 for field in ('chapter', 'season', 'episode'):
2388 if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
2389 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
2390
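# Illustrative effect of the auto-title logic above, on a hypothetical
# info dict:
#
#   ydl = YoutubeDL()
#   info = {'id': 'x', 'title': 'Pilot', 'episode_number': 3}
#   ydl._fill_common_fields(info)
#   info['episode']    # -> 'Episode 3'
#   info['fulltitle']  # -> 'Pilot'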
415f8d51 2391 def _raise_pending_errors(self, info):
2392 err = info.pop('__pending_error', None)
2393 if err:
2394 self.report_error(err, tb=False)
2395
dd82ffea
JMF
2396 def process_video_result(self, info_dict, download=True):
2397 assert info_dict.get('_type', 'video') == 'video'
9c906919 2398 self._num_videos += 1
dd82ffea 2399
bec1fad2 2400 if 'id' not in info_dict:
fc08bdd6 2401 raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
2402 elif not info_dict.get('id'):
2403 raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])
455a15e2 2404
c9969434
S
2405 def report_force_conversion(field, field_not, conversion):
2406 self.report_warning(
2407 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2408 % (field, field_not, conversion))
2409
2410 def sanitize_string_field(info, string_field):
2411 field = info.get(string_field)
14f25df2 2412 if field is None or isinstance(field, str):
c9969434
S
2413 return
2414 report_force_conversion(string_field, 'a string', 'string')
14f25df2 2415 info[string_field] = str(field)
c9969434
S
2416
2417 def sanitize_numeric_fields(info):
2418 for numeric_field in self._NUMERIC_FIELDS:
2419 field = info.get(numeric_field)
f9934b96 2420 if field is None or isinstance(field, (int, float)):
c9969434
S
2421 continue
2422 report_force_conversion(numeric_field, 'numeric', 'int')
2423 info[numeric_field] = int_or_none(field)
2424
2425 sanitize_string_field(info_dict, 'id')
2426 sanitize_numeric_fields(info_dict)
3975b4d2 2427 if info_dict.get('section_end') and info_dict.get('section_start') is not None:
2428 info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
4c3f8c3f 2429 if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
50e93e03 2430 self.report_warning('"duration" field is negative, there is an error in extractor')
be6217b2 2431
9eef7c4e 2432 chapters = info_dict.get('chapters') or []
a3976e07 2433 if chapters and chapters[0].get('start_time'):
2434 chapters.insert(0, {'start_time': 0})
2435
9eef7c4e 2436 dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
a3976e07 2437 for idx, (prev, current, next_) in enumerate(zip(
2438 (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
9eef7c4e 2439 if current.get('start_time') is None:
2440 current['start_time'] = prev.get('end_time')
2441 if not current.get('end_time'):
2442 current['end_time'] = next_.get('start_time')
a3976e07 2443 if not current.get('title'):
2444 current['title'] = f'<Untitled Chapter {idx}>'
9eef7c4e 2445
dd82ffea
JMF
2446 if 'playlist' not in info_dict:
2447 # It isn't part of a playlist
2448 info_dict['playlist'] = None
2449 info_dict['playlist_index'] = None
2450
bc516a3f 2451 self._sanitize_thumbnails(info_dict)
d5519808 2452
536a55da 2453 thumbnail = info_dict.get('thumbnail')
bc516a3f 2454 thumbnails = info_dict.get('thumbnails')
536a55da
S
2455 if thumbnail:
2456 info_dict['thumbnail'] = sanitize_url(thumbnail)
2457 elif thumbnails:
d5519808
PH
2458 info_dict['thumbnail'] = thumbnails[-1]['url']
2459
ae30b840 2460 if info_dict.get('display_id') is None and 'id' in info_dict:
0afef30b
PH
2461 info_dict['display_id'] = info_dict['id']
2462
03f83004 2463 self._fill_common_fields(info_dict)
33d2fc2f 2464
05108a49
S
2465 for cc_kind in ('subtitles', 'automatic_captions'):
2466 cc = info_dict.get(cc_kind)
2467 if cc:
2468 for _, subtitle in cc.items():
2469 for subtitle_format in subtitle:
2470 if subtitle_format.get('url'):
2471 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
2472 if subtitle_format.get('ext') is None:
2473 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
2474
2475 automatic_captions = info_dict.get('automatic_captions')
4bba3716 2476 subtitles = info_dict.get('subtitles')
4bba3716 2477
360e1ca5 2478 info_dict['requested_subtitles'] = self.process_subtitles(
05108a49 2479 info_dict['id'], subtitles, automatic_captions)
a504ced0 2480
dd82ffea
JMF
2481 if info_dict.get('formats') is None:
2482 # There's only one format available
2483 formats = [info_dict]
2484 else:
2485 formats = info_dict['formats']
2486
0a5a191a 2487 # or None ensures --clean-infojson removes it
2488 info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
88acdbc2 2489 if not self.params.get('allow_unplayable_formats'):
2490 formats = [f for f in formats if not f.get('has_drm')]
0a5a191a 2491 if info_dict['_has_drm'] and all(
c0b6e5c7 2492 f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
2493 self.report_warning(
2494 'This video is DRM protected and only images are available for download. '
2495 'Use --list-formats to see them')
88acdbc2 2496
319b6059 2497 get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
2498 if not get_from_start:
2499 info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
2500 if info_dict.get('is_live') and formats:
adbc4ec4 2501 formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
319b6059 2502 if get_from_start and not formats:
a44ca5a4 2503 self.raise_no_formats(info_dict, msg=(
2504 '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
2505 'If you want to download from the current time, use --no-live-from-start'))
adbc4ec4 2506
db95dc13 2507 if not formats:
1151c407 2508 self.raise_no_formats(info_dict)
db95dc13 2509
73af5cc8
S
2510 def is_wellformed(f):
2511 url = f.get('url')
a5ac0c47 2512 if not url:
73af5cc8
S
2513 self.report_warning(
2514 '"url" field is missing or empty - skipping format, '
2515 'there is an error in extractor')
a5ac0c47
S
2516 return False
2517 if isinstance(url, bytes):
2518 sanitize_string_field(f, 'url')
2519 return True
73af5cc8
S
2520
2521 # Filter out malformed formats for better extraction robustness
2522 formats = list(filter(is_wellformed, formats))
2523
181c7053
S
2524 formats_dict = {}
2525
dd82ffea 2526 # We check that all the formats have the format and format_id fields
db95dc13 2527 for i, format in enumerate(formats):
c9969434
S
2528 sanitize_string_field(format, 'format_id')
2529 sanitize_numeric_fields(format)
dcf77cf1 2530 format['url'] = sanitize_url(format['url'])
e74e3b63 2531 if not format.get('format_id'):
14f25df2 2532 format['format_id'] = str(i)
e2effb08
S
2533 else:
2534 # Sanitize format_id from characters used in format selector expression
ec85ded8 2535 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
181c7053
S
2536 format_id = format['format_id']
2537 if format_id not in formats_dict:
2538 formats_dict[format_id] = []
2539 formats_dict[format_id].append(format)
2540
2541 # Make sure all formats have unique format_id
03b4de72 2542 common_exts = set(itertools.chain(*self._format_selection_exts.values()))
181c7053 2543 for format_id, ambiguous_formats in formats_dict.items():
48ee10ee 2544 ambiguous_id = len(ambiguous_formats) > 1
2545 for i, format in enumerate(ambiguous_formats):
2546 if ambiguous_id:
181c7053 2547 format['format_id'] = '%s-%d' % (format_id, i)
48ee10ee 2548 if format.get('ext') is None:
2549 format['ext'] = determine_ext(format['url']).lower()
2550 # Ensure there is no conflict between id and ext in format selection
2551 # See https://github.com/yt-dlp/yt-dlp/issues/1282
2552 if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
2553 format['format_id'] = 'f%s' % format['format_id']
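# Illustration (hypothetical ids): two formats that both report format_id 'hls' become
# 'hls-0' and 'hls-1', and an id that collides with a selectable extension such as
# 'mp4' is renamed to 'fmp4', so a selector like 'mp4' keeps referring to the extension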
181c7053
S
2554
2555 for i, format in enumerate(formats):
8c51aa65 2556 if format.get('format') is None:
6febd1c1 2557 format['format'] = '{id} - {res}{note}'.format(
8c51aa65
JMF
2558 id=format['format_id'],
2559 res=self.format_resolution(format),
b868936c 2560 note=format_field(format, 'format_note', ' (%s)'),
8c51aa65 2561 )
6f0be937 2562 if format.get('protocol') is None:
b5559424 2563 format['protocol'] = determine_protocol(format)
239df021 2564 if format.get('resolution') is None:
2565 format['resolution'] = self.format_resolution(format, default=None)
176f1866 2566 if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
2567 format['dynamic_range'] = 'SDR'
f2fe69c7 2568 if (info_dict.get('duration') and format.get('tbr')
2569 and not format.get('filesize') and not format.get('filesize_approx')):
56ba69e4 2570 format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
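# tbr is the total bitrate in KBit/s, so duration (s) * tbr * 1024 / 8 gives an
# approximate size in bytes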
f2fe69c7 2571
e5660ee6
JMF
2572 # Add HTTP headers, so that external programs can use them from the
2573 # json output
2574 full_format_info = info_dict.copy()
2575 full_format_info.update(format)
2576 format['http_headers'] = self._calc_headers(full_format_info)
0016b84e
S
2577 # Remove private housekeeping stuff
2578 if '__x_forwarded_for_ip' in info_dict:
2579 del info_dict['__x_forwarded_for_ip']
dd82ffea 2580
9f1a1c36 2581 if self.params.get('check_formats') is True:
282f5709 2582 formats = LazyList(self._check_formats(formats[::-1]), reverse=True)
9f1a1c36 2583
88acdbc2 2584 if not formats or formats[0] is not info_dict:
b3d9ef88
JMF
2585 # only set the 'formats' field if the original info_dict lists them;
2586 # otherwise we end up with a circular reference: the first (and only)
f89197d7 2587 # element in the 'formats' field in info_dict is info_dict itself,
dfb1b146 2588 # which can't be exported to json
b3d9ef88 2589 info_dict['formats'] = formats
4ec82a72 2590
2591 info_dict, _ = self.pre_process(info_dict)
2592
6db9c4d5 2593 if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
09b49e1f 2594 return info_dict
2595
2596 self.post_extract(info_dict)
2597 info_dict, _ = self.pre_process(info_dict, 'after_filter')
2598
093a1710 2599 # The pre-processors may have modified the formats
2600 formats = info_dict.get('formats', [info_dict])
2601
fa9f30b8 2602 list_only = self.params.get('simulate') is None and (
2603 self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
2604 interactive_format_selection = not list_only and self.format_selector == '-'
b7b04c78 2605 if self.params.get('list_thumbnails'):
2606 self.list_thumbnails(info_dict)
b7b04c78 2607 if self.params.get('listsubtitles'):
2608 if 'automatic_captions' in info_dict:
2609 self.list_subtitles(
2610 info_dict['id'], automatic_captions, 'automatic captions')
2611 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
fa9f30b8 2612 if self.params.get('listformats') or interactive_format_selection:
b69fd25c 2613 self.list_formats(info_dict)
169dbde9 2614 if list_only:
b7b04c78 2615 # Without this printing, -F --print-json will not work
169dbde9 2616 self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
c487cf00 2617 return info_dict
bfaae0a7 2618
187986a8 2619 format_selector = self.format_selector
2620 if format_selector is None:
0017d9ad 2621 req_format = self._default_format_spec(info_dict, download=download)
0760b0a7 2622 self.write_debug('Default format spec: %s' % req_format)
187986a8 2623 format_selector = self.build_format_selector(req_format)
317f7ab6 2624
fa9f30b8 2625 while True:
2626 if interactive_format_selection:
2627 req_format = input(
2628 self._format_screen('\nEnter format selector: ', self.Styles.EMPHASIS))
2629 try:
2630 format_selector = self.build_format_selector(req_format)
2631 except SyntaxError as err:
2632 self.report_error(err, tb=False, is_error=False)
2633 continue
2634
85e801a9 2635 formats_to_download = list(format_selector({
fa9f30b8 2636 'formats': formats,
85e801a9 2637 'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
2638 'incomplete_formats': (
2639 # All formats are video-only or
2640 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
2641 # all formats are audio-only
2642 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
2643 }))
fa9f30b8 2644 if interactive_format_selection and not formats_to_download:
2645 self.report_error('Requested format is not available', tb=False, is_error=False)
2646 continue
2647 break
317f7ab6 2648
dd82ffea 2649 if not formats_to_download:
b7da73eb 2650 if not self.params.get('ignore_no_formats_error'):
c0b6e5c7 2651 raise ExtractorError(
2652 'Requested format is not available. Use --list-formats for a list of available formats',
2653 expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
b62fa6d7 2654 self.report_warning('Requested format is not available')
2655 # Process what we can, even without any available formats.
2656 formats_to_download = [{}]
a13e6848 2657
5ec1b6b7 2658 requested_ranges = self.params.get('download_ranges')
2659 if requested_ranges:
2660 requested_ranges = tuple(requested_ranges(info_dict, self))
2661
2662 best_format, downloaded_formats = formats_to_download[-1], []
b62fa6d7 2663 if download:
2664 if best_format:
5ec1b6b7 2665 def to_screen(*msg):
2666 self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')
2667
2668 to_screen(f'Downloading {len(formats_to_download)} format(s):',
2669 (f['format_id'] for f in formats_to_download))
2670 if requested_ranges:
2671 to_screen(f'Downloading {len(requested_ranges)} time ranges:',
2672 (f'{int(c["start_time"])}-{int(c["end_time"])}' for c in requested_ranges))
a13e6848 2673 max_downloads_reached = False
5ec1b6b7 2674
2675 for fmt, chapter in itertools.product(formats_to_download, requested_ranges or [{}]):
2676 new_info = self._copy_infodict(info_dict)
b7da73eb 2677 new_info.update(fmt)
3975b4d2 2678 offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
2679 if chapter or offset:
5ec1b6b7 2680 new_info.update({
3975b4d2 2681 'section_start': offset + chapter.get('start_time', 0),
bc401608 2682 'section_end': offset + min(chapter.get('end_time', duration), duration),
5ec1b6b7 2683 'section_title': chapter.get('title'),
2684 'section_number': chapter.get('index'),
2685 })
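# Illustration (hypothetical numbers): a requested range of 60-120s on a plain video
# (offset 0) yields section_start=60 and section_end=120, with the end clamped to the
# video duration; the offset only matters when the extractor itself returned a clip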
2686 downloaded_formats.append(new_info)
a13e6848 2687 try:
2688 self.process_info(new_info)
2689 except MaxDownloadsReached:
2690 max_downloads_reached = True
415f8d51 2691 self._raise_pending_errors(new_info)
f46e2f9d 2692 # Remove copied info
2693 for key, val in tuple(new_info.items()):
2694 if info_dict.get(key) == val:
2695 new_info.pop(key)
a13e6848 2696 if max_downloads_reached:
2697 break
ebed8b37 2698
5ec1b6b7 2699 write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
a13e6848 2700 assert write_archive.issubset({True, False, 'ignore'})
2701 if True in write_archive and False not in write_archive:
2702 self.record_download_archive(info_dict)
be72c624 2703
5ec1b6b7 2704 info_dict['requested_downloads'] = downloaded_formats
ed5835b4 2705 info_dict = self.run_all_pps('after_video', info_dict)
a13e6848 2706 if max_downloads_reached:
2707 raise MaxDownloadsReached()
ebed8b37 2708
49a57e70 2709 # We update the info dict with the selected best quality format (backwards compatibility)
be72c624 2710 info_dict.update(best_format)
dd82ffea
JMF
2711 return info_dict
2712
98c70d6f 2713 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
a504ced0 2714 """Select the requested subtitles and their format"""
d8a58ddc 2715 available_subs, normal_sub_langs = {}, []
98c70d6f
JMF
2716 if normal_subtitles and self.params.get('writesubtitles'):
2717 available_subs.update(normal_subtitles)
d8a58ddc 2718 normal_sub_langs = tuple(normal_subtitles.keys())
98c70d6f
JMF
2719 if automatic_captions and self.params.get('writeautomaticsub'):
2720 for lang, cap_info in automatic_captions.items():
360e1ca5
JMF
2721 if lang not in available_subs:
2722 available_subs[lang] = cap_info
2723
4d171848
JMF
2724 if (not self.params.get('writesubtitles') and not
2725 self.params.get('writeautomaticsub') or not
2726 available_subs):
2727 return None
a504ced0 2728
d8a58ddc 2729 all_sub_langs = tuple(available_subs.keys())
a504ced0 2730 if self.params.get('allsubtitles', False):
c32b0aab 2731 requested_langs = all_sub_langs
2732 elif self.params.get('subtitleslangs', False):
77c4a9ef 2733 # A list is used so that the order of languages will be the same as
2734 # given in subtitleslangs. See https://github.com/yt-dlp/yt-dlp/issues/1041
2735 requested_langs = []
2736 for lang_re in self.params.get('subtitleslangs'):
77c4a9ef 2737 discard = lang_re[0] == '-'
c32b0aab 2738 if discard:
77c4a9ef 2739 lang_re = lang_re[1:]
3aa91540 2740 if lang_re == 'all':
2741 if discard:
2742 requested_langs = []
2743 else:
2744 requested_langs.extend(all_sub_langs)
2745 continue
77c4a9ef 2746 current_langs = filter(re.compile(lang_re + '$').match, all_sub_langs)
c32b0aab 2747 if discard:
2748 for lang in current_langs:
77c4a9ef 2749 while lang in requested_langs:
2750 requested_langs.remove(lang)
c32b0aab 2751 else:
77c4a9ef 2752 requested_langs.extend(current_langs)
2753 requested_langs = orderedSet(requested_langs)
d8a58ddc 2754 elif normal_sub_langs:
2755 requested_langs = ['en'] if 'en' in normal_sub_langs else normal_sub_langs[:1]
a504ced0 2756 else:
d8a58ddc 2757 requested_langs = ['en'] if 'en' in all_sub_langs else all_sub_langs[:1]
ad3dc496 2758 if requested_langs:
2759 self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))
a504ced0
JMF
2760
2761 formats_query = self.params.get('subtitlesformat', 'best')
2762 formats_preference = formats_query.split('/') if formats_query else []
2763 subs = {}
2764 for lang in requested_langs:
2765 formats = available_subs.get(lang)
2766 if formats is None:
86e5f3ed 2767 self.report_warning(f'{lang} subtitles not available for {video_id}')
a504ced0 2768 continue
a504ced0
JMF
2769 for ext in formats_preference:
2770 if ext == 'best':
2771 f = formats[-1]
2772 break
2773 matches = list(filter(lambda f: f['ext'] == ext, formats))
2774 if matches:
2775 f = matches[-1]
2776 break
2777 else:
2778 f = formats[-1]
2779 self.report_warning(
2780 'No subtitle format found matching "%s" for language %s, '
2781 'using %s' % (formats_query, lang, f['ext']))
2782 subs[lang] = f
2783 return subs
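# Rough sketch of the selection (hypothetical params and data, not from the source):
#   params: {'writesubtitles': True, 'subtitleslangs': ['en.*'], 'subtitlesformat': 'vtt/srt'}
#   process_subtitles('xyz', {'en': [{'ext': 'srt', 'url': '...'}, {'ext': 'vtt', 'url': '...'}]}, None)
#   -> {'en': {'ext': 'vtt', 'url': '...'}}  # 'en.*' matched 'en'; 'vtt' preferred over 'srt'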
2784
bb66c247 2785 def _forceprint(self, key, info_dict):
2786 if info_dict is None:
2787 return
2788 info_copy = info_dict.copy()
2789 info_copy['formats_table'] = self.render_formats_table(info_dict)
2790 info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
2791 info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
2792 info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))
2793
2794 def format_tmpl(tmpl):
2795 mobj = re.match(r'\w+(=?)$', tmpl)
2796 if mobj and mobj.group(1):
2797 return f'{tmpl[:-1]} = %({tmpl[:-1]})r'
2798 elif mobj:
2799 return f'%({tmpl})s'
2800 return tmpl
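# e.g. 'id' expands to '%(id)s', 'id=' to 'id = %(id)r', and anything that is not a
# bare field name (such as a full output template) is passed through unchanged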
8130779d 2801
bb66c247 2802 for tmpl in self.params['forceprint'].get(key, []):
2803 self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))
2804
2805 for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
5127e92a 2806 filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
bb66c247 2807 tmpl = format_tmpl(tmpl)
2808 self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
8d93e69d 2809 if self._ensure_dir_exists(filename):
86e5f3ed 2810 with open(filename, 'a', encoding='utf-8') as f:
8d93e69d 2811 f.write(self.evaluate_outtmpl(tmpl, info_copy) + '\n')
ca30f449 2812
d06daf23 2813 def __forced_printings(self, info_dict, filename, incomplete):
53c18592 2814 def print_mandatory(field, actual_field=None):
2815 if actual_field is None:
2816 actual_field = field
d06daf23 2817 if (self.params.get('force%s' % field, False)
53c18592 2818 and (not incomplete or info_dict.get(actual_field) is not None)):
2819 self.to_stdout(info_dict[actual_field])
d06daf23
S
2820
2821 def print_optional(field):
2822 if (self.params.get('force%s' % field, False)
2823 and info_dict.get(field) is not None):
2824 self.to_stdout(info_dict[field])
2825
53c18592 2826 info_dict = info_dict.copy()
2827 if filename is not None:
2828 info_dict['filename'] = filename
2829 if info_dict.get('requested_formats') is not None:
2830 # For RTMP URLs, also include the playpath
2831 info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
10331a26 2832 elif info_dict.get('url'):
53c18592 2833 info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')
2834
bb66c247 2835 if (self.params.get('forcejson')
2836 or self.params['forceprint'].get('video')
2837 or self.params['print_to_file'].get('video')):
2b8a2973 2838 self.post_extract(info_dict)
bb66c247 2839 self._forceprint('video', info_dict)
53c18592 2840
d06daf23
S
2841 print_mandatory('title')
2842 print_mandatory('id')
53c18592 2843 print_mandatory('url', 'urls')
d06daf23
S
2844 print_optional('thumbnail')
2845 print_optional('description')
53c18592 2846 print_optional('filename')
b868936c 2847 if self.params.get('forceduration') and info_dict.get('duration') is not None:
d06daf23
S
2848 self.to_stdout(formatSeconds(info_dict['duration']))
2849 print_mandatory('format')
53c18592 2850
2b8a2973 2851 if self.params.get('forcejson'):
6e84b215 2852 self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
d06daf23 2853
e8e73840 2854 def dl(self, name, info, subtitle=False, test=False):
88acdbc2 2855 if not info.get('url'):
1151c407 2856 self.raise_no_formats(info, True)
e8e73840 2857
2858 if test:
2859 verbose = self.params.get('verbose')
2860 params = {
2861 'test': True,
a169858f 2862 'quiet': self.params.get('quiet') or not verbose,
e8e73840 2863 'verbose': verbose,
2864 'noprogress': not verbose,
2865 'nopart': True,
2866 'skip_unavailable_fragments': False,
2867 'keep_fragments': False,
2868 'overwrites': True,
2869 '_no_ytdl_file': True,
2870 }
2871 else:
2872 params = self.params
96fccc10 2873 fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
e8e73840 2874 if not test:
2875 for ph in self._progress_hooks:
2876 fd.add_progress_hook(ph)
42676437
M
2877 urls = '", "'.join(
2878 (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
2879 for f in info.get('requested_formats', []) or [info])
3a408f9d 2880 self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')
03b4de72 2881
adbc4ec4
THD
2882 # Note: Ideally, info should be deep-copied so that hooks cannot modify it.
2883 # But it may contain objects that are not deep-copyable
2884 new_info = self._copy_infodict(info)
e8e73840 2885 if new_info.get('http_headers') is None:
2886 new_info['http_headers'] = self._calc_headers(new_info)
2887 return fd.download(name, new_info, subtitle)
2888
e04938ab 2889 def existing_file(self, filepaths, *, default_overwrite=True):
2890 existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
2891 if existing_files and not self.params.get('overwrites', default_overwrite):
2892 return existing_files[0]
2893
2894 for file in existing_files:
2895 self.report_file_delete(file)
2896 os.remove(file)
2897 return None
2898
8222d8de 2899 def process_info(self, info_dict):
09b49e1f 2900 """Process a single resolved IE result. (Modifies it in-place)"""
8222d8de
JMF
2901
2902 assert info_dict.get('_type', 'video') == 'video'
f46e2f9d 2903 original_infodict = info_dict
fd288278 2904
4513a41a 2905 if 'format' not in info_dict and 'ext' in info_dict:
8222d8de
JMF
2906 info_dict['format'] = info_dict['ext']
2907
09b49e1f 2908 # This is mostly just for backward compatibility of process_info
2909 # As a side-effect, this allows for format-specific filters
c77495e3 2910 if self._match_entry(info_dict) is not None:
9e907ebd 2911 info_dict['__write_download_archive'] = 'ignore'
8222d8de
JMF
2912 return
2913
09b49e1f 2914 # Does nothing under normal operation - for backward compatibility of process_info
277d6ff5 2915 self.post_extract(info_dict)
0c14d66a 2916 self._num_downloads += 1
8222d8de 2917
dcf64d43 2918 # info_dict['_filename'] needs to be set for backward compatibility
de6000d9 2919 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2920 temp_filename = self.prepare_filename(info_dict, 'temp')
0202b52a 2921 files_to_move = {}
8222d8de
JMF
2922
2923 # Forced printings
4513a41a 2924 self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
8222d8de 2925
ca6d59d2 2926 def check_max_downloads():
2927 if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
2928 raise MaxDownloadsReached()
2929
b7b04c78 2930 if self.params.get('simulate'):
9e907ebd 2931 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
ca6d59d2 2932 check_max_downloads()
8222d8de
JMF
2933 return
2934
de6000d9 2935 if full_filename is None:
8222d8de 2936 return
e92caff5 2937 if not self._ensure_dir_exists(encodeFilename(full_filename)):
0202b52a 2938 return
e92caff5 2939 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
8222d8de
JMF
2940 return
2941
80c03fa9 2942 if self._write_description('video', info_dict,
2943 self.prepare_filename(info_dict, 'description')) is None:
2944 return
2945
2946 sub_files = self._write_subtitles(info_dict, temp_filename)
2947 if sub_files is None:
2948 return
2949 files_to_move.update(dict(sub_files))
2950
2951 thumb_files = self._write_thumbnails(
2952 'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
2953 if thumb_files is None:
2954 return
2955 files_to_move.update(dict(thumb_files))
8222d8de 2956
80c03fa9 2957 infofn = self.prepare_filename(info_dict, 'infojson')
2958 _infojson_written = self._write_info_json('video', info_dict, infofn)
2959 if _infojson_written:
dac5df5a 2960 info_dict['infojson_filename'] = infofn
e75bb0d6 2961 # For backward compatibility, even though it was a private field
80c03fa9 2962 info_dict['__infojson_filename'] = infofn
2963 elif _infojson_written is None:
2964 return
2965
2966 # Note: Annotations are deprecated
2967 annofn = None
1fb07d10 2968 if self.params.get('writeannotations', False):
de6000d9 2969 annofn = self.prepare_filename(info_dict, 'annotation')
80c03fa9 2970 if annofn:
e92caff5 2971 if not self._ensure_dir_exists(encodeFilename(annofn)):
0202b52a 2972 return
0c3d0f51 2973 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
6febd1c1 2974 self.to_screen('[info] Video annotations are already present')
ffddb112
RA
2975 elif not info_dict.get('annotations'):
2976 self.report_warning('There are no annotations to write.')
7b6fefc9
PH
2977 else:
2978 try:
6febd1c1 2979 self.to_screen('[info] Writing video annotations to: ' + annofn)
86e5f3ed 2980 with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
7b6fefc9
PH
2981 annofile.write(info_dict['annotations'])
2982 except (KeyError, TypeError):
6febd1c1 2983 self.report_warning('There are no annotations to write.')
86e5f3ed 2984 except OSError:
6febd1c1 2985 self.report_error('Cannot write annotations file: ' + annofn)
7b6fefc9 2986 return
1fb07d10 2987
732044af 2988 # Write internet shortcut files
08438d2c 2989 def _write_link_file(link_type):
60f3e995 2990 url = try_get(info_dict['webpage_url'], iri_to_uri)
2991 if not url:
2992 self.report_warning(
2993 f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
2994 return True
08438d2c 2995 linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
0e6b018a
Z
2996 if not self._ensure_dir_exists(encodeFilename(linkfn)):
2997 return False
10e3742e 2998 if self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
08438d2c 2999 self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
3000 return True
3001 try:
3002 self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
86e5f3ed 3003 with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
3004 newline='\r\n' if link_type == 'url' else '\n') as linkfile:
60f3e995 3005 template_vars = {'url': url}
08438d2c 3006 if link_type == 'desktop':
3007 template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
3008 linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
86e5f3ed 3009 except OSError:
08438d2c 3010 self.report_error(f'Cannot write internet shortcut {linkfn}')
3011 return False
732044af 3012 return True
3013
08438d2c 3014 write_links = {
3015 'url': self.params.get('writeurllink'),
3016 'webloc': self.params.get('writewebloclink'),
3017 'desktop': self.params.get('writedesktoplink'),
3018 }
3019 if self.params.get('writelink'):
3020 link_type = ('webloc' if sys.platform == 'darwin'
3021 else 'desktop' if sys.platform.startswith('linux')
3022 else 'url')
3023 write_links[link_type] = True
3024
3025 if any(should_write and not _write_link_file(link_type)
3026 for link_type, should_write in write_links.items()):
3027 return
732044af 3028
f46e2f9d 3029 def replace_info_dict(new_info):
3030 nonlocal info_dict
3031 if new_info == info_dict:
3032 return
3033 info_dict.clear()
3034 info_dict.update(new_info)
3035
415f8d51 3036 new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
3037 replace_info_dict(new_info)
56d868db 3038
a13e6848 3039 if self.params.get('skip_download'):
56d868db 3040 info_dict['filepath'] = temp_filename
3041 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3042 info_dict['__files_to_move'] = files_to_move
f46e2f9d 3043 replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
9e907ebd 3044 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
56d868db 3045 else:
3046 # Download
b868936c 3047 info_dict.setdefault('__postprocessors', [])
4340deca 3048 try:
0202b52a 3049
e04938ab 3050 def existing_video_file(*filepaths):
6b591b29 3051 ext = info_dict.get('ext')
e04938ab 3052 converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
3053 file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
3054 default_overwrite=False)
3055 if file:
3056 info_dict['ext'] = os.path.splitext(file)[1][1:]
3057 return file
0202b52a 3058
7b2c3f47 3059 fd, success = None, True
fccf90e7 3060 if info_dict.get('protocol') or info_dict.get('url'):
56ba69e4 3061 fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
3062 if fd is not FFmpegFD and (
3063 info_dict.get('section_start') or info_dict.get('section_end')):
7b2c3f47 3064 msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
56ba69e4 3065 else 'You have requested downloading the video partially, but ffmpeg is not installed')
3066 self.report_error(f'{msg}. Aborting')
5ec1b6b7 3067 return
5ec1b6b7 3068
4340deca 3069 if info_dict.get('requested_formats') is not None:
81cd954a
S
3070
3071 def compatible_formats(formats):
d03cfdce 3072 # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
3073 video_formats = [format for format in formats if format.get('vcodec') != 'none']
3074 audio_formats = [format for format in formats if format.get('acodec') != 'none']
3075 if len(video_formats) > 2 or len(audio_formats) > 2:
3076 return False
3077
81cd954a 3078 # Check extension
86e5f3ed 3079 exts = {format.get('ext') for format in formats}
d03cfdce 3080 COMPATIBLE_EXTS = (
86e5f3ed 3081 {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'},
3082 {'webm'},
d03cfdce 3083 )
3084 for ext_sets in COMPATIBLE_EXTS:
3085 if ext_sets.issuperset(exts):
3086 return True
81cd954a
S
3087 # TODO: Check acodec/vcodec
3088 return False
3089
3090 requested_formats = info_dict['requested_formats']
0202b52a 3091 old_ext = info_dict['ext']
4e3b637d 3092 if self.params.get('merge_output_format') is None:
3093 if not compatible_formats(requested_formats):
3094 info_dict['ext'] = 'mkv'
3095 self.report_warning(
3096 'Requested formats are incompatible for merge and will be merged into mkv')
3097 if (info_dict['ext'] == 'webm'
3098 and info_dict.get('thumbnails')
3099 # check with type instead of pp_key, __name__, or isinstance
3100 # since we don't want any custom PPs to trigger this
c487cf00 3101 and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])): # noqa: E721
4e3b637d 3102 info_dict['ext'] = 'mkv'
3103 self.report_warning(
3104 'webm doesn\'t support embedding a thumbnail, mkv will be used')
124bc071 3105 new_ext = info_dict['ext']
0202b52a 3106
124bc071 3107 def correct_ext(filename, ext=new_ext):
96fccc10 3108 if filename == '-':
3109 return filename
0202b52a 3110 filename_real_ext = os.path.splitext(filename)[1][1:]
3111 filename_wo_ext = (
3112 os.path.splitext(filename)[0]
124bc071 3113 if filename_real_ext in (old_ext, new_ext)
0202b52a 3114 else filename)
86e5f3ed 3115 return f'{filename_wo_ext}.{ext}'
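# Illustration (hypothetical names): with old_ext='mp4' and new_ext='mkv',
# correct_ext('video.f137.mp4') -> 'video.f137.mkv'; a '-' (stdout) target is left untouched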
0202b52a 3116
38c6902b 3117 # Ensure filename always has a correct extension for successful merge
0202b52a 3118 full_filename = correct_ext(full_filename)
3119 temp_filename = correct_ext(temp_filename)
e04938ab 3120 dl_filename = existing_video_file(full_filename, temp_filename)
1ea24129 3121 info_dict['__real_download'] = False
18e674b4 3122
7b2c3f47 3123 merger = FFmpegMergerPP(self)
adbc4ec4 3124 downloaded = []
dbf5416a 3125 if dl_filename is not None:
6c7274ec 3126 self.report_file_already_downloaded(dl_filename)
adbc4ec4
THD
3127 elif fd:
3128 for f in requested_formats if fd != FFmpegFD else []:
3129 f['filepath'] = fname = prepend_extension(
3130 correct_ext(temp_filename, info_dict['ext']),
3131 'f%s' % f['format_id'], info_dict['ext'])
3132 downloaded.append(fname)
dbf5416a 3133 info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
3134 success, real_download = self.dl(temp_filename, info_dict)
3135 info_dict['__real_download'] = real_download
18e674b4 3136 else:
18e674b4 3137 if self.params.get('allow_unplayable_formats'):
3138 self.report_warning(
3139 'You have requested merging of multiple formats '
3140 'while also allowing unplayable formats to be downloaded. '
3141 'The formats won\'t be merged to prevent data corruption.')
3142 elif not merger.available:
e8969bda 3143 msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
3144 if not self.params.get('ignoreerrors'):
3145 self.report_error(f'{msg}. Aborting due to --abort-on-error')
3146 return
3147 self.report_warning(f'{msg}. The formats won\'t be merged')
18e674b4 3148
96fccc10 3149 if temp_filename == '-':
adbc4ec4 3150 reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
96fccc10 3151 else 'but the formats are incompatible for simultaneous download' if merger.available
3152 else 'but ffmpeg is not installed')
3153 self.report_warning(
3154 f'You have requested downloading multiple formats to stdout {reason}. '
3155 'The formats will be streamed one after the other')
3156 fname = temp_filename
dbf5416a 3157 for f in requested_formats:
3158 new_info = dict(info_dict)
3159 del new_info['requested_formats']
3160 new_info.update(f)
96fccc10 3161 if temp_filename != '-':
124bc071 3162 fname = prepend_extension(
3163 correct_ext(temp_filename, new_info['ext']),
3164 'f%s' % f['format_id'], new_info['ext'])
96fccc10 3165 if not self._ensure_dir_exists(fname):
3166 return
a21e0ab1 3167 f['filepath'] = fname
96fccc10 3168 downloaded.append(fname)
dbf5416a 3169 partial_success, real_download = self.dl(fname, new_info)
3170 info_dict['__real_download'] = info_dict['__real_download'] or real_download
3171 success = success and partial_success
adbc4ec4
THD
3172
3173 if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
3174 info_dict['__postprocessors'].append(merger)
3175 info_dict['__files_to_merge'] = downloaded
3176 # Even if nothing was newly downloaded, the merge itself only happens now
3177 info_dict['__real_download'] = True
3178 else:
3179 for file in downloaded:
3180 files_to_move[file] = None
4340deca
P
3181 else:
3182 # Just a single file
e04938ab 3183 dl_filename = existing_video_file(full_filename, temp_filename)
6c7274ec 3184 if dl_filename is None or dl_filename == temp_filename:
3185 # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
3186 # So we should try to resume the download
e8e73840 3187 success, real_download = self.dl(temp_filename, info_dict)
0202b52a 3188 info_dict['__real_download'] = real_download
6c7274ec 3189 else:
3190 self.report_file_already_downloaded(dl_filename)
0202b52a 3191
0202b52a 3192 dl_filename = dl_filename or temp_filename
c571435f 3193 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
0202b52a 3194
3158150c 3195 except network_exceptions as err:
7960b056 3196 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
4340deca 3197 return
86e5f3ed 3198 except OSError as err:
4340deca
P
3199 raise UnavailableVideoError(err)
3200 except (ContentTooShortError, ) as err:
86e5f3ed 3201 self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
4340deca 3202 return
8222d8de 3203
415f8d51 3204 self._raise_pending_errors(info_dict)
de6000d9 3205 if success and full_filename != '-':
f17f8651 3206
fd7cfb64 3207 def fixup():
3208 do_fixup = True
3209 fixup_policy = self.params.get('fixup')
3210 vid = info_dict['id']
3211
3212 if fixup_policy in ('ignore', 'never'):
3213 return
3214 elif fixup_policy == 'warn':
3fe75fdc 3215 do_fixup = 'warn'
f89b3e2d 3216 elif fixup_policy != 'force':
3217 assert fixup_policy in ('detect_or_warn', None)
3218 if not info_dict.get('__real_download'):
3219 do_fixup = False
fd7cfb64 3220
3221 def ffmpeg_fixup(cndn, msg, cls):
3fe75fdc 3222 if not (do_fixup and cndn):
fd7cfb64 3223 return
3fe75fdc 3224 elif do_fixup == 'warn':
fd7cfb64 3225 self.report_warning(f'{vid}: {msg}')
3226 return
3227 pp = cls(self)
3228 if pp.available:
3229 info_dict['__postprocessors'].append(pp)
3230 else:
3231 self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
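# In short: --fixup never/ignore skips these checks entirely, --fixup warn only reports
# the problem, and the default detect_or_warn queues the relevant FFmpegFixup*
# postprocessor when the condition holds and ffmpeg is available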
3232
3233 stretched_ratio = info_dict.get('stretched_ratio')
ca9def71
LNO
3234 ffmpeg_fixup(stretched_ratio not in (1, None),
3235 f'Non-uniform pixel ratio {stretched_ratio}',
3236 FFmpegFixupStretchedPP)
fd7cfb64 3237
993191c0 3238 downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
24146491 3239 downloader = downloader.FD_NAME if downloader else None
adbc4ec4 3240
ca9def71
LNO
3241 ext = info_dict.get('ext')
3242 postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
3243 isinstance(pp, FFmpegVideoConvertorPP)
3244 and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
3245 ) for pp in self._pps['post_process'])
3246
3247 if not postprocessed_by_ffmpeg:
3248 ffmpeg_fixup(ext == 'm4a' and info_dict.get('container') == 'm4a_dash',
f2df4071 3249 'writing DASH m4a. Only some players support this container',
3250 FFmpegFixupM4aPP)
24146491 3251 ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
494f5230 3252 or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
adbc4ec4
THD
3253 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
3254 FFmpegFixupM3u8PP)
3255 ffmpeg_fixup(info_dict.get('is_live') and downloader == 'DashSegmentsFD',
3256 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)
3257
24146491 3258 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
3259 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)
fd7cfb64 3260
3261 fixup()
8222d8de 3262 try:
f46e2f9d 3263 replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
af819c21 3264 except PostProcessingError as err:
3265 self.report_error('Postprocessing: %s' % str(err))
8222d8de 3266 return
ab8e5e51
AM
3267 try:
3268 for ph in self._post_hooks:
23c1a667 3269 ph(info_dict['filepath'])
ab8e5e51
AM
3270 except Exception as err:
3271 self.report_error('post hooks: %s' % str(err))
3272 return
9e907ebd 3273 info_dict['__write_download_archive'] = True
2d30509f 3274
c487cf00 3275 assert info_dict is original_infodict # Make sure the info_dict was modified in-place
a13e6848 3276 if self.params.get('force_write_download_archive'):
9e907ebd 3277 info_dict['__write_download_archive'] = True
ca6d59d2 3278 check_max_downloads()
8222d8de 3279
aa9369a2 3280 def __download_wrapper(self, func):
3281 @functools.wraps(func)
3282 def wrapper(*args, **kwargs):
3283 try:
3284 res = func(*args, **kwargs)
3285 except UnavailableVideoError as e:
3286 self.report_error(e)
b222c271 3287 except DownloadCancelled as e:
3288 self.to_screen(f'[info] {e}')
3289 if not self.params.get('break_per_url'):
3290 raise
aa9369a2 3291 else:
3292 if self.params.get('dump_single_json', False):
3293 self.post_extract(res)
3294 self.to_stdout(json.dumps(self.sanitize_info(res)))
3295 return wrapper
3296
8222d8de
JMF
3297 def download(self, url_list):
3298 """Download a given list of URLs."""
aa9369a2 3299 url_list = variadic(url_list) # Passing a single URL is a common mistake
bf1824b3 3300 outtmpl = self.params['outtmpl']['default']
3089bc74
S
3301 if (len(url_list) > 1
3302 and outtmpl != '-'
3303 and '%' not in outtmpl
3304 and self.params.get('max_downloads') != 1):
acd69589 3305 raise SameFileError(outtmpl)
8222d8de
JMF
3306
3307 for url in url_list:
aa9369a2 3308 self.__download_wrapper(self.extract_info)(
3309 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
8222d8de
JMF
3310
3311 return self._download_retcode
3312
1dcc4c0c 3313 def download_with_info_file(self, info_filename):
31bd3925
JMF
3314 with contextlib.closing(fileinput.FileInput(
3315 [info_filename], mode='r',
3316 openhook=fileinput.hook_encoded('utf-8'))) as f:
3317 # FileInput doesn't have a read method, so we can't call json.load
8012d892 3318 info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
d4943898 3319 try:
aa9369a2 3320 self.__download_wrapper(self.process_ie_result)(info, download=True)
f2ebc5c7 3321 except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
bf5f605e 3322 if not isinstance(e, EntryNotInPlaylist):
3323 self.to_stderr('\r')
d4943898
JMF
3324 webpage_url = info.get('webpage_url')
3325 if webpage_url is not None:
aa9369a2 3326 self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
d4943898
JMF
3327 return self.download([webpage_url])
3328 else:
3329 raise
3330 return self._download_retcode
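# Rough usage sketch (hypothetical filename): ydl.download_with_info_file('clip.info.json')
# re-runs processing/download from a previously written info JSON and, on failure,
# falls back to re-extracting from the stored webpage_url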
1dcc4c0c 3331
cb202fd2 3332 @staticmethod
8012d892 3333 def sanitize_info(info_dict, remove_private_keys=False):
3334 ''' Sanitize the infodict for converting to json '''
3ad56b42 3335 if info_dict is None:
3336 return info_dict
6e84b215 3337 info_dict.setdefault('epoch', int(time.time()))
6a5a30f9 3338 info_dict.setdefault('_type', 'video')
09b49e1f 3339
8012d892 3340 if remove_private_keys:
0a5a191a 3341 reject = lambda k, v: v is None or k.startswith('__') or k in {
f46e2f9d 3342 'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
0a5a191a 3343 'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber',
6e84b215 3344 }
ae8f99e6 3345 else:
09b49e1f 3346 reject = lambda k, v: False
adbc4ec4
THD
3347
3348 def filter_fn(obj):
3349 if isinstance(obj, dict):
3350 return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
3351 elif isinstance(obj, (list, tuple, set, LazyList)):
3352 return list(map(filter_fn, obj))
3353 elif obj is None or isinstance(obj, (str, int, float, bool)):
3354 return obj
3355 else:
3356 return repr(obj)
3357
5226731e 3358 return filter_fn(info_dict)
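# Hypothetical illustration of remove_private_keys=True:
#   {'id': 'x', 'title': None, '__real_download': True, 'requested_formats': [...]}
#   -> {'id': 'x', 'epoch': 1600000000, '_type': 'video'}
# None values, dunder keys and the listed private keys are dropped; values that are not
# JSON-friendly primitives or containers are replaced by their repr()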
cb202fd2 3359
8012d892 3360 @staticmethod
3361 def filter_requested_info(info_dict, actually_filter=True):
3362 ''' Alias of sanitize_info for backward compatibility '''
3363 return YoutubeDL.sanitize_info(info_dict, actually_filter)
3364
43d7f5a5 3365 def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
3366 for filename in set(filter(None, files_to_delete)):
3367 if msg:
3368 self.to_screen(msg % filename)
3369 try:
3370 os.remove(filename)
3371 except OSError:
3372 self.report_warning(f'Unable to delete file {filename}')
3373 if filename in info.get('__files_to_move', []): # NB: Delete even if None
3374 del info['__files_to_move'][filename]
3375
ed5835b4 3376 @staticmethod
3377 def post_extract(info_dict):
3378 def actual_post_extract(info_dict):
3379 if info_dict.get('_type') in ('playlist', 'multi_video'):
3380 for video_dict in info_dict.get('entries', {}):
3381 actual_post_extract(video_dict or {})
3382 return
3383
09b49e1f 3384 post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
3385 info_dict.update(post_extractor())
ed5835b4 3386
3387 actual_post_extract(info_dict or {})
3388
dcf64d43 3389 def run_pp(self, pp, infodict):
5bfa4862 3390 files_to_delete = []
dcf64d43 3391 if '__files_to_move' not in infodict:
3392 infodict['__files_to_move'] = {}
b1940459 3393 try:
3394 files_to_delete, infodict = pp.run(infodict)
3395 except PostProcessingError as e:
3396 # Must be True and not 'only_download'
3397 if self.params.get('ignoreerrors') is True:
3398 self.report_error(e)
3399 return infodict
3400 raise
3401
5bfa4862 3402 if not files_to_delete:
dcf64d43 3403 return infodict
5bfa4862 3404 if self.params.get('keepvideo', False):
3405 for f in files_to_delete:
dcf64d43 3406 infodict['__files_to_move'].setdefault(f, '')
5bfa4862 3407 else:
43d7f5a5 3408 self._delete_downloaded_files(
3409 *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
dcf64d43 3410 return infodict
5bfa4862 3411
ed5835b4 3412 def run_all_pps(self, key, info, *, additional_pps=None):
bb66c247 3413 self._forceprint(key, info)
ed5835b4 3414 for pp in (additional_pps or []) + self._pps[key]:
dc5f409c 3415 info = self.run_pp(pp, info)
ed5835b4 3416 return info
277d6ff5 3417
56d868db 3418 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
5bfa4862 3419 info = dict(ie_info)
56d868db 3420 info['__files_to_move'] = files_to_move or {}
415f8d51 3421 try:
3422 info = self.run_all_pps(key, info)
3423 except PostProcessingError as err:
3424 msg = f'Preprocessing: {err}'
3425 info.setdefault('__pending_error', msg)
3426 self.report_error(msg, is_error=False)
56d868db 3427 return info, info.pop('__files_to_move', None)
5bfa4862 3428
f46e2f9d 3429 def post_process(self, filename, info, files_to_move=None):
8222d8de 3430 """Run all the postprocessors on the given file."""
8222d8de 3431 info['filepath'] = filename
dcf64d43 3432 info['__files_to_move'] = files_to_move or {}
ed5835b4 3433 info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
dcf64d43 3434 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
3435 del info['__files_to_move']
ed5835b4 3436 return self.run_all_pps('after_move', info)
c1c9a79c 3437
5db07df6 3438 def _make_archive_id(self, info_dict):
e9fef7ee
S
3439 video_id = info_dict.get('id')
3440 if not video_id:
3441 return
5db07df6
PH
3442 # Future-proof against any change in case
3443 # and for backwards compatibility with prior versions
e9fef7ee 3444 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
7012b23c 3445 if extractor is None:
1211bb6d
S
3446 url = str_or_none(info_dict.get('url'))
3447 if not url:
3448 return
e9fef7ee 3449 # Try to find matching extractor for the URL and take its ie_key
8b7491c8 3450 for ie_key, ie in self._ies.items():
1211bb6d 3451 if ie.suitable(url):
8b7491c8 3452 extractor = ie_key
e9fef7ee
S
3453 break
3454 else:
3455 return
0647d925 3456 return make_archive_id(extractor, video_id)
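# e.g. a video with id 'abc123' handled by the YouTube extractor would produce the
# archive entry 'youtube abc123' (lowercased extractor key plus the video id)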
5db07df6
PH
3457
3458 def in_download_archive(self, info_dict):
3459 fn = self.params.get('download_archive')
3460 if fn is None:
3461 return False
3462
1e8fe57e 3463 vid_ids = [self._make_archive_id(info_dict)]
3464 vid_ids.extend(info_dict.get('_old_archive_ids', []))
3465 return any(id_ in self.archive for id_ in vid_ids)
c1c9a79c
PH
3466
3467 def record_download_archive(self, info_dict):
3468 fn = self.params.get('download_archive')
3469 if fn is None:
3470 return
5db07df6
PH
3471 vid_id = self._make_archive_id(info_dict)
3472 assert vid_id
a13e6848 3473 self.write_debug(f'Adding to archive: {vid_id}')
c1c9a79c 3474 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
6febd1c1 3475 archive_file.write(vid_id + '\n')
a45e8619 3476 self.archive.add(vid_id)
dd82ffea 3477
8c51aa65 3478 @staticmethod
8abeeb94 3479 def format_resolution(format, default='unknown'):
9359f3d4 3480 if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
fb04e403 3481 return 'audio only'
f49d89ee
PH
3482 if format.get('resolution') is not None:
3483 return format['resolution']
35615307 3484 if format.get('width') and format.get('height'):
ff51ed58 3485 return '%dx%d' % (format['width'], format['height'])
35615307 3486 elif format.get('height'):
ff51ed58 3487 return '%sp' % format['height']
35615307 3488 elif format.get('width'):
ff51ed58 3489 return '%dx?' % format['width']
3490 return default
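# Illustrative outputs: {'width': 1920, 'height': 1080} -> '1920x1080',
# {'height': 720} -> '720p', {'width': 640} -> '640x?', audio-only formats -> 'audio only'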
8c51aa65 3491
8130779d 3492 def _list_format_headers(self, *headers):
3493 if self.params.get('listformats_table', True) is not False:
591bb9d3 3494 return [self._format_out(header, self.Styles.HEADERS) for header in headers]
8130779d 3495 return headers
3496
c57f7757
PH
3497 def _format_note(self, fdict):
3498 res = ''
3499 if fdict.get('ext') in ['f4f', 'f4m']:
f304da8a 3500 res += '(unsupported)'
32f90364
PH
3501 if fdict.get('language'):
3502 if res:
3503 res += ' '
f304da8a 3504 res += '[%s]' % fdict['language']
c57f7757 3505 if fdict.get('format_note') is not None:
f304da8a 3506 if res:
3507 res += ' '
3508 res += fdict['format_note']
c57f7757 3509 if fdict.get('tbr') is not None:
f304da8a 3510 if res:
3511 res += ', '
3512 res += '%4dk' % fdict['tbr']
c57f7757
PH
3513 if fdict.get('container') is not None:
3514 if res:
3515 res += ', '
3516 res += '%s container' % fdict['container']
3089bc74
S
3517 if (fdict.get('vcodec') is not None
3518 and fdict.get('vcodec') != 'none'):
c57f7757
PH
3519 if res:
3520 res += ', '
3521 res += fdict['vcodec']
91c7271a 3522 if fdict.get('vbr') is not None:
c57f7757
PH
3523 res += '@'
3524 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
3525 res += 'video@'
3526 if fdict.get('vbr') is not None:
3527 res += '%4dk' % fdict['vbr']
fbb21cf5 3528 if fdict.get('fps') is not None:
5d583bdf
S
3529 if res:
3530 res += ', '
3531 res += '%sfps' % fdict['fps']
c57f7757
PH
3532 if fdict.get('acodec') is not None:
3533 if res:
3534 res += ', '
3535 if fdict['acodec'] == 'none':
3536 res += 'video only'
3537 else:
3538 res += '%-5s' % fdict['acodec']
3539 elif fdict.get('abr') is not None:
3540 if res:
3541 res += ', '
3542 res += 'audio'
3543 if fdict.get('abr') is not None:
3544 res += '@%3dk' % fdict['abr']
3545 if fdict.get('asr') is not None:
3546 res += ' (%5dHz)' % fdict['asr']
3547 if fdict.get('filesize') is not None:
3548 if res:
3549 res += ', '
3550 res += format_bytes(fdict['filesize'])
9732d77e
PH
3551 elif fdict.get('filesize_approx') is not None:
3552 if res:
3553 res += ', '
3554 res += '~' + format_bytes(fdict['filesize_approx'])
c57f7757 3555 return res
91c7271a 3556
8130779d 3557 def render_formats_table(self, info_dict):
b69fd25c 3558 if not info_dict.get('formats') and not info_dict.get('url'):
8130779d 3559 return None
b69fd25c 3560
94badb25 3561 formats = info_dict.get('formats', [info_dict])
8130779d 3562 if self.params.get('listformats_table', True) is False:
76d321f6 3563 table = [
3564 [
3565 format_field(f, 'format_id'),
3566 format_field(f, 'ext'),
3567 self.format_resolution(f),
8130779d 3568 self._format_note(f)
3569 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3570 return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
3571
d816f61f 3572 def simplified_codec(f, field):
3573 assert field in ('acodec', 'vcodec')
3574 codec = f.get(field, 'unknown')
f5ea4748 3575 if not codec:
3576 return 'unknown'
3577 elif codec != 'none':
d816f61f 3578 return '.'.join(codec.split('.')[:4])
3579
3580 if field == 'vcodec' and f.get('acodec') == 'none':
3581 return 'images'
3582 elif field == 'acodec' and f.get('vcodec') == 'none':
3583 return ''
3584 return self._format_out('audio only' if field == 'vcodec' else 'video only',
3585 self.Styles.SUPPRESS)
3586
591bb9d3 3587 delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
8130779d 3588 table = [
3589 [
591bb9d3 3590 self._format_out(format_field(f, 'format_id'), self.Styles.ID),
8130779d 3591 format_field(f, 'ext'),
3592 format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
563e0bf8 3593 format_field(f, 'fps', '\t%d', func=round),
8130779d 3594 format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
3595 delim,
3596 format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
563e0bf8 3597 format_field(f, 'tbr', '\t%dk', func=round),
8130779d 3598 shorten_protocol_name(f.get('protocol', '')),
3599 delim,
d816f61f 3600 simplified_codec(f, 'vcodec'),
563e0bf8 3601 format_field(f, 'vbr', '\t%dk', func=round),
d816f61f 3602 simplified_codec(f, 'acodec'),
563e0bf8 3603 format_field(f, 'abr', '\t%dk', func=round),
ae61d108 3604 format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
8130779d 3605 join_nonempty(
591bb9d3 3606 self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
8130779d 3607 format_field(f, 'language', '[%s]'),
3608 join_nonempty(format_field(f, 'format_note'),
3609 format_field(f, 'container', ignore=(None, f.get('ext'))),
3610 delim=', '),
3611 delim=' '),
3612 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3613 header_line = self._list_format_headers(
3614 'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', delim, '\tFILESIZE', '\tTBR', 'PROTO',
3615 delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')
3616
3617 return render_table(
3618 header_line, table, hide_empty=True,
591bb9d3 3619 delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
8130779d 3620
3621 def render_thumbnails_table(self, info_dict):
88f23a18 3622 thumbnails = list(info_dict.get('thumbnails') or [])
cfb56d1a 3623 if not thumbnails:
8130779d 3624 return None
3625 return render_table(
ec11a9f4 3626 self._list_format_headers('ID', 'Width', 'Height', 'URL'),
6970b600 3627 [[t.get('id'), t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])
2412044c 3628
8130779d 3629 def render_subtitles_table(self, video_id, subtitles):
2412044c 3630 def _row(lang, formats):
49c258e1 3631 exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
2412044c 3632 if len(set(names)) == 1:
7aee40c1 3633 names = [] if names[0] == 'unknown' else names[:1]
2412044c 3634 return [lang, ', '.join(names), ', '.join(exts)]
3635
8130779d 3636 if not subtitles:
3637 return None
3638 return render_table(
ec11a9f4 3639 self._list_format_headers('Language', 'Name', 'Formats'),
2412044c 3640 [_row(lang, formats) for lang, formats in subtitles.items()],
8130779d 3641 hide_empty=True)
3642
3643 def __list_table(self, video_id, name, func, *args):
3644 table = func(*args)
3645 if not table:
3646 self.to_screen(f'{video_id} has no {name}')
3647 return
3648 self.to_screen(f'[info] Available {name} for {video_id}:')
3649 self.to_stdout(table)
3650
3651 def list_formats(self, info_dict):
3652 self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)
3653
3654 def list_thumbnails(self, info_dict):
3655 self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)
3656
3657 def list_subtitles(self, video_id, subtitles, name='subtitles'):
3658 self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
a504ced0 3659
dca08720
PH
3660 def urlopen(self, req):
3661 """ Start an HTTP download """
f9934b96 3662 if isinstance(req, str):
67dda517 3663 req = sanitized_Request(req)
19a41fc6 3664 return self._opener.open(req, timeout=self._socket_timeout)
dca08720
PH
3665
3666 def print_debug_header(self):
3667 if not self.params.get('verbose'):
3668 return
49a57e70 3669
560738f3 3670 # These imports can be slow. So import them only as needed
3671 from .extractor.extractors import _LAZY_LOADER
3672 from .extractor.extractors import _PLUGIN_CLASSES as plugin_extractors
3673
49a57e70 3674 def get_encoding(stream):
2a938746 3675 ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
49a57e70 3676 if not supports_terminal_sequences(stream):
53973b4d 3677 from .utils import WINDOWS_VT_MODE # Must be imported locally
e3c7d495 3678 ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
49a57e70 3679 return ret
3680
591bb9d3 3681 encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
49a57e70 3682 locale.getpreferredencoding(),
3683 sys.getfilesystemencoding(),
591bb9d3 3684 self.get_encoding(),
3685 ', '.join(
64fa820c 3686 f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
591bb9d3 3687 if stream is not None and key != 'console')
3688 )
883d4b1e 3689
3690 logger = self.params.get('logger')
3691 if logger:
3692 write_debug = lambda msg: logger.debug(f'[debug] {msg}')
3693 write_debug(encoding_str)
3694 else:
96565c7e 3695 write_string(f'[debug] {encoding_str}\n', encoding=None)
49a57e70 3696 write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')
734f90bb 3697
4c88ff87 3698 source = detect_variant()
36eaf303 3699 write_debug(join_nonempty(
3700 'yt-dlp version', __version__,
3701 f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
3702 '' if source == 'unknown' else f'({source})',
3703 delim=' '))
6e21fdd2 3704 if not _LAZY_LOADER:
3705 if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
49a57e70 3706 write_debug('Lazy loading extractors is forcibly disabled')
6e21fdd2 3707 else:
49a57e70 3708 write_debug('Lazy loading extractors is disabled')
3ae5e797 3709 if plugin_extractors or plugin_postprocessors:
49a57e70 3710 write_debug('Plugins: %s' % [
3ae5e797 3711 '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
3712 for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
8a82af35 3713 if self.params['compat_opts']:
3714 write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
36eaf303 3715
3716 if source == 'source':
dca08720 3717 try:
f0c9fb96 3718 stdout, _, _ = Popen.run(
36eaf303 3719 ['git', 'rev-parse', '--short', 'HEAD'],
f0c9fb96 3720 text=True, cwd=os.path.dirname(os.path.abspath(__file__)),
3721 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
3722 if re.fullmatch('[0-9a-f]+', stdout.strip()):
3723 write_debug(f'Git HEAD: {stdout.strip()}')
70a1165b 3724 except Exception:
19a03940 3725 with contextlib.suppress(Exception):
36eaf303 3726 sys.exc_clear()
b300cda4 3727
b1f94422 3728 write_debug(system_identifier())
d28b5171 3729
8913ef74 3730 exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
3731 ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
3732 if ffmpeg_features:
19a03940 3733 exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))
8913ef74 3734
4c83c967 3735 exe_versions['rtmpdump'] = rtmpdump_version()
feee8d32 3736 exe_versions['phantomjs'] = PhantomJSwrapper._version()
d28b5171 3737 exe_str = ', '.join(
2831b468 3738 f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
3739 ) or 'none'
49a57e70 3740 write_debug('exe versions: %s' % exe_str)
dca08720 3741
1d485a1a 3742 from .compat.compat_utils import get_package_info
9b8ee23b 3743 from .dependencies import available_dependencies
3744
3745 write_debug('Optional libraries: %s' % (', '.join(sorted({
1d485a1a 3746 join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
9b8ee23b 3747 })) or 'none'))
2831b468 3748
97ec5bc5 3749 self._setup_opener()
dca08720
PH
3750 proxy_map = {}
3751 for handler in self._opener.handlers:
3752 if hasattr(handler, 'proxies'):
3753 proxy_map.update(handler.proxies)
49a57e70 3754 write_debug(f'Proxy map: {proxy_map}')
dca08720 3755
49a57e70 3756 # Not implemented
3757 if False and self.params.get('call_home'):
0f06bcd7 3758 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
49a57e70 3759 write_debug('Public IP address: %s' % ipaddr)
58b1f00d 3760 latest_version = self.urlopen(
0f06bcd7 3761 'https://yt-dl.org/latest/version').read().decode()
58b1f00d
PH
3762 if version_tuple(latest_version) > version_tuple(__version__):
3763 self.report_warning(
3764 'You are using an outdated version (newest version: %s)! '
3765 'See https://yt-dl.org/update if you need help updating.' %
3766 latest_version)
3767
e344693b 3768 def _setup_opener(self):
97ec5bc5 3769 if hasattr(self, '_opener'):
3770 return
6ad14cab 3771 timeout_val = self.params.get('socket_timeout')
17bddf3e 3772 self._socket_timeout = 20 if timeout_val is None else float(timeout_val)
6ad14cab 3773
982ee69a 3774 opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
dca08720
PH
3775 opts_cookiefile = self.params.get('cookiefile')
3776 opts_proxy = self.params.get('proxy')
3777
982ee69a 3778 self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
dca08720 3779
6a3f4c3f 3780 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
dca08720
PH
3781 if opts_proxy is not None:
3782 if opts_proxy == '':
3783 proxies = {}
3784 else:
3785 proxies = {'http': opts_proxy, 'https': opts_proxy}
3786 else:
ac668111 3787 proxies = urllib.request.getproxies()
067aa17e 3788 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
dca08720
PH
3789 if 'http' in proxies and 'https' not in proxies:
3790 proxies['https'] = proxies['http']
91410c9b 3791 proxy_handler = PerRequestProxyHandler(proxies)
a0ddb8a2
PH
3792
3793 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
be4a824d
PH
3794 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3795 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
fca6dba8 3796 redirect_handler = YoutubeDLRedirectHandler()
f9934b96 3797 data_handler = urllib.request.DataHandler()
6240b0a2
JMF
3798
3799 # When passing our own FileHandler instance, build_opener won't add the
3800 # default FileHandler and allows us to disable the file protocol, which
3801 # can be used for malicious purposes (see
067aa17e 3802 # https://github.com/ytdl-org/youtube-dl/issues/8227)
ac668111 3803 file_handler = urllib.request.FileHandler()
6240b0a2
JMF
3804
3805 def file_open(*args, **kwargs):
ac668111 3806 raise urllib.error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
6240b0a2
JMF
3807 file_handler.file_open = file_open
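# Illustrative effect (example URL is hypothetical, not from the original file):
# after this override, self.urlopen('file:///etc/passwd') raises URLError
# instead of reading the local file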
3808
ac668111 3809 opener = urllib.request.build_opener(
fca6dba8 3810 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
2461f79d 3811
dca08720
PH
3812 # Delete the default user-agent header, which would otherwise apply in
3813 # cases where our custom HTTP handler doesn't come into play
067aa17e 3814 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
dca08720
PH
3815 opener.addheaders = []
3816 self._opener = opener
62fec3b2
PH
3817
3818 def encode(self, s):
3819 if isinstance(s, bytes):
3820 return s # Already encoded
3821
3822 try:
3823 return s.encode(self.get_encoding())
3824 except UnicodeEncodeError as err:
3825 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3826 raise
3827
3828 def get_encoding(self):
3829 encoding = self.params.get('encoding')
3830 if encoding is None:
3831 encoding = preferredencoding()
3832 return encoding
ec82d85a 3833
e08a85d8 3834 def _write_info_json(self, label, ie_result, infofn, overwrite=None):
cb96c5be 3835 ''' Write infojson and return True = written, 'exists' = already exists, False = skipped, None = error '''
e08a85d8 3836 if overwrite is None:
3837 overwrite = self.params.get('overwrites', True)
80c03fa9 3838 if not self.params.get('writeinfojson'):
3839 return False
3840 elif not infofn:
3841 self.write_debug(f'Skipping writing {label} infojson')
3842 return False
3843 elif not self._ensure_dir_exists(infofn):
3844 return None
e08a85d8 3845 elif not overwrite and os.path.exists(infofn):
80c03fa9 3846 self.to_screen(f'[info] {label.title()} metadata is already present')
cb96c5be 3847 return 'exists'
3848
3849 self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
3850 try:
3851 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
3852 return True
86e5f3ed 3853 except OSError:
cb96c5be 3854 self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
3855 return None
80c03fa9 3856
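# Illustrative caller sketch (an assumption, not part of this file): how the
# four-valued result of _write_info_json() is typically consumed.
#
#     ret = ydl._write_info_json('video', info_dict, infofn)
#     if ret is None:                  # could not write; error already reported
#         return
#     elif ret in (True, 'exists'):    # the .info.json is now present on disk
#         info_dict['infojson_filename'] = infofn
#     # ret is False: writing was skipped (disabled or no filename)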
3857 def _write_description(self, label, ie_result, descfn):
3858 ''' Write description and return True = written, False = skipped, None = error '''
3859 if not self.params.get('writedescription'):
3860 return False
3861 elif not descfn:
3862 self.write_debug(f'Skipping writing {label} description')
3863 return False
3864 elif not self._ensure_dir_exists(descfn):
3865 return None
3866 elif not self.params.get('overwrites', True) and os.path.exists(descfn):
3867 self.to_screen(f'[info] {label.title()} description is already present')
3868 elif ie_result.get('description') is None:
3869 self.report_warning(f'There\'s no {label} description to write')
3870 return False
3871 else:
3872 try:
3873 self.to_screen(f'[info] Writing {label} description to: {descfn}')
86e5f3ed 3874 with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
80c03fa9 3875 descfile.write(ie_result['description'])
86e5f3ed 3876 except OSError:
80c03fa9 3877 self.report_error(f'Cannot write {label} description file {descfn}')
3878 return None
3879 return True
3880
3881 def _write_subtitles(self, info_dict, filename):
3882 ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error '''
3883 ret = []
3884 subtitles = info_dict.get('requested_subtitles')
3885 if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
3886 # Subtitle download errors are already handled in the relevant IE,
3887 # so this will silently continue when used with an IE that does not support them
3888 return ret
3889
3890 sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
3891 if not sub_filename_base:
3892 self.to_screen('[info] Skipping writing video subtitles')
3893 return ret
3894 for sub_lang, sub_info in subtitles.items():
3895 sub_format = sub_info['ext']
3896 sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
3897 sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
e04938ab 3898 existing_sub = self.existing_file((sub_filename_final, sub_filename))
3899 if existing_sub:
80c03fa9 3900 self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
e04938ab 3901 sub_info['filepath'] = existing_sub
3902 ret.append((existing_sub, sub_filename_final))
80c03fa9 3903 continue
3904
3905 self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
3906 if sub_info.get('data') is not None:
3907 try:
3908 # Use newline='' to prevent conversion of newline characters
3909 # See https://github.com/ytdl-org/youtube-dl/issues/10268
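# (with the default newline handling, each '\n' would be rewritten as
# os.linesep on write, corrupting '\r\n' line endings already present
# in the subtitle data)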
86e5f3ed 3910 with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
80c03fa9 3911 subfile.write(sub_info['data'])
3912 sub_info['filepath'] = sub_filename
3913 ret.append((sub_filename, sub_filename_final))
3914 continue
86e5f3ed 3915 except OSError:
80c03fa9 3916 self.report_error(f'Cannot write video subtitles file {sub_filename}')
3917 return None
3918
3919 try:
3920 sub_copy = sub_info.copy()
3921 sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
3922 self.dl(sub_filename, sub_copy, subtitle=True)
3923 sub_info['filepath'] = sub_filename
3924 ret.append((sub_filename, sub_filename_final))
6020e05d 3925 except (DownloadError, ExtractorError, OSError, ValueError) + network_exceptions as err:
c70c418d 3926 msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
6020e05d 3927 if self.params.get('ignoreerrors') is not True: # False or 'only_download'
c70c418d 3928 if not self.params.get('ignoreerrors'):
3929 self.report_error(msg)
3930 raise DownloadError(msg)
3931 self.report_warning(msg)
519804a9 3932 return ret
80c03fa9 3933
3934 def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
3935 ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
6c4fd172 3936 write_all = self.params.get('write_all_thumbnails', False)
80c03fa9 3937 thumbnails, ret = [], []
6c4fd172 3938 if write_all or self.params.get('writethumbnail', False):
0202b52a 3939 thumbnails = info_dict.get('thumbnails') or []
6c4fd172 3940 multiple = write_all and len(thumbnails) > 1
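# 'multiple' decides whether the thumbnail id is embedded in the file name,
# which is only needed when more than one thumbnail may be written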
ec82d85a 3941
80c03fa9 3942 if thumb_filename_base is None:
3943 thumb_filename_base = filename
3944 if thumbnails and not thumb_filename_base:
3945 self.write_debug(f'Skipping writing {label} thumbnail')
3946 return ret
3947
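# Try thumbnails in reverse order (the most preferred ones come last in the
# list); unless write_all is set, stop after the first successful download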
dd0228ce 3948 for idx, t in list(enumerate(thumbnails))[::-1]:
80c03fa9 3949 thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
aa9369a2 3950 thumb_display_id = f'{label} thumbnail {t["id"]}'
80c03fa9 3951 thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
3952 thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
ec82d85a 3953
e04938ab 3954 existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
3955 if existing_thumb:
aa9369a2 3956 self.to_screen('[info] %s is already present' % (
3957 thumb_display_id if multiple else f'{label} thumbnail').capitalize())
e04938ab 3958 t['filepath'] = existing_thumb
3959 ret.append((existing_thumb, thumb_filename_final))
ec82d85a 3960 else:
80c03fa9 3961 self.to_screen(f'[info] Downloading {thumb_display_id} ...')
ec82d85a 3962 try:
297e9952 3963 uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
80c03fa9 3964 self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
d3d89c32 3965 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
ec82d85a 3966 shutil.copyfileobj(uf, thumbf)
80c03fa9 3967 ret.append((thumb_filename, thumb_filename_final))
885cc0b7 3968 t['filepath'] = thumb_filename
3158150c 3969 except network_exceptions as err:
dd0228ce 3970 thumbnails.pop(idx)
80c03fa9 3971 self.report_warning(f'Unable to download {thumb_display_id}: {err}')
6c4fd172 3972 if ret and not write_all:
3973 break
0202b52a 3974 return ret