import collections
import contextlib
import datetime
import errno
import fileinput
import functools
import io
import itertools
import json
import locale
import operator
import os
import random
import re
import shutil
import subprocess
import sys
import tempfile
import time
import tokenize
import traceback
import unicodedata
import urllib.request
from string import Formatter, ascii_letters

from .cache import Cache
from .compat import compat_os_name, compat_shlex_quote
from .cookies import load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.common import UnsupportedURLIE
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .plugins import directories as plugin_directories
from .postprocessor import _PLUGIN_CLASSES as plugin_pps
from .postprocessor import (
    EmbedThumbnailPP,
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    FFmpegVideoConvertorPP,
    MoveFilesAfterDownloadPP,
    get_postprocessor,
)
from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
from .update import REPOSITORY, current_git_head, detect_variant
from .utils import (
    DEFAULT_OUTTMPL,
    IDENTITY,
    LINK_TEMPLATES,
    MEDIA_EXTENSIONS,
    NO_DEFAULT,
    NUMBER_RE,
    OUTTMPL_TYPES,
    POSTPROCESS_WHEN,
    STR_FORMAT_RE_TMPL,
    STR_FORMAT_TYPES,
    ContentTooShortError,
    DateRange,
    DownloadCancelled,
    DownloadError,
    EntryNotInPlaylist,
    ExistingVideoReached,
    ExtractorError,
    FormatSorter,
    GeoRestrictedError,
    HEADRequest,
    ISO3166Utils,
    LazyList,
    MaxDownloadsReached,
    Namespace,
    PagedList,
    PerRequestProxyHandler,
    PlaylistEntries,
    Popen,
    PostProcessingError,
    ReExtractInfo,
    RejectedVideoReached,
    SameFileError,
    UnavailableVideoError,
    UserNotLive,
    YoutubeDLCookieProcessor,
    YoutubeDLHandler,
    YoutubeDLRedirectHandler,
    age_restricted,
    args_to_str,
    bug_reports_message,
    date_from_str,
    deprecation_warning,
    determine_ext,
    determine_protocol,
    encode_compat_str,
    encodeFilename,
    error_to_compat_str,
    escapeHTML,
    expand_path,
    filter_dict,
    float_or_none,
    format_bytes,
    format_decimal_suffix,
    format_field,
    formatSeconds,
    get_compatible_ext,
    get_domain,
    int_or_none,
    iri_to_uri,
    is_path_like,
    join_nonempty,
    locked_file,
    make_archive_id,
    make_dir,
    make_HTTPS_handler,
    merge_headers,
    network_exceptions,
    number_of_digits,
    orderedSet,
    orderedSet_from_options,
    parse_filesize,
    preferredencoding,
    prepend_extension,
    register_socks_protocols,
    remove_terminal_sequences,
    render_table,
    replace_extension,
    sanitize_filename,
    sanitize_path,
    sanitize_url,
    sanitized_Request,
    std_headers,
    str_or_none,
    strftime_or_none,
    subtitles_filename,
    supports_terminal_sequences,
    system_identifier,
    timetuple_from_msec,
    to_high_limit_path,
    traverse_obj,
    try_call,
    try_get,
    url_basename,
    variadic,
    version_tuple,
    windows_enable_vt_mode,
    write_json_file,
    write_string,
)
from .version import CHANNEL, RELEASE_GIT_HEAD, VARIANT, __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL:
    """YoutubeDL class.

    YoutubeDL objects are responsible for downloading the actual video file
    and writing it to disk if the user has requested it, among some other
    tasks. In most cases there should be one per program. Given a video URL,
    the downloader on its own does not know how to extract all the needed
    information (that is the task of the InfoExtractors), so it has to pass
    the URL to one of them.

    For this, YoutubeDL objects have a method that allows InfoExtractors to
    be registered in a given order. When it is passed a URL, the YoutubeDL
    object hands it to the first InfoExtractor it finds that reports being
    able to handle it. The InfoExtractor extracts all the information about
    the video or videos the URL refers to, and YoutubeDL processes the
    extracted information, possibly using a File Downloader to download the
    video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge of the InfoExtractors
    that are added to it, so this is a "mutual registration".
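
    A minimal usage sketch (the URL and options below are only examples):

        from yt_dlp import YoutubeDL

        with YoutubeDL({'format': 'best', 'outtmpl': '%(title)s.%(ext)s'}) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])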

    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    netrc_location:    Location of the netrc file. Defaults to ~/.netrc.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A dict with keys WHEN mapped to a list of templates to
                       print to stdout. The allowed keys are video or any of the
                       items in utils.POSTPROCESS_WHEN.
                       For compatibility, a single list is also accepted
    print_to_file:     A dict with keys WHEN (same as forceprint) mapped to
                       a list of tuples with (template, filename)
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or list_thumbnails is used
    format:            Video format code. See "FORMAT SELECTION" for more details.
                       You can also pass a function. The function takes 'ctx' as
                       argument and returns the formats to download.
                       See "build_format_selector" for an implementation
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental)
    format_sort:       A list of fields by which to sort the video formats.
                       See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    prefer_free_formats: Whether to prefer video formats with free containers
                       over non-free ones of the same quality.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file
    check_formats:     Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none),
                       'selected' (check selected formats),
                       or None (check only if requested by extractor)
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py)
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       For compatibility with youtube-dl, a single string can also be used
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be Windows-compatible
    ignoreerrors:      Do not stop on download/postprocessing errors.
                       Can be 'only_download' to ignore only download errors.
                       Default is 'only_download' for CLI, but False for API
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped
    allowed_extractors: List of regexes to match against extractor names that are allowed
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False
                       For compatibility with youtube-dl,
                       "nooverwrites" may also be used instead
    playlist_items:    Specific indices of playlist to download.
    playlistrandom:    Download playlist items in random order.
    lazy_playlist:     Process playlist entries as they are received.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Print everything to stderr instead of stdout.
    consoletitle:      Display progress in the console window's titlebar.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video metadata to a .info.json file
    clean_infojson:    Remove private fields from the infojson
    getcomments:       Extract video comments. This will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc.
                       also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. The language can be prefixed with a "-" to
                       exclude it from the requested languages, e.g. ['all', '-live_chat']
    keepvideo:         Keep the video file after post-processing
    daterange:         A DateRange object; download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Unsuitable videos for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  A set, or the name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_per_url:     Whether break_on_reject and break_on_existing
                       should act on each input URL as opposed to for the entire queue
    cookiefile:        File name or text stream from where cookies should be read and dumped to
    cookiesfrombrowser: A tuple containing the name of the browser, the profile
                       name/path from where cookies are loaded, the name of the keyring,
                       and the container name, e.g. ('chrome', ) or
                       ('vivaldi', 'default', 'BASICTEXT') or ('firefox', 'default', None, 'Meta')
    legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
                       support RFC 5746 secure renegotiation
    nocheckcertificate: Do not verify SSL certificates
    client_certificate: Path to client certificate file in PEM format. May include the private key
    client_certificate_key: Path to private key file for client certificate
    client_certificate_password: Password for client certificate private key, if encrypted.
                       If not provided and the key is encrypted, yt-dlp will ask interactively
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       (Only supported by some extractors)
    enable_file_urls:  Enable file:// URLs. This is disabled by default for security reasons.
    http_headers:      A dictionary of custom headers to be used for all requests
    proxy:             URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic: Print out sent and received HTTP traffic
    default_search:    Prepend this string if an input URL is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Whether to resolve and process url_results further
                       * False: Always process (default)
                       * True: Never process
                       * 'in_playlist': Do not process inside playlist/multi_video
                       * 'discard': Always process, but don't return the result
                         from inside playlist/multi_video
                       * 'discard_in_playlist': Same as "discard", but only for
                         playlists (not multi_video)
    wait_for_video:    If given, wait for scheduled streams to become available.
                       The value should be a tuple containing the range
                       (min_secs, max_secs) to wait between retries
    postprocessors:    A list of dictionaries, each with an entry
                       * key: The name of the postprocessor. See
                         yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Allowed values are
                         the entries of utils.POSTPROCESS_WHEN
                         Assumed to be 'post_process' if not given
                       (see the usage sketch after this options list)
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                         Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading", or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                         None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                         unknown
                       * fragment_index: The counter of the currently
                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful
                       (see the usage sketch after this options list).
    postprocessor_hooks: A list of functions that get called on postprocessing
                       progress, with a dictionary with the entries
                       * status: One of "started", "processing", or "finished".
                         Check this first and ignore unknown values.
                       * postprocessor: Name of the postprocessor
                       * info_dict: The extracted info_dict

                       Progress hooks are guaranteed to be called at least twice
                       (with status "started" and "finished") if the processing is successful.
    merge_output_format: "/" separated list of extensions to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                         about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called for every video with the signature
                       (info_dict, *, incomplete: bool) -> Optional[str]
                       For backward compatibility with youtube-dl, the signature
                       (info_dict) -> Optional[str] is also allowed.
                       - If it returns a message, the video is ignored.
                       - If it returns None, the video is downloaded.
                       - If it returns utils.NO_DEFAULT, the user is interactively
                         asked whether to download the video.
                       - Raise utils.DownloadCancelled(msg) to abort remaining
                         downloads when a video is rejected.
                       match_filter_func in utils.py is one example for this
                       (see the usage sketch after this options list).
    no_color:          Do not emit color codes in output.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat, format-sort,
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
                       Refer to __init__.py for their implementation
    progress_template: Dictionary of templates for progress outputs.
                       Allowed keys are 'download', 'postprocess',
                       'download-title' (console title) and 'postprocess-title'.
                       The template is mapped on a dictionary with keys 'progress' and 'info'
    retry_sleep_functions: Dictionary of functions that take the number of attempts
                       as argument and return the time to sleep in seconds.
                       Allowed keys are 'http', 'fragment', 'file_access'
    download_ranges:   A callback function that gets called for every video with
                       the signature (info_dict, ydl) -> Iterable[Section].
                       Only the returned sections will be downloaded.
                       Each Section is a dict with the following keys:
                       * start_time: Start time of the section in seconds
                       * end_time: End time of the section in seconds
                       * title: Section title (Optional)
                       * index: Section number (Optional)
    force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
    noprogress:        Do not print the progress bar
    live_from_start:   Whether to download livestream videos from the start
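
    A rough sketch of a params dict wiring up several of the callbacks above
    (values are only illustrative; the postprocessor example assumes ffmpeg is available):

        def longer_than_a_minute(info, *, incomplete):
            # match_filter: return a message to skip the video, or None to accept it
            duration = info.get('duration')
            if duration and duration < 60:
                return 'The video is too short'

        def progress_hook(d):
            if d['status'] == 'finished':
                print('Downloaded', d['filename'])

        ydl_opts = {
            'match_filter': longer_than_a_minute,
            'progress_hooks': [progress_hook],
            'postprocessors': [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3'}],
        }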

    The following parameters are not used by YoutubeDL itself; they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP
                       For compatibility with youtube-dl, a single list of args
                       can also be used

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists into different formats at
                       discontinuities such as ad breaks (default: False)
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       E.g. {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched:      Mark videos watched (even with --simulate). Only for YouTube

    The following options are deprecated and may be removed in the future:

    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
                       - `raise DownloadCancelled(msg)` in match_filter instead
    force_generic_extractor: Force downloader to use the generic extractor
                       - Use allowed_extractors = ['generic', 'default']
    playliststart:     - Use playlist_items
                       Playlist item to start at.
    playlistend:       - Use playlist_items
                       Playlist item to end at.
    playlistreverse:   - Use playlist_items
                       Download playlist items in reverse order.
    forceurl:          - Use forceprint
                       Force printing final URL.
    forcetitle:        - Use forceprint
                       Force printing title.
    forceid:           - Use forceprint
                       Force printing ID.
    forcethumbnail:    - Use forceprint
                       Force printing thumbnail URL.
    forcedescription:  - Use forceprint
                       Force printing description.
    forcefilename:     - Use forceprint
                       Force printing final filename.
    forceduration:     - Use forceprint
                       Force printing duration.
    allsubtitles:      - Use subtitleslangs = ['all']
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    include_ads:       - Doesn't work
                       Download ads as well
    call_home:         - Not implemented
                       Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging.
    post_hooks:        - Register a custom postprocessor instead
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.
    prefer_ffmpeg:     - avconv support is deprecated
                       If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by the extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: - Use extractor_args
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by the extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
    """

    _NUMERIC_FIELDS = {
        'width', 'height', 'asr', 'audio_channels', 'fps',
        'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
    }

    _format_fields = {
        # NB: Keep in sync with the docstring of extractor/common.py
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
        'preference', 'language', 'language_preference', 'quality', 'source_preference',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'hls_aes', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
    }
    _format_selection_exts = {
        'audio': set(MEDIA_EXTENSIONS.common_audio),
        'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
        'storyboards': set(MEDIA_EXTENSIONS.storyboards),
    }

    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        if params is None:
            params = {}
        self.params = params
        self._ies = {}
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._num_videos = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)

        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            out=stdout,
            error=sys.stderr,
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
        )

        try:
            windows_enable_vt_mode()
        except Exception as e:
            self.write_debug(f'Failed to enable VT mode: {e}')

        self._allow_colors = Namespace(**{
            type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
            for type_, stream in self._out_files.items_ if type_ != 'console'
        })

        # The code is left like this to be reused for future deprecations
        MIN_SUPPORTED, MIN_RECOMMENDED = (3, 7), (3, 7)
        current_version = sys.version_info[:2]
        if current_version < MIN_RECOMMENDED:
            msg = ('Support for Python version %d.%d has been deprecated. '
                   'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details.'
                   '\n You will no longer receive updates on this version')
            if current_version < MIN_SUPPORTED:
                msg = 'Python version %d.%d is no longer supported'
            self.deprecated_feature(
                f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                ' If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        if self.params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = shutil.get_terminal_size().columns
                width_args = [] if width is None else ['-w', str(width)]
                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        if auto_init and auto_init != 'no_verbose_header':
            self.print_debug_header()

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecated_feature(msg)

        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        if self.params.get('simulate') is None and any((
            self.params.get('list_thumbnails'),
            self.params.get('listformats'),
            self.params.get('listsubtitles'),
        )):
            self.params['simulate'] = 'list_only'

        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}

        if auto_init:
            self.add_default_info_extractors()

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))

        # Set http_headers defaults according to std_headers
        self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))

        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                when=when)

        self._setup_opener()
        register_socks_protocols()

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            archive = set()
            if fn is None:
                return archive
            elif not is_path_like(fn):
                return fn

            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
            return archive

        self.archive = preload_download_archive(self.params.get('download_archive'))

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key; it will try to get one from
        the _ies list. If there is no instance, it will create a new one and
        add it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        all_ies = {ie.IE_NAME.lower(): ie for ie in gen_extractor_classes()}
        all_ies['end'] = UnsupportedURLIE()
        try:
            ie_names = orderedSet_from_options(
                self.params.get('allowed_extractors', ['default']), {
                    'all': list(all_ies),
                    'default': [name for name, ie in all_ies.items() if ie._ENABLED],
                }, use_regex=True)
        except re.error as e:
            raise ValueError(f'Wrong regex for allowed_extractors: {e.pattern}')
        for name in ie_names:
            self.add_info_extractor(all_ies[name])
        self.write_debug(f'Loaded {len(ie_names)} extractors')

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
            for pp in pps:
                pp.add_progress_hook(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. '
                                     'Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. '
                                     'Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)

    def to_screen(self, message, skip_eol=False, quiet=None, only_once=False):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            return
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            return
        self._write_string(
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen, only_once=only_once)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)

    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
            return
        self._write_string(code, self._out_files.console)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        else:
            self._send_console_code(f'\033]0;{message}\007')

    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[22;0t')  # Save the title on stack

    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[23;0t')  # Restore the title from stack

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None, is_error=True):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may throw an exception or not
        when errors are found, after printing the message.

        @param tb          If given, is additional traceback information
        @param is_error    Whether to raise error according to ignoreerrors
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not is_error:
            return
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    Styles = Namespace(
        HEADERS='yellow',
        EMPHASIS='light blue',
        FILENAME='green',
        ID='green',
        DELIM='blue',
        ERROR='red',
        WARNING='yellow',
        SUPPRESS='light black',
    )

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        text = str(text)
        if test_encoding:
            original_text = text
            # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
                text = fallback
        return format_text(text, f) if allow_colors else text if fallback is None else fallback

    def _format_out(self, *args, **kwargs):
        return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)

    def _format_screen(self, *args, **kwargs):
        return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)

    def _format_err(self, *args, **kwargs):
        return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message, *, stacklevel=0):
        deprecation_warning(
            message, stacklevel=stacklevel + 1, printer=self.report_error, is_error=False)

    def deprecated_feature(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'Deprecated Feature: {message}')
        self.to_stderr(f'{self._format_err("Deprecated Feature:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, *args, **kwargs):
        '''
        Do the same as trouble, but prefix the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

    def write_debug(self, message, only_once=False):
        '''Log debug message or print message to stderr'''
        if not self.params.get('verbose', False):
            return
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']

    def _parse_outtmpl(self):
        sanitize = IDENTITY
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict), '"paths" parameter must be a dictionary'
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$' respectively;
        # that is not what we want, since we need to keep '%%' intact for the
        # template dict substitution step. Work around it with a
        # boundary-like separator hack.
        sep = ''.join(random.choices(ascii_letters, k=32))
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. E.g. for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
1093 ''' Escape any remaining strings like %s, %abc% etc. '''
1094 return re.sub(
1095 STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
1096 lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
1097 outtmpl)

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err

    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)
        return info_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed
        """

        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int, slice or "{field, ...}"
        FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}
        FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {
            'inner': FIELD_INNER_RE,
            'field': rf'\w*(?:\.{FIELD_INNER_RE})*'
        }
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?xs)
            (?P<negate>-)?
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<remaining>
                (?P<alternate>(?<!\\),[^|&)]+)?
                (?:&(?P<replacement>.*?))?
                (?:\|(?P<default>.*?))?
            )$''')
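        # A few illustrative field expressions this regex is meant to parse, per the
        # documented output-template syntax: 'title', 'formats.0.url' (object traversal),
        # 'playlist_index+1' (maths), 'duration>%H-%M-%S' (strftime-style formatting),
        # 'title,fulltitle' (alternate fields), 'uploader|Unknown' (default value)
        # and 'is_live&(Live)' (replacement text).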

        def _traverse_infodict(fields):
            fields = [f for x in re.split(r'\.({.+?})\.?', fields)
                      for f in ([x] if x.startswith('{') else x.split('.'))]
            for i in (0, -1):
                if fields and not fields[i]:
                    fields.pop(i)

            for i, f in enumerate(fields):
                if not f.startswith('{'):
                    continue
                assert f.endswith('}'), f'No closing brace for {f} in {fields}'
                fields[i] = {k: k.split('.') for k in f[1:-1].split(',')}

            return traverse_obj(info_dict, fields, is_user_input=True, traverse_string=True)

        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
            if sanitize and value == '':
                value = None
            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']
                else NO_DEFAULT))

        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            return repr(obj)

        class _ReplacementFormatter(Formatter):
            def get_field(self, field_name, args, kwargs):
                if field_name.isdigit():
                    return args[0], -1
                raise ValueError('Unsupported field')

        replacement_formatter = _ReplacementFormatter()

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            initial_field = mobj.group('fields') if mobj else ''
            value, replacement, default = None, None, na
            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                replacement = mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
                else:
                    break

            fmt = outer_mobj.group('format')
            if fmt == 's' and value is not None and key in field_size_compat_map.keys():
                fmt = f'0{field_size_compat_map[key]:d}d'

            if value is None:
                value = default
            elif replacement is not None:
                try:
                    value = replacement_formatter.format(replacement, value)
                except ValueError:
                    value = na

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
            if fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(
                    value, default=_dumpjson_default,
                    indent=4 if '#' in flags else None, ensure_ascii='+' not in flags), str_fmt
            elif fmt[-1] == 'h':  # html
                value, fmt = escapeHTML(str(value)), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode() % str(value).encode()
                value, fmt = value.decode('utf-8', 'ignore'), 's'
524e2e4f 1295 elif fmt[-1] == 'U': # unicode normalized
524e2e4f 1296 value, fmt = unicodedata.normalize(
1297 # "+" = compatibility equivalence, "#" = NFD
4476d2c7 1298 'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
524e2e4f 1299 value), str_fmt
e0fd9573 1300 elif fmt[-1] == 'D': # decimal suffix
abbeeebc 1301 num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
1302 value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
1303 factor=1024 if '#' in flags else 1000)
37893bb0 1304 elif fmt[-1] == 'S': # filename sanitization
e0fd9573 1305 value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
7d1eb38a 1306 elif fmt[-1] == 'c':
524e2e4f 1307 if value:
1308 value = str(value)[0]
76a264ac 1309 else:
524e2e4f 1310 fmt = str_fmt
76a264ac 1311 elif fmt[-1] not in 'rs': # numeric
a439a3a4 1312 value = float_or_none(value)
752cda38 1313 if value is None:
1314 value, fmt = default, 's'
901130bb 1315
752cda38 1316 if sanitize:
1317 if fmt[-1] == 'r':
1318 # If value is an object, sanitize might convert it to a string
1319 # So we convert it to repr first
7d1eb38a 1320 value, fmt = repr(value), str_fmt
639f1cea 1321 if fmt[-1] in 'csr':
e0fd9573 1322 value = sanitizer(initial_field, value)
901130bb 1323
b868936c 1324 key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
385a27fa 1325 TMPL_DICT[key] = value
b868936c 1326 return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))
752cda38 1327
385a27fa 1328 return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT
143db31d 1329
819e0531 1330 def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
1331 outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
1332 return self.escape_outtmpl(outtmpl) % info_dict
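# Usage sketch (illustrative): within this class the output template is expanded as e.g.
#   self.evaluate_outtmpl('%(title)s [%(id)s].%(ext)s', info_dict)
# which prepares the template, escapes stray '%' and applies printf-style substitution.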
1333
5127e92a 1334 def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
1335 assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
1336 if outtmpl is None:
bf1824b3 1337 outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
8222d8de 1338 try:
5127e92a 1339 outtmpl = self._outtmpl_expandpath(outtmpl)
e0fd9573 1340 filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
6a0546e3 1341 if not filename:
1342 return None
15da37c7 1343
5127e92a 1344 if tmpl_type in ('', 'temp'):
6a0546e3 1345 final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
1346 if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
1347 filename = replace_extension(filename, ext, final_ext)
5127e92a 1348 elif tmpl_type:
6a0546e3 1349 force_ext = OUTTMPL_TYPES[tmpl_type]
1350 if force_ext:
1351 filename = replace_extension(filename, force_ext, info_dict.get('ext'))
de6000d9 1352
bdc3fd2f
U
1353 # https://github.com/blackjack4494/youtube-dlc/issues/85
1354 trim_file_name = self.params.get('trim_file_name', False)
1355 if trim_file_name:
5c22c63d 1356 no_ext, *ext = filename.rsplit('.', 2)
1357 filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')
bdc3fd2f 1358
0202b52a 1359 return filename
8222d8de 1360 except ValueError as err:
6febd1c1 1361 self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
8222d8de
JMF
1362 return None
1363
5127e92a 1364 def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
1365 """Generate the output filename"""
1366 if outtmpl:
1367 assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
1368 dir_type = None
1369 filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
80c03fa9 1370 if not filename and dir_type not in ('', 'temp'):
1371 return ''
de6000d9 1372
c84aeac6 1373 if warn:
21cd8fae 1374 if not self.params.get('paths'):
de6000d9 1375 pass
1376 elif filename == '-':
c84aeac6 1377 self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
de6000d9 1378 elif os.path.isabs(filename):
c84aeac6 1379 self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
de6000d9 1380 if filename == '-' or not filename:
1381 return filename
1382
21cd8fae 1383 return self.get_output_path(dir_type, filename)
0202b52a 1384
120fe513 1385 def _match_entry(self, info_dict, incomplete=False, silent=False):
6368e2e6 1386 """Returns None if the file should be downloaded"""
d7b460d0 1387 _type = info_dict.get('_type', 'video')
1388 assert incomplete or _type == 'video', 'Only video result can be considered complete'
8222d8de 1389
3bec830a 1390 video_title = info_dict.get('title', info_dict.get('id', 'entry'))
c77495e3 1391
8b0d7497 1392 def check_filter():
d7b460d0 1393 if _type in ('playlist', 'multi_video'):
1394 return
1395 elif _type in ('url', 'url_transparent') and not try_call(
1396 lambda: self.get_info_extractor(info_dict['ie_key']).is_single_video(info_dict['url'])):
1397 return
1398
8b0d7497 1399 if 'title' in info_dict:
1400 # This can happen when we're just evaluating the playlist
1401 title = info_dict['title']
1402 matchtitle = self.params.get('matchtitle', False)
1403 if matchtitle:
1404 if not re.search(matchtitle, title, re.IGNORECASE):
1405 return '"' + title + '" title did not match pattern "' + matchtitle + '"'
1406 rejecttitle = self.params.get('rejecttitle', False)
1407 if rejecttitle:
1408 if re.search(rejecttitle, title, re.IGNORECASE):
1409 return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
6368e2e6 1410
8b0d7497 1411 date = info_dict.get('upload_date')
1412 if date is not None:
1413 dateRange = self.params.get('daterange', DateRange())
1414 if date not in dateRange:
86e5f3ed 1415 return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
8b0d7497 1416 view_count = info_dict.get('view_count')
1417 if view_count is not None:
1418 min_views = self.params.get('min_views')
1419 if min_views is not None and view_count < min_views:
1420 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
1421 max_views = self.params.get('max_views')
1422 if max_views is not None and view_count > max_views:
1423 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
1424 if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
1425 return 'Skipping "%s" because it is age restricted' % video_title
8b0d7497 1426
8f18aca8 1427 match_filter = self.params.get('match_filter')
fe2ce85a 1428 if match_filter is None:
1429 return None
1430
1431 cancelled = None
1432 try:
8f18aca8 1433 try:
1434 ret = match_filter(info_dict, incomplete=incomplete)
1435 except TypeError:
1436 # For backward compatibility
1437 ret = None if incomplete else match_filter(info_dict)
fe2ce85a 1438 except DownloadCancelled as err:
1439 if err.msg is not NO_DEFAULT:
1440 raise
1441 ret, cancelled = err.msg, err
1442
1443 if ret is NO_DEFAULT:
1444 while True:
1445 filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
1446 reply = input(self._format_screen(
1447 f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
1448 if reply in {'y', ''}:
1449 return None
1450 elif reply == 'n':
1451 if cancelled:
1452 raise type(cancelled)(f'Skipping {video_title}')
1453 return f'Skipping {video_title}'
1454 return ret
8b0d7497 1455
c77495e3 1456 if self.in_download_archive(info_dict):
1457 reason = '%s has already been recorded in the archive' % video_title
1458 break_opt, break_err = 'break_on_existing', ExistingVideoReached
1459 else:
fe2ce85a 1460 try:
1461 reason = check_filter()
1462 except DownloadCancelled as e:
1463 reason, break_opt, break_err = e.msg, 'match_filter', type(e)
1464 else:
1465 break_opt, break_err = 'break_on_reject', RejectedVideoReached
8b0d7497 1466 if reason is not None:
120fe513 1467 if not silent:
1468 self.to_screen('[download] ' + reason)
c77495e3 1469 if self.params.get(break_opt, False):
1470 raise break_err()
8b0d7497 1471 return reason
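# Descriptive summary of the contract above: a 'match_filter' callable returns None to accept
# the entry, a string reason to skip it, or NO_DEFAULT to trigger the interactive
# 'Download "..."? (Y/n)' prompt; with break_on_existing/break_on_reject set, a skip is
# escalated to ExistingVideoReached/RejectedVideoReached instead.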
fe7e0c98 1472
b6c45014
JMF
1473 @staticmethod
1474 def add_extra_info(info_dict, extra_info):
1475 '''Set the keys from extra_info in info dict if they are missing'''
1476 for key, value in extra_info.items():
1477 info_dict.setdefault(key, value)
1478
409e1828 1479 def extract_info(self, url, download=True, ie_key=None, extra_info=None,
61aa5ba3 1480 process=True, force_generic_extractor=False):
41d1cca3 1481 """
17ffed18 1482 Extract and return the information dictionary of the URL
41d1cca3 1483
1484 Arguments:
17ffed18 1485 @param url URL to extract
41d1cca3 1486
1487 Keyword arguments:
17ffed18 1488 @param download Whether to download videos
1489 @param process Whether to resolve all unresolved references (URLs, playlist items).
1490 Must be True for download to work
1491 @param ie_key Use only the extractor with this key
1492
1493 @param extra_info Dictionary containing the extra values to add to the info (For internal use only)
1494 @param force_generic_extractor Force using the generic extractor (Deprecated; use ie_key='Generic')
41d1cca3 1495 """
fe7e0c98 1496
409e1828 1497 if extra_info is None:
1498 extra_info = {}
1499
61aa5ba3 1500 if not ie_key and force_generic_extractor:
d22dec74
S
1501 ie_key = 'Generic'
1502
8222d8de 1503 if ie_key:
fe7866d0 1504 ies = {ie_key: self._ies[ie_key]} if ie_key in self._ies else {}
8222d8de
JMF
1505 else:
1506 ies = self._ies
1507
fe7866d0 1508 for key, ie in ies.items():
8222d8de
JMF
1509 if not ie.suitable(url):
1510 continue
1511
1512 if not ie.working():
6febd1c1
PH
1513 self.report_warning('The program functionality for this site has been marked as broken, '
1514 'and will probably not work.')
8222d8de 1515
1151c407 1516 temp_id = ie.get_temp_id(url)
fe7866d0 1517 if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': key}):
1518 self.to_screen(f'[{key}] {temp_id}: has already been recorded in the archive')
5e5be0c0 1519 if self.params.get('break_on_existing', False):
1520 raise ExistingVideoReached()
a0566bbf 1521 break
fe7866d0 1522 return self.__extract_info(url, self.get_info_extractor(key), download, extra_info, process)
a0566bbf 1523 else:
fe7866d0 1524 extractors_restricted = self.params.get('allowed_extractors') not in (None, ['default'])
1525 self.report_error(f'No suitable extractor{format_field(ie_key, None, " (%s)")} found for URL {url}',
1526 tb=False if extractors_restricted else None)
a0566bbf 1527
7e88d7d7 1528 def _handle_extraction_exceptions(func):
b5ae35ee 1529 @functools.wraps(func)
a0566bbf 1530 def wrapper(self, *args, **kwargs):
6da22e7d 1531 while True:
1532 try:
1533 return func(self, *args, **kwargs)
1534 except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
8222d8de 1535 raise
6da22e7d 1536 except ReExtractInfo as e:
1537 if e.expected:
1538 self.to_screen(f'{e}; Re-extracting data')
1539 else:
1540 self.to_stderr('\r')
1541 self.report_warning(f'{e}; Re-extracting data')
1542 continue
1543 except GeoRestrictedError as e:
1544 msg = e.msg
1545 if e.countries:
1546 msg += '\nThis video is available in %s.' % ', '.join(
1547 map(ISO3166Utils.short2full, e.countries))
1548 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around this.'
1549 self.report_error(msg)
1550 except ExtractorError as e: # An error we somewhat expected
1551 self.report_error(str(e), e.format_traceback())
1552 except Exception as e:
1553 if self.params.get('ignoreerrors'):
1554 self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
1555 else:
1556 raise
1557 break
a0566bbf 1558 return wrapper
1559
693f0600 1560 def _wait_for_video(self, ie_result={}):
f2ebc5c7 1561 if (not self.params.get('wait_for_video')
1562 or ie_result.get('_type', 'video') != 'video'
1563 or ie_result.get('formats') or ie_result.get('url')):
1564 return
1565
1566 format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
1567 last_msg = ''
1568
1569 def progress(msg):
1570 nonlocal last_msg
a7dc6a89 1571 full_msg = f'{msg}\n'
1572 if not self.params.get('noprogress'):
1573 full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
1574 elif last_msg:
1575 return
1576 self.to_screen(full_msg, skip_eol=True)
f2ebc5c7 1577 last_msg = msg
1578
1579 min_wait, max_wait = self.params.get('wait_for_video')
1580 diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
1581 if diff is None and ie_result.get('live_status') == 'is_upcoming':
16c620bc 1582 diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
f2ebc5c7 1583 self.report_warning('Release time of video is not known')
693f0600 1584 elif ie_result and (diff or 0) <= 0:
f2ebc5c7 1585 self.report_warning('Video should already be available according to extracted info')
38d79fd1 1586 diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
f2ebc5c7 1587 self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')
1588
1589 wait_till = time.time() + diff
1590 try:
1591 while True:
1592 diff = wait_till - time.time()
1593 if diff <= 0:
1594 progress('')
1595 raise ReExtractInfo('[wait] Wait period ended', expected=True)
1596 progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
1597 time.sleep(1)
1598 except KeyboardInterrupt:
1599 progress('')
1600 raise ReExtractInfo('[wait] Interrupted by user', expected=True)
1601 except BaseException as e:
1602 if not isinstance(e, ReExtractInfo):
1603 self.to_screen('')
1604 raise
1605
7e88d7d7 1606 @_handle_extraction_exceptions
58f197b7 1607 def __extract_info(self, url, ie, download, extra_info, process):
693f0600 1608 try:
1609 ie_result = ie.extract(url)
1610 except UserNotLive as e:
1611 if process:
1612 if self.params.get('wait_for_video'):
1613 self.report_warning(e)
1614 self._wait_for_video()
1615 raise
a0566bbf 1616 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
cb794ee0 1617 self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
a0566bbf 1618 return
1619 if isinstance(ie_result, list):
1620 # Backwards compatibility: old IE result format
1621 ie_result = {
1622 '_type': 'compat_list',
1623 'entries': ie_result,
1624 }
e37d0efb 1625 if extra_info.get('original_url'):
1626 ie_result.setdefault('original_url', extra_info['original_url'])
a0566bbf 1627 self.add_default_extra_info(ie_result, ie, url)
1628 if process:
f2ebc5c7 1629 self._wait_for_video(ie_result)
a0566bbf 1630 return self.process_ie_result(ie_result, download, extra_info)
8222d8de 1631 else:
a0566bbf 1632 return ie_result
fe7e0c98 1633
ea38e55f 1634 def add_default_extra_info(self, ie_result, ie, url):
6033d980 1635 if url is not None:
1636 self.add_extra_info(ie_result, {
1637 'webpage_url': url,
1638 'original_url': url,
57ebfca3 1639 })
1640 webpage_url = ie_result.get('webpage_url')
1641 if webpage_url:
1642 self.add_extra_info(ie_result, {
1643 'webpage_url_basename': url_basename(webpage_url),
1644 'webpage_url_domain': get_domain(webpage_url),
6033d980 1645 })
1646 if ie is not None:
1647 self.add_extra_info(ie_result, {
1648 'extractor': ie.IE_NAME,
1649 'extractor_key': ie.ie_key(),
1650 })
ea38e55f 1651
58adec46 1652 def process_ie_result(self, ie_result, download=True, extra_info=None):
8222d8de
JMF
1653 """
1654 Take the result of the ie (may be modified) and resolve all unresolved
1655 references (URLs, playlist items).
1656
1657 It will also download the videos if 'download' is True.
1658 Returns the resolved ie_result.
1659 """
58adec46 1660 if extra_info is None:
1661 extra_info = {}
e8ee972c
PH
1662 result_type = ie_result.get('_type', 'video')
1663
057a5206 1664 if result_type in ('url', 'url_transparent'):
8f97a15d 1665 ie_result['url'] = sanitize_url(
1666 ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
8791e78c 1667 if ie_result.get('original_url') and not extra_info.get('original_url'):
1668 extra_info = {'original_url': ie_result['original_url'], **extra_info}
e37d0efb 1669
057a5206 1670 extract_flat = self.params.get('extract_flat', False)
3089bc74
S
1671 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1672 or extract_flat is True):
ecb54191 1673 info_copy = ie_result.copy()
6033d980 1674 ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
360167b9 1675 if ie and not ie_result.get('id'):
4614bc22 1676 info_copy['id'] = ie.get_temp_id(ie_result['url'])
6033d980 1677 self.add_default_extra_info(info_copy, ie, ie_result['url'])
4614bc22 1678 self.add_extra_info(info_copy, extra_info)
b5475f11 1679 info_copy, _ = self.pre_process(info_copy)
94dc8604 1680 self._fill_common_fields(info_copy, False)
17060584 1681 self.__forced_printings(info_copy)
415f8d51 1682 self._raise_pending_errors(info_copy)
4614bc22 1683 if self.params.get('force_write_download_archive', False):
1684 self.record_download_archive(info_copy)
e8ee972c
PH
1685 return ie_result
1686
8222d8de 1687 if result_type == 'video':
b6c45014 1688 self.add_extra_info(ie_result, extra_info)
9c2b75b5 1689 ie_result = self.process_video_result(ie_result, download=download)
415f8d51 1690 self._raise_pending_errors(ie_result)
28b0eb0f 1691 additional_urls = (ie_result or {}).get('additional_urls')
9c2b75b5 1692 if additional_urls:
e9f4ccd1 1693 # TODO: Improve MetadataParserPP to allow setting a list
14f25df2 1694 if isinstance(additional_urls, str):
9c2b75b5 1695 additional_urls = [additional_urls]
1696 self.to_screen(
1697 '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
1698 self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
1699 ie_result['additional_entries'] = [
1700 self.extract_info(
b69fd25c 1701 url, download, extra_info=extra_info,
9c2b75b5 1702 force_generic_extractor=self.params.get('force_generic_extractor'))
1703 for url in additional_urls
1704 ]
1705 return ie_result
8222d8de
JMF
1706 elif result_type == 'url':
1707 # We have to add extra_info to the results because it may be
1708 # contained in a playlist
07cce701 1709 return self.extract_info(
1710 ie_result['url'], download,
1711 ie_key=ie_result.get('ie_key'),
1712 extra_info=extra_info)
7fc3fa05
PH
1713 elif result_type == 'url_transparent':
1714 # Use the information from the embedding page
1715 info = self.extract_info(
1716 ie_result['url'], ie_key=ie_result.get('ie_key'),
1717 extra_info=extra_info, download=False, process=False)
1718
1640eb09
S
1719 # extract_info may return None when ignoreerrors is enabled and
1720 # extraction failed with an error, don't crash and return early
1721 # in this case
1722 if not info:
1723 return info
1724
3975b4d2 1725 exempted_fields = {'_type', 'url', 'ie_key'}
1726 if not ie_result.get('section_end') and ie_result.get('section_start') is None:
1727 # For video clips, the id etc of the clip extractor should be used
1728 exempted_fields |= {'id', 'extractor', 'extractor_key'}
1729
412c617d 1730 new_result = info.copy()
3975b4d2 1731 new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))
7fc3fa05 1732
0563f7ac
S
1733 # Extracted info may not be a video result (i.e.
1734 # info.get('_type', 'video') != video) but rather an url or
1735 # url_transparent. In such cases outer metadata (from ie_result)
1736 # should be propagated to inner one (info). For this to happen
1737 # _type of info should be overridden with url_transparent. This
067aa17e 1738 # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
0563f7ac
S
1739 if new_result.get('_type') == 'url':
1740 new_result['_type'] = 'url_transparent'
7fc3fa05
PH
1741
1742 return self.process_ie_result(
1743 new_result, download=download, extra_info=extra_info)
40fcba5e 1744 elif result_type in ('playlist', 'multi_video'):
30a074c2 1745 # Protect from infinite recursion due to recursively nested playlists
1746 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
0bd5a039 1747 webpage_url = ie_result.get('webpage_url') # Playlists may not have webpage_url
1748 if webpage_url and webpage_url in self._playlist_urls:
7e85e872 1749 self.to_screen(
30a074c2 1750 '[download] Skipping already downloaded playlist: %s'
1751 % (ie_result.get('title') or ie_result.get('id')))
1752 return
7e85e872 1753
30a074c2 1754 self._playlist_level += 1
1755 self._playlist_urls.add(webpage_url)
03f83004 1756 self._fill_common_fields(ie_result, False)
bc516a3f 1757 self._sanitize_thumbnails(ie_result)
30a074c2 1758 try:
1759 return self.__process_playlist(ie_result, download)
1760 finally:
1761 self._playlist_level -= 1
1762 if not self._playlist_level:
1763 self._playlist_urls.clear()
8222d8de 1764 elif result_type == 'compat_list':
c9bf4114
PH
1765 self.report_warning(
1766 'Extractor %s returned a compat_list result. '
1767 'It needs to be updated.' % ie_result.get('extractor'))
5f6a1245 1768
8222d8de 1769 def _fixup(r):
b868936c 1770 self.add_extra_info(r, {
1771 'extractor': ie_result['extractor'],
1772 'webpage_url': ie_result['webpage_url'],
1773 'webpage_url_basename': url_basename(ie_result['webpage_url']),
0bb322b9 1774 'webpage_url_domain': get_domain(ie_result['webpage_url']),
b868936c 1775 'extractor_key': ie_result['extractor_key'],
1776 })
8222d8de
JMF
1777 return r
1778 ie_result['entries'] = [
b6c45014 1779 self.process_ie_result(_fixup(r), download, extra_info)
8222d8de
JMF
1780 for r in ie_result['entries']
1781 ]
1782 return ie_result
1783 else:
1784 raise Exception('Invalid result type: %s' % result_type)
1785
e92caff5 1786 def _ensure_dir_exists(self, path):
1787 return make_dir(path, self.report_error)
1788
3b603dbd 1789 @staticmethod
3bec830a 1790 def _playlist_infodict(ie_result, strict=False, **kwargs):
1791 info = {
1792 'playlist_count': ie_result.get('playlist_count'),
3b603dbd 1793 'playlist': ie_result.get('title') or ie_result.get('id'),
1794 'playlist_id': ie_result.get('id'),
1795 'playlist_title': ie_result.get('title'),
1796 'playlist_uploader': ie_result.get('uploader'),
1797 'playlist_uploader_id': ie_result.get('uploader_id'),
3b603dbd 1798 **kwargs,
1799 }
3bec830a 1800 if strict:
1801 return info
0bd5a039 1802 if ie_result.get('webpage_url'):
1803 info.update({
1804 'webpage_url': ie_result['webpage_url'],
1805 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1806 'webpage_url_domain': get_domain(ie_result['webpage_url']),
1807 })
3bec830a 1808 return {
1809 **info,
1810 'playlist_index': 0,
59d7de0d 1811 '__last_playlist_index': max(ie_result.get('requested_entries') or (0, 0)),
3bec830a 1812 'extractor': ie_result['extractor'],
3bec830a 1813 'extractor_key': ie_result['extractor_key'],
1814 }
3b603dbd 1815
30a074c2 1816 def __process_playlist(self, ie_result, download):
7e88d7d7 1817 """Process each entry in the playlist"""
f5ea4748 1818 assert ie_result['_type'] in ('playlist', 'multi_video')
1819
3bec830a 1820 common_info = self._playlist_infodict(ie_result, strict=True)
3955b207 1821 title = common_info.get('playlist') or '<Untitled>'
3bec830a 1822 if self._match_entry(common_info, incomplete=True) is not None:
1823 return
c6e07cf1 1824 self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')
f0d785d3 1825
7e88d7d7 1826 all_entries = PlaylistEntries(self, ie_result)
7e9a6125 1827 entries = orderedSet(all_entries.get_requested_items(), lazy=True)
1828
1829 lazy = self.params.get('lazy_playlist')
1830 if lazy:
1831 resolved_entries, n_entries = [], 'N/A'
1832 ie_result['requested_entries'], ie_result['entries'] = None, None
1833 else:
1834 entries = resolved_entries = list(entries)
1835 n_entries = len(resolved_entries)
1836 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
1837 if not ie_result.get('playlist_count'):
1838 # Better to do this after potentially exhausting entries
1839 ie_result['playlist_count'] = all_entries.get_full_count()
498f5606 1840
0647d925 1841 extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
1842 ie_copy = collections.ChainMap(ie_result, extra)
3bec830a 1843
e08a85d8 1844 _infojson_written = False
0bfc53d0 1845 write_playlist_files = self.params.get('allow_playlist_files', True)
1846 if write_playlist_files and self.params.get('list_thumbnails'):
1847 self.list_thumbnails(ie_result)
1848 if write_playlist_files and not self.params.get('simulate'):
e08a85d8 1849 _infojson_written = self._write_info_json(
1850 'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
1851 if _infojson_written is None:
80c03fa9 1852 return
1853 if self._write_description('playlist', ie_result,
1854 self.prepare_filename(ie_copy, 'pl_description')) is None:
1855 return
681de68e 1856 # TODO: This should be passed to ThumbnailsConvertor if necessary
3bec830a 1857 self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))
30a074c2 1858
7e9a6125 1859 if lazy:
1860 if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
1861 self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
1862 elif self.params.get('playlistreverse'):
1863 entries.reverse()
1864 elif self.params.get('playlistrandom'):
30a074c2 1865 random.shuffle(entries)
1866
bc5c2f8a 1867 self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} items'
7e88d7d7 1868 f'{format_field(ie_result, "playlist_count", " of %s")}')
30a074c2 1869
134c913c 1870 keep_resolved_entries = self.params.get('extract_flat') != 'discard'
1871 if self.params.get('extract_flat') == 'discard_in_playlist':
1872 keep_resolved_entries = ie_result['_type'] != 'playlist'
1873 if keep_resolved_entries:
1874 self.write_debug('The information of all playlist entries will be held in memory')
1875
26e2805c 1876 failures = 0
1877 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
7e9a6125 1878 for i, (playlist_index, entry) in enumerate(entries):
1879 if lazy:
1880 resolved_entries.append((playlist_index, entry))
3bec830a 1881 if not entry:
7e88d7d7 1882 continue
1883
7e88d7d7 1884 entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
7e9a6125 1885 if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
1886 playlist_index = ie_result['requested_entries'][i]
1887
0647d925 1888 entry_copy = collections.ChainMap(entry, {
3bec830a 1889 **common_info,
3955b207 1890 'n_entries': int_or_none(n_entries),
71729754 1891 'playlist_index': playlist_index,
7e9a6125 1892 'playlist_autonumber': i + 1,
0647d925 1893 })
3bec830a 1894
0647d925 1895 if self._match_entry(entry_copy, incomplete=True) is not None:
f0ad6f8c 1896 # For compatibility with youtube-dl. See https://github.com/yt-dlp/yt-dlp/issues/4369
1897 resolved_entries[i] = (playlist_index, NO_DEFAULT)
3bec830a 1898 continue
1899
bc5c2f8a 1900 self.to_screen('[download] Downloading item %s of %s' % (
3bec830a 1901 self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
1902
ec54bd43 1903 entry_result = self.__process_iterable_entry(entry, download, collections.ChainMap({
a6ca61d4 1904 'playlist_index': playlist_index,
1905 'playlist_autonumber': i + 1,
ec54bd43 1906 }, extra))
26e2805c 1907 if not entry_result:
1908 failures += 1
1909 if failures >= max_failures:
1910 self.report_error(
7e88d7d7 1911 f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
26e2805c 1912 break
134c913c 1913 if keep_resolved_entries:
1914 resolved_entries[i] = (playlist_index, entry_result)
7e88d7d7 1915
1916 # Update with processed data
f0ad6f8c 1917 ie_result['entries'] = [e for _, e in resolved_entries if e is not NO_DEFAULT]
bc5c2f8a 1918 ie_result['requested_entries'] = [i for i, e in resolved_entries if e is not NO_DEFAULT]
1919 if ie_result['requested_entries'] == try_call(lambda: list(range(1, ie_result['playlist_count'] + 1))):
1920 # Do not set for full playlist
1921 ie_result.pop('requested_entries')
e08a85d8 1922
1923 # Write the updated info to json
cb96c5be 1924 if _infojson_written is True and self._write_info_json(
e08a85d8 1925 'updated playlist', ie_result,
1926 self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
1927 return
ca30f449 1928
ed5835b4 1929 ie_result = self.run_all_pps('playlist', ie_result)
7e88d7d7 1930 self.to_screen(f'[download] Finished downloading playlist: {title}')
30a074c2 1931 return ie_result
1932
7e88d7d7 1933 @_handle_extraction_exceptions
a0566bbf 1934 def __process_iterable_entry(self, entry, download, extra_info):
1935 return self.process_ie_result(
1936 entry, download=download, extra_info=extra_info)
1937
67134eab
JMF
1938 def _build_format_filter(self, filter_spec):
1939 " Returns a function to filter the formats according to the filter_spec "
083c9df9
PH
1940
1941 OPERATORS = {
1942 '<': operator.lt,
1943 '<=': operator.le,
1944 '>': operator.gt,
1945 '>=': operator.ge,
1946 '=': operator.eq,
1947 '!=': operator.ne,
1948 }
67134eab 1949 operator_rex = re.compile(r'''(?x)\s*
c3f624ef 1950 (?P<key>[\w.-]+)\s*
187986a8 1951 (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1952 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
083c9df9 1953 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
187986a8 1954 m = operator_rex.fullmatch(filter_spec)
9ddb6925
S
1955 if m:
1956 try:
1957 comparison_value = int(m.group('value'))
1958 except ValueError:
1959 comparison_value = parse_filesize(m.group('value'))
1960 if comparison_value is None:
1961 comparison_value = parse_filesize(m.group('value') + 'B')
1962 if comparison_value is None:
1963 raise ValueError(
1964 'Invalid value %r in format specification %r' % (
67134eab 1965 m.group('value'), filter_spec))
9ddb6925
S
1966 op = OPERATORS[m.group('op')]
1967
083c9df9 1968 if not m:
9ddb6925
S
1969 STR_OPERATORS = {
1970 '=': operator.eq,
10d33b34
YCH
1971 '^=': lambda attr, value: attr.startswith(value),
1972 '$=': lambda attr, value: attr.endswith(value),
1973 '*=': lambda attr, value: value in attr,
1ce9a3cb 1974 '~=': lambda attr, value: value.search(attr) is not None
9ddb6925 1975 }
187986a8 1976 str_operator_rex = re.compile(r'''(?x)\s*
1977 (?P<key>[a-zA-Z0-9._-]+)\s*
1ce9a3cb
LF
1978 (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
1979 (?P<quote>["'])?
1980 (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
1981 (?(quote)(?P=quote))\s*
9ddb6925 1982 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
187986a8 1983 m = str_operator_rex.fullmatch(filter_spec)
9ddb6925 1984 if m:
1ce9a3cb
LF
1985 if m.group('op') == '~=':
1986 comparison_value = re.compile(m.group('value'))
1987 else:
1988 comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
2cc779f4
S
1989 str_op = STR_OPERATORS[m.group('op')]
1990 if m.group('negation'):
e118a879 1991 op = lambda attr, value: not str_op(attr, value)
2cc779f4
S
1992 else:
1993 op = str_op
083c9df9 1994
9ddb6925 1995 if not m:
187986a8 1996 raise SyntaxError('Invalid filter specification %r' % filter_spec)
083c9df9
PH
1997
1998 def _filter(f):
1999 actual_value = f.get(m.group('key'))
2000 if actual_value is None:
2001 return m.group('none_inclusive')
2002 return op(actual_value, comparison_value)
67134eab
JMF
2003 return _filter
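# Illustrative filter specs accepted by _build_format_filter (examples only):
#   [height<=720]        - numeric comparison; size suffixes work too, e.g. [filesize>100M]
#   [ext=mp4]            - string equality; ^= $= *= ~= match prefix/suffix/substring/regex
#   [vcodec!*=av01]      - '!' negates a string operator
#   [height<=?720]       - '?' after the operator also accepts formats missing the field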
2004
9f1a1c36 2005 def _check_formats(self, formats):
2006 for f in formats:
2007 self.to_screen('[info] Testing format %s' % f['format_id'])
75689fe5 2008 path = self.get_output_path('temp')
2009 if not self._ensure_dir_exists(f'{path}/'):
2010 continue
2011 temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
9f1a1c36 2012 temp_file.close()
2013 try:
2014 success, _ = self.dl(temp_file.name, f, test=True)
8a82af35 2015 except (DownloadError, OSError, ValueError) + network_exceptions:
9f1a1c36 2016 success = False
2017 finally:
2018 if os.path.exists(temp_file.name):
2019 try:
2020 os.remove(temp_file.name)
2021 except OSError:
2022 self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
2023 if success:
2024 yield f
2025 else:
2026 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
2027
0017d9ad 2028 def _default_format_spec(self, info_dict, download=True):
0017d9ad 2029
af0f7428
S
2030 def can_merge():
2031 merger = FFmpegMergerPP(self)
2032 return merger.available and merger.can_merge()
2033
91ebc640 2034 prefer_best = (
b7b04c78 2035 not self.params.get('simulate')
91ebc640 2036 and download
2037 and (
2038 not can_merge()
21633673 2039 or info_dict.get('is_live') and not self.params.get('live_from_start')
bf1824b3 2040 or self.params['outtmpl']['default'] == '-'))
53ed7066 2041 compat = (
2042 prefer_best
2043 or self.params.get('allow_multiple_audio_streams', False)
8a82af35 2044 or 'format-spec' in self.params['compat_opts'])
91ebc640 2045
2046 return (
53ed7066 2047 'best/bestvideo+bestaudio' if prefer_best
2048 else 'bestvideo*+bestaudio/best' if not compat
91ebc640 2049 else 'bestvideo+bestaudio/best')
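# Descriptive note: the pre-merged 'best' is preferred when merging is not viable (no usable
# ffmpeg merger, a live stream without live_from_start, or output to stdout); otherwise
# 'bestvideo*+bestaudio/best' is used, falling back to 'bestvideo+bestaudio/best' when
# multiple audio streams are allowed or the 'format-spec' compat option is set.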
0017d9ad 2050
67134eab
JMF
2051 def build_format_selector(self, format_spec):
2052 def syntax_error(note, start):
2053 message = (
2054 'Invalid format specification: '
86e5f3ed 2055 '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
67134eab
JMF
2056 return SyntaxError(message)
2057
2058 PICKFIRST = 'PICKFIRST'
2059 MERGE = 'MERGE'
2060 SINGLE = 'SINGLE'
0130afb7 2061 GROUP = 'GROUP'
67134eab
JMF
2062 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
2063
91ebc640 2064 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
2065 'video': self.params.get('allow_multiple_video_streams', False)}
909d24dd 2066
9f1a1c36 2067 check_formats = self.params.get('check_formats') == 'selected'
e8e73840 2068
67134eab
JMF
2069 def _parse_filter(tokens):
2070 filter_parts = []
2071 for type, string, start, _, _ in tokens:
2072 if type == tokenize.OP and string == ']':
2073 return ''.join(filter_parts)
2074 else:
2075 filter_parts.append(string)
2076
232541df 2077 def _remove_unused_ops(tokens):
62b58c09
L
2078 # Remove operators that we don't use and join them with the surrounding strings.
2079 # E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
232541df
JMF
2080 ALLOWED_OPS = ('/', '+', ',', '(', ')')
2081 last_string, last_start, last_end, last_line = None, None, None, None
2082 for type, string, start, end, line in tokens:
2083 if type == tokenize.OP and string == '[':
2084 if last_string:
2085 yield tokenize.NAME, last_string, last_start, last_end, last_line
2086 last_string = None
2087 yield type, string, start, end, line
2088 # everything inside brackets will be handled by _parse_filter
2089 for type, string, start, end, line in tokens:
2090 yield type, string, start, end, line
2091 if type == tokenize.OP and string == ']':
2092 break
2093 elif type == tokenize.OP and string in ALLOWED_OPS:
2094 if last_string:
2095 yield tokenize.NAME, last_string, last_start, last_end, last_line
2096 last_string = None
2097 yield type, string, start, end, line
2098 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
2099 if not last_string:
2100 last_string = string
2101 last_start = start
2102 last_end = end
2103 else:
2104 last_string += string
2105 if last_string:
2106 yield tokenize.NAME, last_string, last_start, last_end, last_line
2107
cf2ac6df 2108 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
67134eab
JMF
2109 selectors = []
2110 current_selector = None
2111 for type, string, start, _, _ in tokens:
2112 # ENCODING is only defined in python 3.x
2113 if type == getattr(tokenize, 'ENCODING', None):
2114 continue
2115 elif type in [tokenize.NAME, tokenize.NUMBER]:
2116 current_selector = FormatSelector(SINGLE, string, [])
2117 elif type == tokenize.OP:
cf2ac6df
JMF
2118 if string == ')':
2119 if not inside_group:
2120 # ')' will be handled by the parentheses group
2121 tokens.restore_last_token()
67134eab 2122 break
cf2ac6df 2123 elif inside_merge and string in ['/', ',']:
0130afb7
JMF
2124 tokens.restore_last_token()
2125 break
cf2ac6df
JMF
2126 elif inside_choice and string == ',':
2127 tokens.restore_last_token()
2128 break
2129 elif string == ',':
0a31a350
JMF
2130 if not current_selector:
2131 raise syntax_error('"," must follow a format selector', start)
67134eab
JMF
2132 selectors.append(current_selector)
2133 current_selector = None
2134 elif string == '/':
d96d604e
JMF
2135 if not current_selector:
2136 raise syntax_error('"/" must follow a format selector', start)
67134eab 2137 first_choice = current_selector
cf2ac6df 2138 second_choice = _parse_format_selection(tokens, inside_choice=True)
f5f4a27a 2139 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
67134eab
JMF
2140 elif string == '[':
2141 if not current_selector:
2142 current_selector = FormatSelector(SINGLE, 'best', [])
2143 format_filter = _parse_filter(tokens)
2144 current_selector.filters.append(format_filter)
0130afb7
JMF
2145 elif string == '(':
2146 if current_selector:
2147 raise syntax_error('Unexpected "("', start)
cf2ac6df
JMF
2148 group = _parse_format_selection(tokens, inside_group=True)
2149 current_selector = FormatSelector(GROUP, group, [])
67134eab 2150 elif string == '+':
d03cfdce 2151 if not current_selector:
2152 raise syntax_error('Unexpected "+"', start)
2153 selector_1 = current_selector
2154 selector_2 = _parse_format_selection(tokens, inside_merge=True)
2155 if not selector_2:
2156 raise syntax_error('Expected a selector', start)
2157 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
67134eab 2158 else:
86e5f3ed 2159 raise syntax_error(f'Operator not recognized: "{string}"', start)
67134eab
JMF
2160 elif type == tokenize.ENDMARKER:
2161 break
2162 if current_selector:
2163 selectors.append(current_selector)
2164 return selectors
2165
f8d4ad9a 2166 def _merge(formats_pair):
2167 format_1, format_2 = formats_pair
2168
2169 formats_info = []
2170 formats_info.extend(format_1.get('requested_formats', (format_1,)))
2171 formats_info.extend(format_2.get('requested_formats', (format_2,)))
2172
2173 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
551f9388 2174 get_no_more = {'video': False, 'audio': False}
f8d4ad9a 2175 for (i, fmt_info) in enumerate(formats_info):
551f9388 2176 if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
2177 formats_info.pop(i)
2178 continue
2179 for aud_vid in ['audio', 'video']:
f8d4ad9a 2180 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
2181 if get_no_more[aud_vid]:
2182 formats_info.pop(i)
f5510afe 2183 break
f8d4ad9a 2184 get_no_more[aud_vid] = True
2185
2186 if len(formats_info) == 1:
2187 return formats_info[0]
2188
2189 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
2190 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
2191
2192 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
2193 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
2194
fc61aff4
LL
2195 output_ext = get_compatible_ext(
2196 vcodecs=[f.get('vcodec') for f in video_fmts],
2197 acodecs=[f.get('acodec') for f in audio_fmts],
2198 vexts=[f['ext'] for f in video_fmts],
2199 aexts=[f['ext'] for f in audio_fmts],
2200 preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
2201 or self.params.get('prefer_free_formats') and ('webm', 'mkv')))
f8d4ad9a 2202
975a0d0d 2203 filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
2204
f8d4ad9a 2205 new_dict = {
2206 'requested_formats': formats_info,
975a0d0d 2207 'format': '+'.join(filtered('format')),
2208 'format_id': '+'.join(filtered('format_id')),
f8d4ad9a 2209 'ext': output_ext,
975a0d0d 2210 'protocol': '+'.join(map(determine_protocol, formats_info)),
093a1710 2211 'language': '+'.join(orderedSet(filtered('language'))) or None,
2212 'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
2213 'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
975a0d0d 2214 'tbr': sum(filtered('tbr', 'vbr', 'abr')),
f8d4ad9a 2215 }
2216
2217 if the_only_video:
2218 new_dict.update({
2219 'width': the_only_video.get('width'),
2220 'height': the_only_video.get('height'),
2221 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
2222 'fps': the_only_video.get('fps'),
49a57e70 2223 'dynamic_range': the_only_video.get('dynamic_range'),
f8d4ad9a 2224 'vcodec': the_only_video.get('vcodec'),
2225 'vbr': the_only_video.get('vbr'),
2226 'stretched_ratio': the_only_video.get('stretched_ratio'),
105bfd90 2227 'aspect_ratio': the_only_video.get('aspect_ratio'),
f8d4ad9a 2228 })
2229
2230 if the_only_audio:
2231 new_dict.update({
2232 'acodec': the_only_audio.get('acodec'),
2233 'abr': the_only_audio.get('abr'),
975a0d0d 2234 'asr': the_only_audio.get('asr'),
b8ed0f15 2235 'audio_channels': the_only_audio.get('audio_channels')
f8d4ad9a 2236 })
2237
2238 return new_dict
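# Illustrative result of _merge (values are hypothetical): merging a video-only and an
# audio-only format yields a synthetic dict along the lines of
#   {'requested_formats': [video_fmt, audio_fmt], 'format_id': '137+140', 'ext': 'mp4', ...}
# with the container picked by get_compatible_ext() from the component codecs/extensions.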
2239
e8e73840 2240 def _check_formats(formats):
981052c9 2241 if not check_formats:
2242 yield from formats
b5ac45b1 2243 return
9f1a1c36 2244 yield from self._check_formats(formats)
e8e73840 2245
67134eab 2246 def _build_selector_function(selector):
909d24dd 2247 if isinstance(selector, list): # ,
67134eab
JMF
2248 fs = [_build_selector_function(s) for s in selector]
2249
317f7ab6 2250 def selector_function(ctx):
67134eab 2251 for f in fs:
981052c9 2252 yield from f(ctx)
67134eab 2253 return selector_function
909d24dd 2254
2255 elif selector.type == GROUP: # ()
0130afb7 2256 selector_function = _build_selector_function(selector.selector)
909d24dd 2257
2258 elif selector.type == PICKFIRST: # /
67134eab
JMF
2259 fs = [_build_selector_function(s) for s in selector.selector]
2260
317f7ab6 2261 def selector_function(ctx):
67134eab 2262 for f in fs:
317f7ab6 2263 picked_formats = list(f(ctx))
67134eab
JMF
2264 if picked_formats:
2265 return picked_formats
2266 return []
67134eab 2267
981052c9 2268 elif selector.type == MERGE: # +
2269 selector_1, selector_2 = map(_build_selector_function, selector.selector)
2270
2271 def selector_function(ctx):
adbc4ec4 2272 for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
981052c9 2273 yield _merge(pair)
2274
909d24dd 2275 elif selector.type == SINGLE: # atom
598d185d 2276 format_spec = selector.selector or 'best'
909d24dd 2277
f8d4ad9a 2278 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
909d24dd 2279 if format_spec == 'all':
2280 def selector_function(ctx):
9222c381 2281 yield from _check_formats(ctx['formats'][::-1])
f8d4ad9a 2282 elif format_spec == 'mergeall':
2283 def selector_function(ctx):
316f2650 2284 formats = list(_check_formats(
2285 f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
e01d6aa4 2286 if not formats:
2287 return
921b76ca 2288 merged_format = formats[-1]
2289 for f in formats[-2::-1]:
f8d4ad9a 2290 merged_format = _merge((merged_format, f))
2291 yield merged_format
909d24dd 2292
2293 else:
85e801a9 2294 format_fallback, separate_fallback, format_reverse, format_idx = False, None, True, 1
eff63539 2295 mobj = re.match(
2296 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
2297 format_spec)
2298 if mobj is not None:
2299 format_idx = int_or_none(mobj.group('n'), default=1)
e8e73840 2300 format_reverse = mobj.group('bw')[0] == 'b'
eff63539 2301 format_type = (mobj.group('type') or [None])[0]
2302 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
2303 format_modified = mobj.group('mod') is not None
909d24dd 2304
2305 format_fallback = not format_type and not format_modified # for b, w
8326b00a 2306 _filter_f = (
eff63539 2307 (lambda f: f.get('%scodec' % format_type) != 'none')
2308 if format_type and format_modified # bv*, ba*, wv*, wa*
2309 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
2310 if format_type # bv, ba, wv, wa
2311 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
2312 if not format_modified # b, w
8326b00a 2313 else lambda f: True) # b*, w*
2314 filter_f = lambda f: _filter_f(f) and (
2315 f.get('vcodec') != 'none' or f.get('acodec') != 'none')
67134eab 2316 else:
48ee10ee 2317 if format_spec in self._format_selection_exts['audio']:
b11c04a8 2318 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
48ee10ee 2319 elif format_spec in self._format_selection_exts['video']:
b11c04a8 2320 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
85e801a9 2321 separate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
48ee10ee 2322 elif format_spec in self._format_selection_exts['storyboards']:
b11c04a8 2323 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
2324 else:
b5ae35ee 2325 filter_f = lambda f: f.get('format_id') == format_spec # id
909d24dd 2326
2327 def selector_function(ctx):
2328 formats = list(ctx['formats'])
909d24dd 2329 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
85e801a9 2330 if not matches:
2331 if format_fallback and ctx['incomplete_formats']:
2332 # for extractors with incomplete formats (audio only (soundcloud)
2333 # or video only (imgur)) best/worst will fall back to
2334 # best/worst {video,audio}-only format
2335 matches = formats
2336 elif separate_fallback and not ctx['has_merged_format']:
2337 # for compatibility with youtube-dl when there is no pre-merged format
2338 matches = list(filter(separate_fallback, formats))
981052c9 2339 matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
2340 try:
e8e73840 2341 yield matches[format_idx - 1]
4abea8ca 2342 except LazyList.IndexError:
981052c9 2343 return
083c9df9 2344
67134eab 2345 filters = [self._build_format_filter(f) for f in selector.filters]
083c9df9 2346
317f7ab6 2347 def final_selector(ctx):
adbc4ec4 2348 ctx_copy = dict(ctx)
67134eab 2349 for _filter in filters:
317f7ab6
S
2350 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
2351 return selector_function(ctx_copy)
67134eab 2352 return final_selector
083c9df9 2353
0f06bcd7 2354 stream = io.BytesIO(format_spec.encode())
0130afb7 2355 try:
f9934b96 2356 tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
0130afb7
JMF
2357 except tokenize.TokenError:
2358 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
2359
86e5f3ed 2360 class TokenIterator:
0130afb7
JMF
2361 def __init__(self, tokens):
2362 self.tokens = tokens
2363 self.counter = 0
2364
2365 def __iter__(self):
2366 return self
2367
2368 def __next__(self):
2369 if self.counter >= len(self.tokens):
2370 raise StopIteration()
2371 value = self.tokens[self.counter]
2372 self.counter += 1
2373 return value
2374
2375 next = __next__
2376
2377 def restore_last_token(self):
2378 self.counter -= 1
2379
2380 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
67134eab 2381 return _build_selector_function(parsed_selector)
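# Illustrative format-selection expressions handled by this parser (examples only):
#   bv*+ba/b                       - best video (possibly with audio) + best audio, else best
#   (bv*+ba/b)[protocol^=http]     - a group with a filter applied to its candidate formats
#   best.2                         - the second-best pre-merged format
#   bestvideo[height<=720]+bestaudio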
a9c58ad9 2382
e5660ee6 2383 def _calc_headers(self, info_dict):
8b7539d2 2384 res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})
e5660ee6 2385
c487cf00 2386 cookies = self._calc_cookies(info_dict['url'])
e5660ee6
JMF
2387 if cookies:
2388 res['Cookie'] = cookies
2389
0016b84e
S
2390 if 'X-Forwarded-For' not in res:
2391 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
2392 if x_forwarded_for_ip:
2393 res['X-Forwarded-For'] = x_forwarded_for_ip
2394
e5660ee6
JMF
2395 return res
2396
c487cf00 2397 def _calc_cookies(self, url):
2398 pr = sanitized_Request(url)
e5660ee6 2399 self.cookiejar.add_cookie_header(pr)
662435f7 2400 return pr.get_header('Cookie')
e5660ee6 2401
9f1a1c36 2402 def _sort_thumbnails(self, thumbnails):
2403 thumbnails.sort(key=lambda t: (
2404 t.get('preference') if t.get('preference') is not None else -1,
2405 t.get('width') if t.get('width') is not None else -1,
2406 t.get('height') if t.get('height') is not None else -1,
2407 t.get('id') if t.get('id') is not None else '',
2408 t.get('url')))
2409
b0249bca 2410 def _sanitize_thumbnails(self, info_dict):
bc516a3f 2411 thumbnails = info_dict.get('thumbnails')
2412 if thumbnails is None:
2413 thumbnail = info_dict.get('thumbnail')
2414 if thumbnail:
2415 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
9f1a1c36 2416 if not thumbnails:
2417 return
2418
2419 def check_thumbnails(thumbnails):
2420 for t in thumbnails:
2421 self.to_screen(f'[info] Testing thumbnail {t["id"]}')
2422 try:
2423 self.urlopen(HEADRequest(t['url']))
2424 except network_exceptions as err:
2425 self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
2426 continue
2427 yield t
2428
2429 self._sort_thumbnails(thumbnails)
2430 for i, t in enumerate(thumbnails):
2431 if t.get('id') is None:
2432 t['id'] = '%d' % i
2433 if t.get('width') and t.get('height'):
2434 t['resolution'] = '%dx%d' % (t['width'], t['height'])
2435 t['url'] = sanitize_url(t['url'])
2436
2437 if self.params.get('check_formats') is True:
282f5709 2438 info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
9f1a1c36 2439 else:
2440 info_dict['thumbnails'] = thumbnails
bc516a3f 2441
94dc8604 2442 def _fill_common_fields(self, info_dict, final=True):
03f83004 2443 # TODO: move sanitization here
94dc8604 2444 if final:
7aefd19a 2445 title = info_dict['fulltitle'] = info_dict.get('title')
d4736fdb 2446 if not title:
2447 if title == '':
2448 self.write_debug('Extractor gave empty title. Creating a generic title')
2449 else:
2450 self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
1d485a1a 2451 info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'
03f83004
LNO
2452
2453 if info_dict.get('duration') is not None:
2454 info_dict['duration_string'] = formatSeconds(info_dict['duration'])
2455
2456 for ts_key, date_key in (
2457 ('timestamp', 'upload_date'),
2458 ('release_timestamp', 'release_date'),
2459 ('modified_timestamp', 'modified_date'),
2460 ):
2461 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
2462 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2463 # see http://bugs.python.org/issue1646728)
19a03940 2464 with contextlib.suppress(ValueError, OverflowError, OSError):
03f83004
LNO
2465 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
2466 info_dict[date_key] = upload_date.strftime('%Y%m%d')
03f83004
LNO
2467
2468 live_keys = ('is_live', 'was_live')
2469 live_status = info_dict.get('live_status')
2470 if live_status is None:
2471 for key in live_keys:
2472 if info_dict.get(key) is False:
2473 continue
2474 if info_dict.get(key):
2475 live_status = key
2476 break
2477 if all(info_dict.get(key) is False for key in live_keys):
2478 live_status = 'not_live'
2479 if live_status:
2480 info_dict['live_status'] = live_status
2481 for key in live_keys:
2482 if info_dict.get(key) is None:
2483 info_dict[key] = (live_status == key)
a057779d 2484 if live_status == 'post_live':
2485 info_dict['was_live'] = True
03f83004
LNO
2486
2487 # Auto generate title fields corresponding to the *_number fields when missing
2488 # in order to always have clean titles. This is very common for TV series.
2489 for field in ('chapter', 'season', 'episode'):
94dc8604 2490 if final and info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
03f83004
LNO
2491 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
2492
415f8d51 2493 def _raise_pending_errors(self, info):
2494 err = info.pop('__pending_error', None)
2495 if err:
2496 self.report_error(err, tb=False)
2497
784320c9 2498 def sort_formats(self, info_dict):
2499 formats = self._get_formats(info_dict)
784320c9 2500 formats.sort(key=FormatSorter(
c154302c 2501 self, info_dict.get('_format_sort_fields') or []).calculate_preference)
784320c9 2502
dd82ffea
JMF
2503 def process_video_result(self, info_dict, download=True):
2504 assert info_dict.get('_type', 'video') == 'video'
9c906919 2505 self._num_videos += 1
dd82ffea 2506
bec1fad2 2507 if 'id' not in info_dict:
fc08bdd6 2508 raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
2509 elif not info_dict.get('id'):
2510 raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])
455a15e2 2511
c9969434
S
2512 def report_force_conversion(field, field_not, conversion):
2513 self.report_warning(
2514 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2515 % (field, field_not, conversion))
2516
2517 def sanitize_string_field(info, string_field):
2518 field = info.get(string_field)
14f25df2 2519 if field is None or isinstance(field, str):
c9969434
S
2520 return
2521 report_force_conversion(string_field, 'a string', 'string')
14f25df2 2522 info[string_field] = str(field)
c9969434
S
2523
2524 def sanitize_numeric_fields(info):
2525 for numeric_field in self._NUMERIC_FIELDS:
2526 field = info.get(numeric_field)
f9934b96 2527 if field is None or isinstance(field, (int, float)):
c9969434
S
2528 continue
2529 report_force_conversion(numeric_field, 'numeric', 'int')
2530 info[numeric_field] = int_or_none(field)
2531
2532 sanitize_string_field(info_dict, 'id')
2533 sanitize_numeric_fields(info_dict)
3975b4d2 2534 if info_dict.get('section_end') and info_dict.get('section_start') is not None:
2535 info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
4c3f8c3f 2536 if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
50e93e03 2537 self.report_warning('"duration" field is negative, there is an error in extractor')
be6217b2 2538
9eef7c4e 2539 chapters = info_dict.get('chapters') or []
a3976e07 2540 if chapters and chapters[0].get('start_time'):
2541 chapters.insert(0, {'start_time': 0})
2542
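        # Normalize chapter boundaries: dummy neighbours supply start_time 0 for the first
        # chapter and end_time == duration for the last, so every chapter ends up with a
        # start_time, an end_time and a title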
9eef7c4e 2543 dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
a3976e07 2544 for idx, (prev, current, next_) in enumerate(zip(
2545 (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
9eef7c4e 2546 if current.get('start_time') is None:
2547 current['start_time'] = prev.get('end_time')
2548 if not current.get('end_time'):
2549 current['end_time'] = next_.get('start_time')
a3976e07 2550 if not current.get('title'):
2551 current['title'] = f'<Untitled Chapter {idx}>'
9eef7c4e 2552
dd82ffea
JMF
2553 if 'playlist' not in info_dict:
2554 # It isn't part of a playlist
2555 info_dict['playlist'] = None
2556 info_dict['playlist_index'] = None
2557
bc516a3f 2558 self._sanitize_thumbnails(info_dict)
d5519808 2559
536a55da 2560 thumbnail = info_dict.get('thumbnail')
bc516a3f 2561 thumbnails = info_dict.get('thumbnails')
536a55da
S
2562 if thumbnail:
2563 info_dict['thumbnail'] = sanitize_url(thumbnail)
2564 elif thumbnails:
d5519808
PH
2565 info_dict['thumbnail'] = thumbnails[-1]['url']
2566
ae30b840 2567 if info_dict.get('display_id') is None and 'id' in info_dict:
0afef30b
PH
2568 info_dict['display_id'] = info_dict['id']
2569
03f83004 2570 self._fill_common_fields(info_dict)
33d2fc2f 2571
05108a49
S
2572 for cc_kind in ('subtitles', 'automatic_captions'):
2573 cc = info_dict.get(cc_kind)
2574 if cc:
2575 for _, subtitle in cc.items():
2576 for subtitle_format in subtitle:
2577 if subtitle_format.get('url'):
2578 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
2579 if subtitle_format.get('ext') is None:
2580 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
2581
2582 automatic_captions = info_dict.get('automatic_captions')
4bba3716 2583 subtitles = info_dict.get('subtitles')
4bba3716 2584
360e1ca5 2585 info_dict['requested_subtitles'] = self.process_subtitles(
05108a49 2586 info_dict['id'], subtitles, automatic_captions)
a504ced0 2587
aebb4f4b 2588 formats = self._get_formats(info_dict)
dd82ffea 2589
c154302c 2590 # Backward compatibility with InfoExtractor._sort_formats
9ebac355 2591 field_preference = (formats or [{}])[0].pop('__sort_fields', None)
c154302c 2592 if field_preference:
2593 info_dict['_format_sort_fields'] = field_preference
2594
0a5a191a 2595 # or None ensures --clean-infojson removes it
2596 info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
88acdbc2 2597 if not self.params.get('allow_unplayable_formats'):
2598 formats = [f for f in formats if not f.get('has_drm')]
17ffed18 2599
2600 if formats and all(f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
2601 self.report_warning(
2602 f'{"This video is DRM protected and " if info_dict["_has_drm"] else ""}'
2603 'only images are available for download. Use --list-formats to see them'.capitalize())
88acdbc2 2604
319b6059 2605 get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
2606 if not get_from_start:
2607 info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
2608 if info_dict.get('is_live') and formats:
adbc4ec4 2609 formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
319b6059 2610 if get_from_start and not formats:
a44ca5a4 2611 self.raise_no_formats(info_dict, msg=(
2612 '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
2613 'If you want to download from the current time, use --no-live-from-start'))
adbc4ec4 2614
73af5cc8
S
2615 def is_wellformed(f):
2616 url = f.get('url')
a5ac0c47 2617 if not url:
73af5cc8
S
2618 self.report_warning(
2619 '"url" field is missing or empty - skipping format, '
2620 'there is an error in extractor')
a5ac0c47
S
2621 return False
2622 if isinstance(url, bytes):
2623 sanitize_string_field(f, 'url')
2624 return True
73af5cc8
S
2625
2626 # Filter out malformed formats for better extraction robustness
1ac7f461 2627 formats = list(filter(is_wellformed, formats or []))
2628
2629 if not formats:
2630 self.raise_no_formats(info_dict)
73af5cc8 2631
39f32f17 2632 for format in formats:
c9969434
S
2633 sanitize_string_field(format, 'format_id')
2634 sanitize_numeric_fields(format)
dcf77cf1 2635 format['url'] = sanitize_url(format['url'])
39f32f17 2636 if format.get('ext') is None:
2637 format['ext'] = determine_ext(format['url']).lower()
2638 if format.get('protocol') is None:
2639 format['protocol'] = determine_protocol(format)
2640 if format.get('resolution') is None:
2641 format['resolution'] = self.format_resolution(format, default=None)
2642 if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
2643 format['dynamic_range'] = 'SDR'
2644 if format.get('aspect_ratio') is None:
2645 format['aspect_ratio'] = try_call(lambda: round(format['width'] / format['height'], 2))
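            # Rough filesize estimate from duration and total bitrate when the extractor
            # gave neither filesize nor filesize_approx (tbr is in KBit/s; 1024/8 converts
            # KBit/s to bytes per second)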
2646 if (info_dict.get('duration') and format.get('tbr')
2647 and not format.get('filesize') and not format.get('filesize_approx')):
2648 format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
2649 format['http_headers'] = self._calc_headers(collections.ChainMap(format, info_dict))
2650
2651 # This is copied to http_headers by the above _calc_headers and can now be removed
2652 if '__x_forwarded_for_ip' in info_dict:
2653 del info_dict['__x_forwarded_for_ip']
2654
c154302c 2655 self.sort_formats({
2656 'formats': formats,
2657 '_format_sort_fields': info_dict.get('_format_sort_fields')
2658 })
39f32f17 2659
2660 # Sanitize and group by format_id
2661 formats_dict = {}
2662 for i, format in enumerate(formats):
e74e3b63 2663 if not format.get('format_id'):
14f25df2 2664 format['format_id'] = str(i)
e2effb08
S
2665 else:
2666 # Sanitize format_id from characters used in format selector expression
ec85ded8 2667 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
39f32f17 2668 formats_dict.setdefault(format['format_id'], []).append(format)
181c7053
S
2669
2670 # Make sure all formats have unique format_id
03b4de72 2671 common_exts = set(itertools.chain(*self._format_selection_exts.values()))
181c7053 2672 for format_id, ambiguous_formats in formats_dict.items():
48ee10ee 2673                 ambiguous_id = len(ambiguous_formats) > 1
2674 for i, format in enumerate(ambiguous_formats):
2675                     if ambiguous_id:
181c7053 2676 format['format_id'] = '%s-%d' % (format_id, i)
48ee10ee 2677 # Ensure there is no conflict between id and ext in format selection
2678 # See https://github.com/yt-dlp/yt-dlp/issues/1282
2679 if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
2680 format['format_id'] = 'f%s' % format['format_id']
181c7053 2681
39f32f17 2682 if format.get('format') is None:
2683 format['format'] = '{id} - {res}{note}'.format(
2684 id=format['format_id'],
2685 res=self.format_resolution(format),
2686 note=format_field(format, 'format_note', ' (%s)'),
2687 )
dd82ffea 2688
9f1a1c36 2689 if self.params.get('check_formats') is True:
282f5709 2690 formats = LazyList(self._check_formats(formats[::-1]), reverse=True)
9f1a1c36 2691
88acdbc2 2692 if not formats or formats[0] is not info_dict:
b3d9ef88
JMF
2693                 # only set the 'formats' field if the original info_dict lists formats;
2694                 # otherwise we end up with a circular reference: the first (and only)
f89197d7 2695 # element in the 'formats' field in info_dict is info_dict itself,
dfb1b146 2696 # which can't be exported to json
b3d9ef88 2697 info_dict['formats'] = formats
4ec82a72 2698
2699 info_dict, _ = self.pre_process(info_dict)
2700
6db9c4d5 2701 if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
09b49e1f 2702 return info_dict
2703
2704 self.post_extract(info_dict)
2705 info_dict, _ = self.pre_process(info_dict, 'after_filter')
2706
093a1710 2707 # The pre-processors may have modified the formats
aebb4f4b 2708 formats = self._get_formats(info_dict)
093a1710 2709
e4221b70 2710 list_only = self.params.get('simulate') == 'list_only'
fa9f30b8 2711 interactive_format_selection = not list_only and self.format_selector == '-'
b7b04c78 2712 if self.params.get('list_thumbnails'):
2713 self.list_thumbnails(info_dict)
b7b04c78 2714 if self.params.get('listsubtitles'):
2715 if 'automatic_captions' in info_dict:
2716 self.list_subtitles(
2717 info_dict['id'], automatic_captions, 'automatic captions')
2718 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
fa9f30b8 2719 if self.params.get('listformats') or interactive_format_selection:
b69fd25c 2720 self.list_formats(info_dict)
169dbde9 2721 if list_only:
b7b04c78 2722 # Without this printing, -F --print-json will not work
17060584 2723 self.__forced_printings(info_dict)
c487cf00 2724 return info_dict
bfaae0a7 2725
187986a8 2726 format_selector = self.format_selector
2727 if format_selector is None:
0017d9ad 2728 req_format = self._default_format_spec(info_dict, download=download)
0760b0a7 2729 self.write_debug('Default format spec: %s' % req_format)
187986a8 2730 format_selector = self.build_format_selector(req_format)
317f7ab6 2731
fa9f30b8 2732 while True:
2733 if interactive_format_selection:
2734 req_format = input(
2735 self._format_screen('\nEnter format selector: ', self.Styles.EMPHASIS))
2736 try:
2737 format_selector = self.build_format_selector(req_format)
2738 except SyntaxError as err:
2739 self.report_error(err, tb=False, is_error=False)
2740 continue
2741
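            # Besides the formats themselves, the selector is told whether a pre-merged
            # (audio+video) format exists and whether the list is "incomplete"
            # (all video-only or all audio-only), which the format selector can take into account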
85e801a9 2742 formats_to_download = list(format_selector({
fa9f30b8 2743 'formats': formats,
85e801a9 2744 'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
2745 'incomplete_formats': (
2746 # All formats are video-only or
2747 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
2748 # all formats are audio-only
2749 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
2750 }))
fa9f30b8 2751 if interactive_format_selection and not formats_to_download:
2752 self.report_error('Requested format is not available', tb=False, is_error=False)
2753 continue
2754 break
317f7ab6 2755
dd82ffea 2756 if not formats_to_download:
b7da73eb 2757 if not self.params.get('ignore_no_formats_error'):
c0b6e5c7 2758 raise ExtractorError(
2759 'Requested format is not available. Use --list-formats for a list of available formats',
2760 expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
b62fa6d7 2761 self.report_warning('Requested format is not available')
2762 # Process what we can, even without any available formats.
2763 formats_to_download = [{}]
a13e6848 2764
0500ee3d 2765 requested_ranges = tuple(self.params.get('download_ranges', lambda *_: [{}])(info_dict, self))
5ec1b6b7 2766 best_format, downloaded_formats = formats_to_download[-1], []
b62fa6d7 2767 if download:
0500ee3d 2768 if best_format and requested_ranges:
5ec1b6b7 2769 def to_screen(*msg):
2770 self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')
2771
2772 to_screen(f'Downloading {len(formats_to_download)} format(s):',
2773 (f['format_id'] for f in formats_to_download))
0500ee3d 2774 if requested_ranges != ({}, ):
5ec1b6b7 2775 to_screen(f'Downloading {len(requested_ranges)} time ranges:',
fc2ba496 2776 (f'{c["start_time"]:.1f}-{c["end_time"]:.1f}' for c in requested_ranges))
a13e6848 2777 max_downloads_reached = False
5ec1b6b7 2778
0500ee3d 2779 for fmt, chapter in itertools.product(formats_to_download, requested_ranges):
5ec1b6b7 2780 new_info = self._copy_infodict(info_dict)
b7da73eb 2781 new_info.update(fmt)
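                # Translate the requested time range into absolute section_start/section_end,
                # relative to any section already set on the video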
3975b4d2 2782 offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
fc2ba496 2783 end_time = offset + min(chapter.get('end_time', duration), duration)
3975b4d2 2784 if chapter or offset:
5ec1b6b7 2785 new_info.update({
3975b4d2 2786 'section_start': offset + chapter.get('start_time', 0),
2576d53a 2787                         # duration may not be accurate, so allow deviations of <1 sec
2788 'section_end': end_time if end_time <= offset + duration + 1 else None,
5ec1b6b7 2789 'section_title': chapter.get('title'),
2790 'section_number': chapter.get('index'),
2791 })
2792 downloaded_formats.append(new_info)
a13e6848 2793 try:
2794 self.process_info(new_info)
2795 except MaxDownloadsReached:
2796 max_downloads_reached = True
415f8d51 2797 self._raise_pending_errors(new_info)
f46e2f9d 2798 # Remove copied info
2799 for key, val in tuple(new_info.items()):
2800 if info_dict.get(key) == val:
2801 new_info.pop(key)
a13e6848 2802 if max_downloads_reached:
2803 break
ebed8b37 2804
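            # Write to the download archive only if at least one downloaded format requested it
            # and none opted out ('ignore' entries neither force nor block the write)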
5ec1b6b7 2805 write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
a13e6848 2806 assert write_archive.issubset({True, False, 'ignore'})
2807 if True in write_archive and False not in write_archive:
2808 self.record_download_archive(info_dict)
be72c624 2809
5ec1b6b7 2810 info_dict['requested_downloads'] = downloaded_formats
ed5835b4 2811 info_dict = self.run_all_pps('after_video', info_dict)
a13e6848 2812 if max_downloads_reached:
2813 raise MaxDownloadsReached()
ebed8b37 2814
49a57e70 2815 # We update the info dict with the selected best quality format (backwards compatibility)
be72c624 2816 info_dict.update(best_format)
dd82ffea
JMF
2817 return info_dict
2818
98c70d6f 2819 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
a504ced0 2820 """Select the requested subtitles and their format"""
d8a58ddc 2821 available_subs, normal_sub_langs = {}, []
98c70d6f
JMF
2822 if normal_subtitles and self.params.get('writesubtitles'):
2823 available_subs.update(normal_subtitles)
d8a58ddc 2824 normal_sub_langs = tuple(normal_subtitles.keys())
98c70d6f
JMF
2825 if automatic_captions and self.params.get('writeautomaticsub'):
2826 for lang, cap_info in automatic_captions.items():
360e1ca5
JMF
2827 if lang not in available_subs:
2828 available_subs[lang] = cap_info
2829
d2c8aadf 2830 if not available_subs or (
2831 not self.params.get('writesubtitles')
2832 and not self.params.get('writeautomaticsub')):
4d171848 2833 return None
a504ced0 2834
d8a58ddc 2835 all_sub_langs = tuple(available_subs.keys())
a504ced0 2836 if self.params.get('allsubtitles', False):
c32b0aab 2837 requested_langs = all_sub_langs
2838 elif self.params.get('subtitleslangs', False):
5314b521 2839 try:
2840 requested_langs = orderedSet_from_options(
2841 self.params.get('subtitleslangs'), {'all': all_sub_langs}, use_regex=True)
2842 except re.error as e:
2843                 raise ValueError(f'Wrong regex for subtitleslangs: {e.pattern}')
a504ced0 2844 else:
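            # No languages were requested explicitly: prefer plain English subtitles, then other
            # en* subtitles, then English captions, falling back to the first available language;
            # at most one language is selected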
376aa24b
SS
2845 requested_langs = LazyList(itertools.chain(
2846 ['en'] if 'en' in normal_sub_langs else [],
2847 filter(lambda f: f.startswith('en'), normal_sub_langs),
2848 ['en'] if 'en' in all_sub_langs else [],
2849 filter(lambda f: f.startswith('en'), all_sub_langs),
2850 normal_sub_langs, all_sub_langs,
2851 ))[:1]
ad3dc496 2852 if requested_langs:
d2c8aadf 2853 self.to_screen(f'[info] {video_id}: Downloading subtitles: {", ".join(requested_langs)}')
a504ced0
JMF
2854
2855 formats_query = self.params.get('subtitlesformat', 'best')
2856 formats_preference = formats_query.split('/') if formats_query else []
2857 subs = {}
2858 for lang in requested_langs:
2859 formats = available_subs.get(lang)
2860 if formats is None:
86e5f3ed 2861 self.report_warning(f'{lang} subtitles not available for {video_id}')
a504ced0 2862 continue
a504ced0
JMF
2863 for ext in formats_preference:
2864 if ext == 'best':
2865 f = formats[-1]
2866 break
2867 matches = list(filter(lambda f: f['ext'] == ext, formats))
2868 if matches:
2869 f = matches[-1]
2870 break
2871 else:
2872 f = formats[-1]
2873 self.report_warning(
2874 'No subtitle format found matching "%s" for language %s, '
2875 'using %s' % (formats_query, lang, f['ext']))
2876 subs[lang] = f
2877 return subs
2878
bb66c247 2879 def _forceprint(self, key, info_dict):
2880 if info_dict is None:
2881 return
2882 info_copy = info_dict.copy()
17060584 2883 info_copy.setdefault('filename', self.prepare_filename(info_dict))
2884 if info_dict.get('requested_formats') is not None:
2885 # For RTMP URLs, also include the playpath
2886 info_copy['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
2887 elif info_dict.get('url'):
2888 info_copy['urls'] = info_dict['url'] + info_dict.get('play_path', '')
bb66c247 2889 info_copy['formats_table'] = self.render_formats_table(info_dict)
2890 info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
2891 info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
2892 info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))
2893
2894 def format_tmpl(tmpl):
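            # Shorthand expansion for --print: a comma-separated list of field names
            # (optionally wrapped in {} to print them as a dict, or suffixed with '=' to
            # print "field = <json value>") is turned into a full output template;
            # anything that doesn't match this shorthand is used as a template verbatim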
48c8424b 2895 mobj = re.fullmatch(r'([\w.:,]|-\d|(?P<dict>{([\w.:,]|-\d)+}))+=?', tmpl)
07a1250e 2896 if not mobj:
2897 return tmpl
48c8424b 2898
2899 fmt = '%({})s'
2900 if tmpl.startswith('{'):
2901 tmpl = f'.{tmpl}'
2902 if tmpl.endswith('='):
2903 tmpl, fmt = tmpl[:-1], '{0} = %({0})#j'
2904 return '\n'.join(map(fmt.format, [tmpl] if mobj.group('dict') else tmpl.split(',')))
8130779d 2905
bb66c247 2906 for tmpl in self.params['forceprint'].get(key, []):
2907 self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))
2908
2909 for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
5127e92a 2910 filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
bb66c247 2911 tmpl = format_tmpl(tmpl)
2912 self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
8d93e69d 2913 if self._ensure_dir_exists(filename):
9874e82b 2914 with open(filename, 'a', encoding='utf-8', newline='') as f:
2915 f.write(self.evaluate_outtmpl(tmpl, info_copy) + os.linesep)
ca30f449 2916
17060584 2917 return info_copy
2918
2919 def __forced_printings(self, info_dict, filename=None, incomplete=True):
bb66c247 2920 if (self.params.get('forcejson')
2921 or self.params['forceprint'].get('video')
2922 or self.params['print_to_file'].get('video')):
2b8a2973 2923 self.post_extract(info_dict)
17060584 2924 if filename:
2925 info_dict['filename'] = filename
b5f61b69 2926 info_copy = self._forceprint('video', info_dict)
2927
2928 def print_field(field, actual_field=None, optional=False):
2929 if actual_field is None:
2930 actual_field = field
2931 if self.params.get(f'force{field}') and (
2932 info_copy.get(field) is not None or (not optional and not incomplete)):
2933 self.to_stdout(info_copy[actual_field])
2934
2935 print_field('title')
2936 print_field('id')
2937 print_field('url', 'urls')
2938 print_field('thumbnail', optional=True)
2939 print_field('description', optional=True)
2940 print_field('filename', optional=True)
2941 if self.params.get('forceduration') and info_copy.get('duration') is not None:
2942 self.to_stdout(formatSeconds(info_copy['duration']))
2943 print_field('format')
53c18592 2944
2b8a2973 2945 if self.params.get('forcejson'):
6e84b215 2946 self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
d06daf23 2947
e8e73840 2948 def dl(self, name, info, subtitle=False, test=False):
88acdbc2 2949 if not info.get('url'):
1151c407 2950 self.raise_no_formats(info, True)
e8e73840 2951
2952 if test:
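            # Throw-away parameter set for test downloads (e.g. while checking formats):
            # be quiet unless verbose, skip .part/.ytdl files and always overwrite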
2953 verbose = self.params.get('verbose')
2954 params = {
2955 'test': True,
a169858f 2956 'quiet': self.params.get('quiet') or not verbose,
e8e73840 2957 'verbose': verbose,
2958 'noprogress': not verbose,
2959 'nopart': True,
2960 'skip_unavailable_fragments': False,
2961 'keep_fragments': False,
2962 'overwrites': True,
2963 '_no_ytdl_file': True,
2964 }
2965 else:
2966 params = self.params
96fccc10 2967 fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
e8e73840 2968 if not test:
2969 for ph in self._progress_hooks:
2970 fd.add_progress_hook(ph)
42676437
M
2971 urls = '", "'.join(
2972 (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
2973 for f in info.get('requested_formats', []) or [info])
3a408f9d 2974 self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')
03b4de72 2975
adbc4ec4
THD
2976         # Note: Ideally info should be deep-copied so that hooks cannot modify it.
2977 # But it may contain objects that are not deep-copyable
2978 new_info = self._copy_infodict(info)
e8e73840 2979 if new_info.get('http_headers') is None:
2980 new_info['http_headers'] = self._calc_headers(new_info)
2981 return fd.download(name, new_info, subtitle)
2982
e04938ab 2983 def existing_file(self, filepaths, *, default_overwrite=True):
2984 existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
2985 if existing_files and not self.params.get('overwrites', default_overwrite):
2986 return existing_files[0]
2987
2988 for file in existing_files:
2989 self.report_file_delete(file)
2990 os.remove(file)
2991 return None
2992
8222d8de 2993 def process_info(self, info_dict):
09b49e1f 2994 """Process a single resolved IE result. (Modifies it in-place)"""
8222d8de
JMF
2995
2996 assert info_dict.get('_type', 'video') == 'video'
f46e2f9d 2997 original_infodict = info_dict
fd288278 2998
4513a41a 2999 if 'format' not in info_dict and 'ext' in info_dict:
8222d8de
JMF
3000 info_dict['format'] = info_dict['ext']
3001
c77495e3 3002 if self._match_entry(info_dict) is not None:
9e907ebd 3003 info_dict['__write_download_archive'] = 'ignore'
8222d8de
JMF
3004 return
3005
09b49e1f 3006 # Does nothing under normal operation - for backward compatibility of process_info
277d6ff5 3007 self.post_extract(info_dict)
119e40ef 3008
3009 def replace_info_dict(new_info):
3010 nonlocal info_dict
3011 if new_info == info_dict:
3012 return
3013 info_dict.clear()
3014 info_dict.update(new_info)
3015
3016 new_info, _ = self.pre_process(info_dict, 'video')
3017 replace_info_dict(new_info)
0c14d66a 3018 self._num_downloads += 1
8222d8de 3019
dcf64d43 3020 # info_dict['_filename'] needs to be set for backward compatibility
de6000d9 3021 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
3022 temp_filename = self.prepare_filename(info_dict, 'temp')
0202b52a 3023 files_to_move = {}
8222d8de
JMF
3024
3025 # Forced printings
4513a41a 3026 self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
8222d8de 3027
ca6d59d2 3028 def check_max_downloads():
3029 if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
3030 raise MaxDownloadsReached()
3031
b7b04c78 3032 if self.params.get('simulate'):
9e907ebd 3033 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
ca6d59d2 3034 check_max_downloads()
8222d8de
JMF
3035 return
3036
de6000d9 3037 if full_filename is None:
8222d8de 3038 return
e92caff5 3039 if not self._ensure_dir_exists(encodeFilename(full_filename)):
0202b52a 3040 return
e92caff5 3041 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
8222d8de
JMF
3042 return
3043
80c03fa9 3044 if self._write_description('video', info_dict,
3045 self.prepare_filename(info_dict, 'description')) is None:
3046 return
3047
3048 sub_files = self._write_subtitles(info_dict, temp_filename)
3049 if sub_files is None:
3050 return
3051 files_to_move.update(dict(sub_files))
3052
3053 thumb_files = self._write_thumbnails(
3054 'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
3055 if thumb_files is None:
3056 return
3057 files_to_move.update(dict(thumb_files))
8222d8de 3058
80c03fa9 3059 infofn = self.prepare_filename(info_dict, 'infojson')
3060 _infojson_written = self._write_info_json('video', info_dict, infofn)
3061 if _infojson_written:
dac5df5a 3062 info_dict['infojson_filename'] = infofn
e75bb0d6 3063 # For backward compatibility, even though it was a private field
80c03fa9 3064 info_dict['__infojson_filename'] = infofn
3065 elif _infojson_written is None:
3066 return
3067
3068 # Note: Annotations are deprecated
3069 annofn = None
1fb07d10 3070 if self.params.get('writeannotations', False):
de6000d9 3071 annofn = self.prepare_filename(info_dict, 'annotation')
80c03fa9 3072 if annofn:
e92caff5 3073 if not self._ensure_dir_exists(encodeFilename(annofn)):
0202b52a 3074 return
0c3d0f51 3075 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
6febd1c1 3076 self.to_screen('[info] Video annotations are already present')
ffddb112
RA
3077 elif not info_dict.get('annotations'):
3078 self.report_warning('There are no annotations to write.')
7b6fefc9
PH
3079 else:
3080 try:
6febd1c1 3081 self.to_screen('[info] Writing video annotations to: ' + annofn)
86e5f3ed 3082 with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
7b6fefc9
PH
3083 annofile.write(info_dict['annotations'])
3084 except (KeyError, TypeError):
6febd1c1 3085 self.report_warning('There are no annotations to write.')
86e5f3ed 3086 except OSError:
6febd1c1 3087 self.report_error('Cannot write annotations file: ' + annofn)
7b6fefc9 3088 return
1fb07d10 3089
732044af 3090 # Write internet shortcut files
08438d2c 3091 def _write_link_file(link_type):
60f3e995 3092 url = try_get(info_dict['webpage_url'], iri_to_uri)
3093 if not url:
3094 self.report_warning(
3095 f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
3096 return True
08438d2c 3097 linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
0e6b018a
Z
3098 if not self._ensure_dir_exists(encodeFilename(linkfn)):
3099 return False
10e3742e 3100 if self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
08438d2c 3101 self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
3102 return True
3103 try:
3104 self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
86e5f3ed 3105 with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
3106 newline='\r\n' if link_type == 'url' else '\n') as linkfile:
60f3e995 3107 template_vars = {'url': url}
08438d2c 3108 if link_type == 'desktop':
3109 template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
3110 linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
86e5f3ed 3111 except OSError:
08438d2c 3112 self.report_error(f'Cannot write internet shortcut {linkfn}')
3113 return False
732044af 3114 return True
3115
08438d2c 3116 write_links = {
3117 'url': self.params.get('writeurllink'),
3118 'webloc': self.params.get('writewebloclink'),
3119 'desktop': self.params.get('writedesktoplink'),
3120 }
3121 if self.params.get('writelink'):
3122 link_type = ('webloc' if sys.platform == 'darwin'
3123 else 'desktop' if sys.platform.startswith('linux')
3124 else 'url')
3125 write_links[link_type] = True
3126
3127 if any(should_write and not _write_link_file(link_type)
3128 for link_type, should_write in write_links.items()):
3129 return
732044af 3130
415f8d51 3131 new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
3132 replace_info_dict(new_info)
56d868db 3133
a13e6848 3134 if self.params.get('skip_download'):
56d868db 3135 info_dict['filepath'] = temp_filename
3136 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3137 info_dict['__files_to_move'] = files_to_move
f46e2f9d 3138 replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
9e907ebd 3139 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
56d868db 3140 else:
3141 # Download
b868936c 3142 info_dict.setdefault('__postprocessors', [])
4340deca 3143 try:
0202b52a 3144
e04938ab 3145 def existing_video_file(*filepaths):
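                    # Look for an already-downloaded file under both the final (possibly
                    # recoded to final_ext) extension and the original extension, preferring
                    # the converted file; if one is found, adopt its extension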
6b591b29 3146 ext = info_dict.get('ext')
e04938ab 3147 converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
3148 file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
3149 default_overwrite=False)
3150 if file:
3151 info_dict['ext'] = os.path.splitext(file)[1][1:]
3152 return file
0202b52a 3153
7b2c3f47 3154 fd, success = None, True
fccf90e7 3155 if info_dict.get('protocol') or info_dict.get('url'):
56ba69e4 3156 fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
71df9b7f 3157 if fd is not FFmpegFD and 'no-direct-merge' not in self.params['compat_opts'] and (
56ba69e4 3158 info_dict.get('section_start') or info_dict.get('section_end')):
7b2c3f47 3159 msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
56ba69e4 3160 else 'You have requested downloading the video partially, but ffmpeg is not installed')
3161 self.report_error(f'{msg}. Aborting')
5ec1b6b7 3162 return
5ec1b6b7 3163
4340deca 3164 if info_dict.get('requested_formats') is not None:
81cd954a 3165 requested_formats = info_dict['requested_formats']
0202b52a 3166 old_ext = info_dict['ext']
4e3b637d 3167 if self.params.get('merge_output_format') is None:
4e3b637d 3168 if (info_dict['ext'] == 'webm'
3169 and info_dict.get('thumbnails')
3170 # check with type instead of pp_key, __name__, or isinstance
3171                                 # since we don't want any custom PPs to trigger this
c487cf00 3172 and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])): # noqa: E721
4e3b637d 3173 info_dict['ext'] = 'mkv'
3174 self.report_warning(
3175 'webm doesn\'t support embedding a thumbnail, mkv will be used')
124bc071 3176 new_ext = info_dict['ext']
0202b52a 3177
124bc071 3178 def correct_ext(filename, ext=new_ext):
96fccc10 3179 if filename == '-':
3180 return filename
0202b52a 3181 filename_real_ext = os.path.splitext(filename)[1][1:]
3182 filename_wo_ext = (
3183 os.path.splitext(filename)[0]
124bc071 3184 if filename_real_ext in (old_ext, new_ext)
0202b52a 3185 else filename)
86e5f3ed 3186 return f'{filename_wo_ext}.{ext}'
0202b52a 3187
38c6902b 3188 # Ensure filename always has a correct extension for successful merge
0202b52a 3189 full_filename = correct_ext(full_filename)
3190 temp_filename = correct_ext(temp_filename)
e04938ab 3191 dl_filename = existing_video_file(full_filename, temp_filename)
1ea24129 3192 info_dict['__real_download'] = False
18e674b4 3193
7b2c3f47 3194 merger = FFmpegMergerPP(self)
adbc4ec4 3195 downloaded = []
dbf5416a 3196 if dl_filename is not None:
6c7274ec 3197 self.report_file_already_downloaded(dl_filename)
adbc4ec4
THD
3198 elif fd:
3199 for f in requested_formats if fd != FFmpegFD else []:
3200 f['filepath'] = fname = prepend_extension(
3201 correct_ext(temp_filename, info_dict['ext']),
3202 'f%s' % f['format_id'], info_dict['ext'])
3203 downloaded.append(fname)
dbf5416a 3204 info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
3205 success, real_download = self.dl(temp_filename, info_dict)
3206 info_dict['__real_download'] = real_download
18e674b4 3207 else:
18e674b4 3208 if self.params.get('allow_unplayable_formats'):
3209 self.report_warning(
3210 'You have requested merging of multiple formats '
3211 'while also allowing unplayable formats to be downloaded. '
3212 'The formats won\'t be merged to prevent data corruption.')
3213 elif not merger.available:
e8969bda 3214 msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
3215 if not self.params.get('ignoreerrors'):
3216 self.report_error(f'{msg}. Aborting due to --abort-on-error')
3217 return
3218 self.report_warning(f'{msg}. The formats won\'t be merged')
18e674b4 3219
96fccc10 3220 if temp_filename == '-':
adbc4ec4 3221 reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
96fccc10 3222 else 'but the formats are incompatible for simultaneous download' if merger.available
3223 else 'but ffmpeg is not installed')
3224 self.report_warning(
3225 f'You have requested downloading multiple formats to stdout {reason}. '
3226 'The formats will be streamed one after the other')
3227 fname = temp_filename
dbf5416a 3228 for f in requested_formats:
3229 new_info = dict(info_dict)
3230 del new_info['requested_formats']
3231 new_info.update(f)
96fccc10 3232 if temp_filename != '-':
124bc071 3233 fname = prepend_extension(
3234 correct_ext(temp_filename, new_info['ext']),
3235 'f%s' % f['format_id'], new_info['ext'])
96fccc10 3236 if not self._ensure_dir_exists(fname):
3237 return
a21e0ab1 3238 f['filepath'] = fname
96fccc10 3239 downloaded.append(fname)
dbf5416a 3240 partial_success, real_download = self.dl(fname, new_info)
3241 info_dict['__real_download'] = info_dict['__real_download'] or real_download
3242 success = success and partial_success
adbc4ec4
THD
3243
3244 if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
3245 info_dict['__postprocessors'].append(merger)
3246 info_dict['__files_to_merge'] = downloaded
3247 # Even if there were no downloads, it is being merged only now
3248 info_dict['__real_download'] = True
3249 else:
3250 for file in downloaded:
3251 files_to_move[file] = None
4340deca
P
3252 else:
3253 # Just a single file
e04938ab 3254 dl_filename = existing_video_file(full_filename, temp_filename)
6c7274ec 3255 if dl_filename is None or dl_filename == temp_filename:
3256 # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
3257 # So we should try to resume the download
e8e73840 3258 success, real_download = self.dl(temp_filename, info_dict)
0202b52a 3259 info_dict['__real_download'] = real_download
6c7274ec 3260 else:
3261 self.report_file_already_downloaded(dl_filename)
0202b52a 3262
0202b52a 3263 dl_filename = dl_filename or temp_filename
c571435f 3264 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
0202b52a 3265
3158150c 3266 except network_exceptions as err:
7960b056 3267 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
4340deca 3268 return
86e5f3ed 3269 except OSError as err:
4340deca
P
3270 raise UnavailableVideoError(err)
3271 except (ContentTooShortError, ) as err:
86e5f3ed 3272 self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
4340deca 3273 return
8222d8de 3274
415f8d51 3275 self._raise_pending_errors(info_dict)
de6000d9 3276 if success and full_filename != '-':
f17f8651 3277
fd7cfb64 3278 def fixup():
3279 do_fixup = True
3280 fixup_policy = self.params.get('fixup')
3281 vid = info_dict['id']
3282
3283 if fixup_policy in ('ignore', 'never'):
3284 return
3285 elif fixup_policy == 'warn':
3fe75fdc 3286 do_fixup = 'warn'
f89b3e2d 3287 elif fixup_policy != 'force':
3288 assert fixup_policy in ('detect_or_warn', None)
3289 if not info_dict.get('__real_download'):
3290 do_fixup = False
fd7cfb64 3291
3292 def ffmpeg_fixup(cndn, msg, cls):
3fe75fdc 3293 if not (do_fixup and cndn):
fd7cfb64 3294 return
3fe75fdc 3295 elif do_fixup == 'warn':
fd7cfb64 3296 self.report_warning(f'{vid}: {msg}')
3297 return
3298 pp = cls(self)
3299 if pp.available:
3300 info_dict['__postprocessors'].append(pp)
3301 else:
3302 self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
3303
3304 stretched_ratio = info_dict.get('stretched_ratio')
ca9def71
LNO
3305 ffmpeg_fixup(stretched_ratio not in (1, None),
3306 f'Non-uniform pixel ratio {stretched_ratio}',
3307 FFmpegFixupStretchedPP)
fd7cfb64 3308
993191c0 3309 downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
24146491 3310 downloader = downloader.FD_NAME if downloader else None
adbc4ec4 3311
ca9def71
LNO
3312 ext = info_dict.get('ext')
3313 postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
3314 isinstance(pp, FFmpegVideoConvertorPP)
3315 and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
3316 ) for pp in self._pps['post_process'])
3317
3318 if not postprocessed_by_ffmpeg:
3319 ffmpeg_fixup(ext == 'm4a' and info_dict.get('container') == 'm4a_dash',
f2df4071 3320 'writing DASH m4a. Only some players support this container',
3321 FFmpegFixupM4aPP)
24146491 3322 ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
494f5230 3323 or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
adbc4ec4
THD
3324 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
3325 FFmpegFixupM3u8PP)
26010b5c 3326 ffmpeg_fixup(info_dict.get('is_live') and downloader == 'dashsegments',
adbc4ec4
THD
3327 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)
3328
24146491 3329 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
3330 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)
fd7cfb64 3331
3332 fixup()
8222d8de 3333 try:
f46e2f9d 3334 replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
af819c21 3335 except PostProcessingError as err:
3336 self.report_error('Postprocessing: %s' % str(err))
8222d8de 3337 return
ab8e5e51
AM
3338 try:
3339 for ph in self._post_hooks:
23c1a667 3340 ph(info_dict['filepath'])
ab8e5e51
AM
3341 except Exception as err:
3342 self.report_error('post hooks: %s' % str(err))
3343 return
9e907ebd 3344 info_dict['__write_download_archive'] = True
2d30509f 3345
c487cf00 3346 assert info_dict is original_infodict # Make sure the info_dict was modified in-place
a13e6848 3347 if self.params.get('force_write_download_archive'):
9e907ebd 3348 info_dict['__write_download_archive'] = True
ca6d59d2 3349 check_max_downloads()
8222d8de 3350
aa9369a2 3351 def __download_wrapper(self, func):
3352 @functools.wraps(func)
3353 def wrapper(*args, **kwargs):
3354 try:
3355 res = func(*args, **kwargs)
3356 except UnavailableVideoError as e:
3357 self.report_error(e)
b222c271 3358 except DownloadCancelled as e:
3359 self.to_screen(f'[info] {e}')
3360 if not self.params.get('break_per_url'):
3361 raise
fd404bec 3362 self._num_downloads = 0
aa9369a2 3363 else:
3364 if self.params.get('dump_single_json', False):
3365 self.post_extract(res)
3366 self.to_stdout(json.dumps(self.sanitize_info(res)))
3367 return wrapper
3368
8222d8de
JMF
3369 def download(self, url_list):
3370 """Download a given list of URLs."""
aa9369a2 3371 url_list = variadic(url_list) # Passing a single URL is a common mistake
bf1824b3 3372 outtmpl = self.params['outtmpl']['default']
3089bc74
S
3373 if (len(url_list) > 1
3374 and outtmpl != '-'
3375 and '%' not in outtmpl
3376 and self.params.get('max_downloads') != 1):
acd69589 3377 raise SameFileError(outtmpl)
8222d8de
JMF
3378
3379 for url in url_list:
aa9369a2 3380 self.__download_wrapper(self.extract_info)(
3381 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
8222d8de
JMF
3382
3383 return self._download_retcode
3384
1dcc4c0c 3385 def download_with_info_file(self, info_filename):
31bd3925
JMF
3386 with contextlib.closing(fileinput.FileInput(
3387 [info_filename], mode='r',
3388 openhook=fileinput.hook_encoded('utf-8'))) as f:
3389 # FileInput doesn't have a read method, we can't call json.load
ab1de9cb 3390 infos = [self.sanitize_info(info, self.params.get('clean_infojson', True))
3391 for info in variadic(json.loads('\n'.join(f)))]
3392 for info in infos:
3393 try:
3394 self.__download_wrapper(self.process_ie_result)(info, download=True)
3395 except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
3396 if not isinstance(e, EntryNotInPlaylist):
3397 self.to_stderr('\r')
3398 webpage_url = info.get('webpage_url')
3399 if webpage_url is None:
3400 raise
aa9369a2 3401 self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
ab1de9cb 3402 self.download([webpage_url])
d4943898 3403 return self._download_retcode
1dcc4c0c 3404
cb202fd2 3405 @staticmethod
8012d892 3406 def sanitize_info(info_dict, remove_private_keys=False):
3407 ''' Sanitize the infodict for converting to json '''
3ad56b42 3408 if info_dict is None:
3409 return info_dict
6e84b215 3410 info_dict.setdefault('epoch', int(time.time()))
6a5a30f9 3411 info_dict.setdefault('_type', 'video')
b5e7a2e6 3412 info_dict.setdefault('_version', {
3413 'version': __version__,
3414 'current_git_head': current_git_head(),
3415 'release_git_head': RELEASE_GIT_HEAD,
3416 'repository': REPOSITORY,
3417 })
09b49e1f 3418
8012d892 3419 if remove_private_keys:
0a5a191a 3420 reject = lambda k, v: v is None or k.startswith('__') or k in {
f46e2f9d 3421 'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
0a5a191a 3422 'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber',
08e29b9f 3423 '_format_sort_fields',
6e84b215 3424 }
ae8f99e6 3425 else:
09b49e1f 3426 reject = lambda k, v: False
adbc4ec4
THD
3427
3428 def filter_fn(obj):
3429 if isinstance(obj, dict):
3430 return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
3431 elif isinstance(obj, (list, tuple, set, LazyList)):
3432 return list(map(filter_fn, obj))
3433 elif obj is None or isinstance(obj, (str, int, float, bool)):
3434 return obj
3435 else:
3436 return repr(obj)
3437
5226731e 3438 return filter_fn(info_dict)
cb202fd2 3439
8012d892 3440 @staticmethod
3441 def filter_requested_info(info_dict, actually_filter=True):
3442 ''' Alias of sanitize_info for backward compatibility '''
3443 return YoutubeDL.sanitize_info(info_dict, actually_filter)
3444
43d7f5a5 3445 def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
3446 for filename in set(filter(None, files_to_delete)):
3447 if msg:
3448 self.to_screen(msg % filename)
3449 try:
3450 os.remove(filename)
3451 except OSError:
3452 self.report_warning(f'Unable to delete file {filename}')
3453 if filename in info.get('__files_to_move', []): # NB: Delete even if None
3454 del info['__files_to_move'][filename]
3455
ed5835b4 3456 @staticmethod
3457 def post_extract(info_dict):
3458 def actual_post_extract(info_dict):
3459 if info_dict.get('_type') in ('playlist', 'multi_video'):
3460 for video_dict in info_dict.get('entries', {}):
3461 actual_post_extract(video_dict or {})
3462 return
3463
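            # '__post_extractor' is an optional callable left by the extractor; calling it
            # here merges any deferred fields into the info dict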
09b49e1f 3464 post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
3465 info_dict.update(post_extractor())
ed5835b4 3466
3467 actual_post_extract(info_dict or {})
3468
dcf64d43 3469 def run_pp(self, pp, infodict):
5bfa4862 3470 files_to_delete = []
dcf64d43 3471 if '__files_to_move' not in infodict:
3472 infodict['__files_to_move'] = {}
b1940459 3473 try:
3474 files_to_delete, infodict = pp.run(infodict)
3475 except PostProcessingError as e:
3476 # Must be True and not 'only_download'
3477 if self.params.get('ignoreerrors') is True:
3478 self.report_error(e)
3479 return infodict
3480 raise
3481
5bfa4862 3482 if not files_to_delete:
dcf64d43 3483 return infodict
5bfa4862 3484 if self.params.get('keepvideo', False):
3485 for f in files_to_delete:
dcf64d43 3486 infodict['__files_to_move'].setdefault(f, '')
5bfa4862 3487 else:
43d7f5a5 3488 self._delete_downloaded_files(
3489 *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
dcf64d43 3490 return infodict
5bfa4862 3491
17ba4343 3492 def run_all_pps(self, key, info, *, additional_pps=None, fatal=True):
3493 if key != 'video':
3494 self._forceprint(key, info)
3495 for pp in (additional_pps or []) + self._pps[key]:
3496 info = self.run_pp(pp, info)
ed5835b4 3497 return info
277d6ff5 3498
56d868db 3499 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
5bfa4862 3500 info = dict(ie_info)
56d868db 3501 info['__files_to_move'] = files_to_move or {}
415f8d51 3502 try:
3503 info = self.run_all_pps(key, info)
3504 except PostProcessingError as err:
3505 msg = f'Preprocessing: {err}'
3506 info.setdefault('__pending_error', msg)
3507 self.report_error(msg, is_error=False)
56d868db 3508 return info, info.pop('__files_to_move', None)
5bfa4862 3509
f46e2f9d 3510 def post_process(self, filename, info, files_to_move=None):
8222d8de 3511 """Run all the postprocessors on the given file."""
8222d8de 3512 info['filepath'] = filename
dcf64d43 3513 info['__files_to_move'] = files_to_move or {}
ed5835b4 3514 info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
dcf64d43 3515 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
3516 del info['__files_to_move']
ed5835b4 3517 return self.run_all_pps('after_move', info)
c1c9a79c 3518
5db07df6 3519 def _make_archive_id(self, info_dict):
e9fef7ee
S
3520 video_id = info_dict.get('id')
3521 if not video_id:
3522 return
5db07df6
PH
3523 # Future-proof against any change in case
3524 # and backwards compatibility with prior versions
e9fef7ee 3525 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
7012b23c 3526 if extractor is None:
1211bb6d
S
3527 url = str_or_none(info_dict.get('url'))
3528 if not url:
3529 return
e9fef7ee 3530 # Try to find matching extractor for the URL and take its ie_key
8b7491c8 3531 for ie_key, ie in self._ies.items():
1211bb6d 3532 if ie.suitable(url):
8b7491c8 3533 extractor = ie_key
e9fef7ee
S
3534 break
3535 else:
3536 return
0647d925 3537 return make_archive_id(extractor, video_id)
5db07df6
PH
3538
3539 def in_download_archive(self, info_dict):
ae103564 3540 if not self.archive:
5db07df6
PH
3541 return False
3542
1e8fe57e 3543 vid_ids = [self._make_archive_id(info_dict)]
c200096c 3544 vid_ids.extend(info_dict.get('_old_archive_ids') or [])
1e8fe57e 3545 return any(id_ in self.archive for id_ in vid_ids)
c1c9a79c
PH
3546
3547 def record_download_archive(self, info_dict):
3548 fn = self.params.get('download_archive')
3549 if fn is None:
3550 return
5db07df6
PH
3551 vid_id = self._make_archive_id(info_dict)
3552 assert vid_id
ae103564 3553
a13e6848 3554 self.write_debug(f'Adding to archive: {vid_id}')
9c935fbc 3555 if is_path_like(fn):
ae103564 3556 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
3557 archive_file.write(vid_id + '\n')
a45e8619 3558 self.archive.add(vid_id)
dd82ffea 3559
8c51aa65 3560 @staticmethod
8abeeb94 3561 def format_resolution(format, default='unknown'):
9359f3d4 3562 if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
fb04e403 3563 return 'audio only'
f49d89ee
PH
3564 if format.get('resolution') is not None:
3565 return format['resolution']
35615307 3566 if format.get('width') and format.get('height'):
ff51ed58 3567 return '%dx%d' % (format['width'], format['height'])
35615307 3568 elif format.get('height'):
ff51ed58 3569 return '%sp' % format['height']
35615307 3570 elif format.get('width'):
ff51ed58 3571 return '%dx?' % format['width']
3572 return default
8c51aa65 3573
8130779d 3574 def _list_format_headers(self, *headers):
3575 if self.params.get('listformats_table', True) is not False:
591bb9d3 3576 return [self._format_out(header, self.Styles.HEADERS) for header in headers]
8130779d 3577 return headers
3578
c57f7757
PH
3579 def _format_note(self, fdict):
3580 res = ''
3581 if fdict.get('ext') in ['f4f', 'f4m']:
f304da8a 3582 res += '(unsupported)'
32f90364
PH
3583 if fdict.get('language'):
3584 if res:
3585 res += ' '
f304da8a 3586 res += '[%s]' % fdict['language']
c57f7757 3587 if fdict.get('format_note') is not None:
f304da8a 3588 if res:
3589 res += ' '
3590 res += fdict['format_note']
c57f7757 3591 if fdict.get('tbr') is not None:
f304da8a 3592 if res:
3593 res += ', '
3594 res += '%4dk' % fdict['tbr']
c57f7757
PH
3595 if fdict.get('container') is not None:
3596 if res:
3597 res += ', '
3598 res += '%s container' % fdict['container']
3089bc74
S
3599 if (fdict.get('vcodec') is not None
3600 and fdict.get('vcodec') != 'none'):
c57f7757
PH
3601 if res:
3602 res += ', '
3603 res += fdict['vcodec']
91c7271a 3604 if fdict.get('vbr') is not None:
c57f7757
PH
3605 res += '@'
3606 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
3607 res += 'video@'
3608 if fdict.get('vbr') is not None:
3609 res += '%4dk' % fdict['vbr']
fbb21cf5 3610 if fdict.get('fps') is not None:
5d583bdf
S
3611 if res:
3612 res += ', '
3613 res += '%sfps' % fdict['fps']
c57f7757
PH
3614 if fdict.get('acodec') is not None:
3615 if res:
3616 res += ', '
3617 if fdict['acodec'] == 'none':
3618 res += 'video only'
3619 else:
3620 res += '%-5s' % fdict['acodec']
3621 elif fdict.get('abr') is not None:
3622 if res:
3623 res += ', '
3624 res += 'audio'
3625 if fdict.get('abr') is not None:
3626 res += '@%3dk' % fdict['abr']
3627 if fdict.get('asr') is not None:
3628 res += ' (%5dHz)' % fdict['asr']
3629 if fdict.get('filesize') is not None:
3630 if res:
3631 res += ', '
3632 res += format_bytes(fdict['filesize'])
9732d77e
PH
3633 elif fdict.get('filesize_approx') is not None:
3634 if res:
3635 res += ', '
3636 res += '~' + format_bytes(fdict['filesize_approx'])
c57f7757 3637 return res
91c7271a 3638
aebb4f4b 3639 def _get_formats(self, info_dict):
3640 if info_dict.get('formats') is None:
3641 if info_dict.get('url') and info_dict.get('_type', 'video') == 'video':
3642 return [info_dict]
3643 return []
3644 return info_dict['formats']
b69fd25c 3645
aebb4f4b 3646 def render_formats_table(self, info_dict):
3647 formats = self._get_formats(info_dict)
3648 if not formats:
3649 return
8130779d 3650         if self.params.get('listformats_table', True) is False:
76d321f6 3651 table = [
3652 [
3653 format_field(f, 'format_id'),
3654 format_field(f, 'ext'),
3655 self.format_resolution(f),
8130779d 3656 self._format_note(f)
d5d1df8a 3657 ] for f in formats if (f.get('preference') or 0) >= -1000]
8130779d 3658 return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
3659
d816f61f 3660 def simplified_codec(f, field):
3661 assert field in ('acodec', 'vcodec')
3662 codec = f.get(field, 'unknown')
f5ea4748 3663 if not codec:
3664 return 'unknown'
3665 elif codec != 'none':
d816f61f 3666 return '.'.join(codec.split('.')[:4])
3667
3668 if field == 'vcodec' and f.get('acodec') == 'none':
3669 return 'images'
3670 elif field == 'acodec' and f.get('vcodec') == 'none':
3671 return ''
3672 return self._format_out('audio only' if field == 'vcodec' else 'video only',
3673 self.Styles.SUPPRESS)
3674
591bb9d3 3675 delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
8130779d 3676 table = [
3677 [
591bb9d3 3678 self._format_out(format_field(f, 'format_id'), self.Styles.ID),
8130779d 3679 format_field(f, 'ext'),
3680 format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
563e0bf8 3681 format_field(f, 'fps', '\t%d', func=round),
8130779d 3682 format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
b8ed0f15 3683 format_field(f, 'audio_channels', '\t%s'),
8130779d 3684 delim,
3685 format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
563e0bf8 3686 format_field(f, 'tbr', '\t%dk', func=round),
8130779d 3687 shorten_protocol_name(f.get('protocol', '')),
3688 delim,
d816f61f 3689 simplified_codec(f, 'vcodec'),
563e0bf8 3690 format_field(f, 'vbr', '\t%dk', func=round),
d816f61f 3691 simplified_codec(f, 'acodec'),
563e0bf8 3692 format_field(f, 'abr', '\t%dk', func=round),
ae61d108 3693 format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
8130779d 3694 join_nonempty(
591bb9d3 3695 self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
a5387729 3696 self._format_out('DRM', 'light red') if f.get('has_drm') else None,
8130779d 3697 format_field(f, 'language', '[%s]'),
3698 join_nonempty(format_field(f, 'format_note'),
3699 format_field(f, 'container', ignore=(None, f.get('ext'))),
3700 delim=', '),
3701 delim=' '),
3702 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3703 header_line = self._list_format_headers(
b8ed0f15 3704 'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', 'CH', delim, '\tFILESIZE', '\tTBR', 'PROTO',
8130779d 3705 delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')
3706
3707 return render_table(
3708 header_line, table, hide_empty=True,
591bb9d3 3709 delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
8130779d 3710
3711 def render_thumbnails_table(self, info_dict):
88f23a18 3712 thumbnails = list(info_dict.get('thumbnails') or [])
cfb56d1a 3713 if not thumbnails:
8130779d 3714 return None
3715 return render_table(
ec11a9f4 3716 self._list_format_headers('ID', 'Width', 'Height', 'URL'),
177662e0 3717 [[t.get('id'), t.get('width') or 'unknown', t.get('height') or 'unknown', t['url']] for t in thumbnails])
2412044c 3718
8130779d 3719 def render_subtitles_table(self, video_id, subtitles):
2412044c 3720 def _row(lang, formats):
49c258e1 3721 exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
2412044c 3722 if len(set(names)) == 1:
7aee40c1 3723 names = [] if names[0] == 'unknown' else names[:1]
2412044c 3724 return [lang, ', '.join(names), ', '.join(exts)]
3725
8130779d 3726 if not subtitles:
3727 return None
3728 return render_table(
ec11a9f4 3729 self._list_format_headers('Language', 'Name', 'Formats'),
2412044c 3730 [_row(lang, formats) for lang, formats in subtitles.items()],
8130779d 3731 hide_empty=True)
3732
3733 def __list_table(self, video_id, name, func, *args):
3734 table = func(*args)
3735 if not table:
3736 self.to_screen(f'{video_id} has no {name}')
3737 return
3738 self.to_screen(f'[info] Available {name} for {video_id}:')
3739 self.to_stdout(table)
3740
3741 def list_formats(self, info_dict):
3742 self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)
3743
3744 def list_thumbnails(self, info_dict):
3745 self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)
3746
3747 def list_subtitles(self, video_id, subtitles, name='subtitles'):
3748 self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
a504ced0 3749
dca08720
PH
3750 def urlopen(self, req):
3751 """ Start an HTTP download """
f9934b96 3752 if isinstance(req, str):
67dda517 3753 req = sanitized_Request(req)
19a41fc6 3754 return self._opener.open(req, timeout=self._socket_timeout)
dca08720
PH
3755
3756 def print_debug_header(self):
3757 if not self.params.get('verbose'):
3758 return
49a57e70 3759
a057779d 3760 from . import _IN_CLI # Must be delayed import
3761
560738f3 3762 # These imports can be slow. So import them only as needed
3763 from .extractor.extractors import _LAZY_LOADER
e756f45b
M
3764 from .extractor.extractors import (
3765 _PLUGIN_CLASSES as plugin_ies,
3766 _PLUGIN_OVERRIDES as plugin_ie_overrides
3767 )
560738f3 3768
49a57e70 3769 def get_encoding(stream):
2a938746 3770 ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
49a57e70 3771 if not supports_terminal_sequences(stream):
53973b4d 3772 from .utils import WINDOWS_VT_MODE # Must be imported locally
e3c7d495 3773 ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
49a57e70 3774 return ret
3775
591bb9d3 3776 encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
49a57e70 3777 locale.getpreferredencoding(),
3778 sys.getfilesystemencoding(),
591bb9d3 3779 self.get_encoding(),
3780 ', '.join(
64fa820c 3781 f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
591bb9d3 3782 if stream is not None and key != 'console')
3783 )
883d4b1e 3784
3785 logger = self.params.get('logger')
3786 if logger:
3787 write_debug = lambda msg: logger.debug(f'[debug] {msg}')
3788 write_debug(encoding_str)
3789 else:
96565c7e 3790 write_string(f'[debug] {encoding_str}\n', encoding=None)
49a57e70 3791 write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')
734f90bb 3792
4c88ff87 3793 source = detect_variant()
70b23409 3794 if VARIANT not in (None, 'pip'):
3795 source += '*'
a5387729 3796 klass = type(self)
36eaf303 3797 write_debug(join_nonempty(
b5e7a2e6 3798 f'{"yt-dlp" if REPOSITORY == "yt-dlp/yt-dlp" else REPOSITORY} version',
392389b7 3799 f'{CHANNEL}@{__version__}',
29cb20bd 3800 f'[{RELEASE_GIT_HEAD[:9]}]' if RELEASE_GIT_HEAD else '',
36eaf303 3801 '' if source == 'unknown' else f'({source})',
a5387729 3802 '' if _IN_CLI else 'API' if klass == YoutubeDL else f'API:{self.__module__}.{klass.__qualname__}',
36eaf303 3803 delim=' '))
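# The pieces above join into a single line of the form (placeholders, not real values):
#   [debug] yt-dlp version <CHANNEL>@<__version__> [<RELEASE_GIT_HEAD[:9]>] (<source>)
# with the "(<source>)" part omitted for unknown variants and an "API" marker appended
# when yt-dlp is embedded rather than run from the CLI.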
497074f0 3804
3805 if not _IN_CLI:
3806 write_debug(f'params: {self.params}')
3807
6e21fdd2 3808 if not _LAZY_LOADER:
3809 if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
49a57e70 3810 write_debug('Lazy loading extractors is forcibly disabled')
6e21fdd2 3811 else:
49a57e70 3812 write_debug('Lazy loading extractors is disabled')
8a82af35 3813 if self.params['compat_opts']:
3814 write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
36eaf303 3815
b5e7a2e6 3816 if current_git_head():
3817 write_debug(f'Git HEAD: {current_git_head()}')
b1f94422 3818 write_debug(system_identifier())
d28b5171 3819
8913ef74 3820 exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
3821 ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
3822 if ffmpeg_features:
19a03940 3823 exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))
8913ef74 3824
4c83c967 3825 exe_versions['rtmpdump'] = rtmpdump_version()
feee8d32 3826 exe_versions['phantomjs'] = PhantomJSwrapper._version()
d28b5171 3827 exe_str = ', '.join(
2831b468 3828 f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
3829 ) or 'none'
49a57e70 3830 write_debug('exe versions: %s' % exe_str)
dca08720 3831
1d485a1a 3832 from .compat.compat_utils import get_package_info
9b8ee23b 3833 from .dependencies import available_dependencies
3834
3835 write_debug('Optional libraries: %s' % (', '.join(sorted({
1d485a1a 3836 join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
9b8ee23b 3837 })) or 'none'))
2831b468 3838
97ec5bc5 3839 self._setup_opener()
dca08720
PH
3840 proxy_map = {}
3841 for handler in self._opener.handlers:
3842 if hasattr(handler, 'proxies'):
3843 proxy_map.update(handler.proxies)
49a57e70 3844 write_debug(f'Proxy map: {proxy_map}')
dca08720 3845
e756f45b
M
3846 for plugin_type, plugins in {'Extractor': plugin_ies, 'Post-Processor': plugin_pps}.items():
3847 display_list = ['%s%s' % (
8e40b9d1 3848 klass.__name__, '' if klass.__name__ == name else f' as {name}')
e756f45b
M
3849 for name, klass in plugins.items()]
3850 if plugin_type == 'Extractor':
3851 display_list.extend(f'{plugins[-1].IE_NAME.partition("+")[2]} ({parent.__name__})'
3852 for parent, plugins in plugin_ie_overrides.items())
3853 if not display_list:
3854 continue
3855 write_debug(f'{plugin_type} Plugins: {", ".join(sorted(display_list))}')
3856
8e40b9d1
M
3857 plugin_dirs = plugin_directories()
3858 if plugin_dirs:
3859 write_debug(f'Plugin directories: {plugin_dirs}')
3860
49a57e70 3861 # Not implemented
3862 if False and self.params.get('call_home'):
0f06bcd7 3863 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
49a57e70 3864 write_debug('Public IP address: %s' % ipaddr)
58b1f00d 3865 latest_version = self.urlopen(
0f06bcd7 3866 'https://yt-dl.org/latest/version').read().decode()
58b1f00d
PH
3867 if version_tuple(latest_version) > version_tuple(__version__):
3868 self.report_warning(
3869 'You are using an outdated version (newest version: %s)! '
3870 'See https://yt-dl.org/update if you need help updating.' %
3871 latest_version)
3872
e344693b 3873 def _setup_opener(self):
97ec5bc5 3874 if hasattr(self, '_opener'):
3875 return
6ad14cab 3876 timeout_val = self.params.get('socket_timeout')
17bddf3e 3877 self._socket_timeout = 20 if timeout_val is None else float(timeout_val)
6ad14cab 3878
982ee69a 3879 opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
dca08720
PH
3880 opts_cookiefile = self.params.get('cookiefile')
3881 opts_proxy = self.params.get('proxy')
3882
982ee69a 3883 self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
dca08720 3884
6a3f4c3f 3885 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
dca08720
PH
3886 if opts_proxy is not None:
3887 if opts_proxy == '':
3888 proxies = {}
3889 else:
3890 proxies = {'http': opts_proxy, 'https': opts_proxy}
3891 else:
ac668111 3892 proxies = urllib.request.getproxies()
067aa17e 3893 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
dca08720
PH
3894 if 'http' in proxies and 'https' not in proxies:
3895 proxies['https'] = proxies['http']
91410c9b 3896 proxy_handler = PerRequestProxyHandler(proxies)
a0ddb8a2
PH
3897
3898 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
be4a824d
PH
3899 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3900 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
fca6dba8 3901 redirect_handler = YoutubeDLRedirectHandler()
f9934b96 3902 data_handler = urllib.request.DataHandler()
6240b0a2
JMF
3903
3904 # When passing our own FileHandler instance, build_opener won't add the
3905 # default FileHandler, which lets us disable the file protocol since it
3906 # can be used for malicious purposes (see
067aa17e 3907 # https://github.com/ytdl-org/youtube-dl/issues/8227)
ac668111 3908 file_handler = urllib.request.FileHandler()
6240b0a2 3909
8300774c
M
3910 if not self.params.get('enable_file_urls'):
3911 def file_open(*args, **kwargs):
3912 raise urllib.error.URLError(
3913 'file:// URLs are explicitly disabled in yt-dlp for security reasons. '
3914 'Use --enable-file-urls to enable at your own risk.')
3915 file_handler.file_open = file_open
6240b0a2 3916
ac668111 3917 opener = urllib.request.build_opener(
fca6dba8 3918 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
2461f79d 3919
dca08720
PH
3920 # Delete the default user-agent header, which would otherwise apply in
3921 # cases where our custom HTTP handler doesn't come into play
067aa17e 3922 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
dca08720
PH
3923 opener.addheaders = []
3924 self._opener = opener
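# Proxy-selection sketch for the handler built above (values illustrative):
#   params={'proxy': 'socks5://127.0.0.1:1080'} -> both 'http' and 'https' use that proxy
#   params={'proxy': ''}                        -> proxies are disabled entirely
#   no 'proxy' key                              -> urllib.request.getproxies() (environment) is used,
#                                                  copying 'http' to 'https' if only the former is set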
62fec3b2
PH
3925
3926 def encode(self, s):
3927 if isinstance(s, bytes):
3928 return s # Already encoded
3929
3930 try:
3931 return s.encode(self.get_encoding())
3932 except UnicodeEncodeError as err:
3933 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3934 raise
3935
3936 def get_encoding(self):
3937 encoding = self.params.get('encoding')
3938 if encoding is None:
3939 encoding = preferredencoding()
3940 return encoding
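# e.g. YoutubeDL({'encoding': 'utf-8'}) forces UTF-8 for encode() above; without the
# option, the locale's preferred encoding (preferredencoding()) is used instead.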
ec82d85a 3941
e08a85d8 3942 def _write_info_json(self, label, ie_result, infofn, overwrite=None):
cb96c5be 3943 ''' Write the infojson and return True = written, 'exists' = already exists, False = skipped, None = error '''
e08a85d8 3944 if overwrite is None:
3945 overwrite = self.params.get('overwrites', True)
80c03fa9 3946 if not self.params.get('writeinfojson'):
3947 return False
3948 elif not infofn:
3949 self.write_debug(f'Skipping writing {label} infojson')
3950 return False
3951 elif not self._ensure_dir_exists(infofn):
3952 return None
e08a85d8 3953 elif not overwrite and os.path.exists(infofn):
80c03fa9 3954 self.to_screen(f'[info] {label.title()} metadata is already present')
cb96c5be 3955 return 'exists'
3956
3957 self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
3958 try:
3959 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
3960 return True
86e5f3ed 3961 except OSError:
cb96c5be 3962 self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
3963 return None
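# Usage sketch (roughly mirroring how the video info.json is written elsewhere in
# this class; names are illustrative):
#   infofn = self.prepare_filename(info_dict, 'infojson')
#   if self._write_info_json('video', info_dict, infofn) is None:
#       return  # treat the I/O error as fatal for this download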
80c03fa9 3964
3965 def _write_description(self, label, ie_result, descfn):
3966 ''' Write the description and return True = written, False = skipped, None = error '''
3967 if not self.params.get('writedescription'):
3968 return False
3969 elif not descfn:
3970 self.write_debug(f'Skipping writing {label} description')
3971 return False
3972 elif not self._ensure_dir_exists(descfn):
3973 return None
3974 elif not self.params.get('overwrites', True) and os.path.exists(descfn):
3975 self.to_screen(f'[info] {label.title()} description is already present')
3976 elif ie_result.get('description') is None:
88fb9425 3977 self.to_screen(f'[info] There\'s no {label} description to write')
80c03fa9 3978 return False
3979 else:
3980 try:
3981 self.to_screen(f'[info] Writing {label} description to: {descfn}')
86e5f3ed 3982 with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
80c03fa9 3983 descfile.write(ie_result['description'])
86e5f3ed 3984 except OSError:
80c03fa9 3985 self.report_error(f'Cannot write {label} description file {descfn}')
3986 return None
3987 return True
3988
3989 def _write_subtitles(self, info_dict, filename):
3990 ''' Write subtitles to file and return a list of (sub_filename, final_sub_filename); or None on error '''
3991 ret = []
3992 subtitles = info_dict.get('requested_subtitles')
88fb9425 3993 if not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
80c03fa9 3994 # Subtitle download errors are already handled by the relevant IE,
3995 # so this silently continues when used with an IE that does not support subtitles
3996 return ret
88fb9425 3997 elif not subtitles:
c8bc203f 3998 self.to_screen('[info] There are no subtitles for the requested languages')
88fb9425 3999 return ret
80c03fa9 4000 sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
4001 if not sub_filename_base:
4002 self.to_screen('[info] Skipping writing video subtitles')
4003 return ret
88fb9425 4004
80c03fa9 4005 for sub_lang, sub_info in subtitles.items():
4006 sub_format = sub_info['ext']
4007 sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
4008 sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
e04938ab 4009 existing_sub = self.existing_file((sub_filename_final, sub_filename))
4010 if existing_sub:
80c03fa9 4011 self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
e04938ab 4012 sub_info['filepath'] = existing_sub
4013 ret.append((existing_sub, sub_filename_final))
80c03fa9 4014 continue
4015
4016 self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
4017 if sub_info.get('data') is not None:
4018 try:
4019 # Use newline='' to prevent conversion of newline characters
4020 # See https://github.com/ytdl-org/youtube-dl/issues/10268
86e5f3ed 4021 with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
80c03fa9 4022 subfile.write(sub_info['data'])
4023 sub_info['filepath'] = sub_filename
4024 ret.append((sub_filename, sub_filename_final))
4025 continue
86e5f3ed 4026 except OSError:
80c03fa9 4027 self.report_error(f'Cannot write video subtitles file {sub_filename}')
4028 return None
4029
4030 try:
4031 sub_copy = sub_info.copy()
4032 sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
4033 self.dl(sub_filename, sub_copy, subtitle=True)
4034 sub_info['filepath'] = sub_filename
4035 ret.append((sub_filename, sub_filename_final))
6020e05d 4036 except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
c70c418d 4037 msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
6020e05d 4038 if self.params.get('ignoreerrors') is not True: # False or 'only_download'
c70c418d 4039 if not self.params.get('ignoreerrors'):
4040 self.report_error(msg)
4041 raise DownloadError(msg)
4042 self.report_warning(msg)
519804a9 4043 return ret
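# Return-value sketch: a list of (written_path, final_path) pairs such as
# [('Title [id].en.vtt', 'Title [id].en.vtt')], where the first element is the file
# that now exists on disk and the second is the name it should end up with after the
# move-files step; None signals a hard error while writing.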
80c03fa9 4044
4045 def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
4046 ''' Write thumbnails to file and return a list of (thumb_filename, final_thumb_filename) '''
6c4fd172 4047 write_all = self.params.get('write_all_thumbnails', False)
80c03fa9 4048 thumbnails, ret = [], []
6c4fd172 4049 if write_all or self.params.get('writethumbnail', False):
0202b52a 4050 thumbnails = info_dict.get('thumbnails') or []
88fb9425 4051 if not thumbnails:
c8bc203f 4052 self.to_screen(f'[info] There are no {label} thumbnails to download')
88fb9425 4053 return ret
6c4fd172 4054 multiple = write_all and len(thumbnails) > 1
ec82d85a 4055
80c03fa9 4056 if thumb_filename_base is None:
4057 thumb_filename_base = filename
4058 if thumbnails and not thumb_filename_base:
4059 self.write_debug(f'Skipping writing {label} thumbnail')
4060 return ret
4061
dd0228ce 4062 for idx, t in list(enumerate(thumbnails))[::-1]:
80c03fa9 4063 thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
aa9369a2 4064 thumb_display_id = f'{label} thumbnail {t["id"]}'
80c03fa9 4065 thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
4066 thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
ec82d85a 4067
e04938ab 4068 existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
4069 if existing_thumb:
aa9369a2 4070 self.to_screen('[info] %s is already present' % (
4071 thumb_display_id if multiple else f'{label} thumbnail').capitalize())
e04938ab 4072 t['filepath'] = existing_thumb
4073 ret.append((existing_thumb, thumb_filename_final))
ec82d85a 4074 else:
80c03fa9 4075 self.to_screen(f'[info] Downloading {thumb_display_id} ...')
ec82d85a 4076 try:
297e9952 4077 uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
80c03fa9 4078 self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
d3d89c32 4079 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
ec82d85a 4080 shutil.copyfileobj(uf, thumbf)
80c03fa9 4081 ret.append((thumb_filename, thumb_filename_final))
885cc0b7 4082 t['filepath'] = thumb_filename
3158150c 4083 except network_exceptions as err:
dd0228ce 4084 thumbnails.pop(idx)
80c03fa9 4085 self.report_warning(f'Unable to download {thumb_display_id}: {err}')
6c4fd172 4086 if ret and not write_all:
4087 break
0202b52a 4088 return ret
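# As with subtitles, each entry pairs the thumbnail file that was written with its
# intended final name. Thumbnails are attempted starting from the last (typically
# highest-preference) entry, since the list is iterated in reverse; unless
# write_all_thumbnails is set, the loop stops after the first successful download.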