yt_dlp/YoutubeDL.py
26e63931 1import collections
31bd3925 2import contextlib
9d2ecdbc 3import datetime
c1c9a79c 4import errno
31bd3925 5import fileinput
b5ae35ee 6import functools
8222d8de 7import io
b82f815f 8import itertools
8694c600 9import json
62fec3b2 10import locale
083c9df9 11import operator
8222d8de 12import os
dca08720 13import platform
f8271158 14import random
8222d8de
JMF
15import re
16import shutil
dca08720 17import subprocess
8222d8de 18import sys
21cd8fae 19import tempfile
8222d8de 20import time
67134eab 21import tokenize
8222d8de 22import traceback
524e2e4f 23import unicodedata
f9934b96 24import urllib.request
961ea474
S
25from string import ascii_letters
26
f8271158 27from .cache import Cache
ac668111 28from .compat import HAS_LEGACY as compat_has_legacy
29from .compat import compat_os_name, compat_shlex_quote, compat_str
982ee69a 30from .cookies import load_cookies
f8271158 31from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
32from .downloader.rtmp import rtmpdump_version
f8271158 33from .extractor import gen_extractor_classes, get_info_extractor
34from .extractor.openload import PhantomJSwrapper
35from .minicurses import format_text
36from .postprocessor import _PLUGIN_CLASSES as plugin_postprocessors
37from .postprocessor import (
38 EmbedThumbnailPP,
39 FFmpegFixupDuplicateMoovPP,
40 FFmpegFixupDurationPP,
41 FFmpegFixupM3u8PP,
42 FFmpegFixupM4aPP,
43 FFmpegFixupStretchedPP,
44 FFmpegFixupTimestampPP,
45 FFmpegMergerPP,
46 FFmpegPostProcessor,
47 MoveFilesAfterDownloadPP,
48 get_postprocessor,
49)
50from .update import detect_variant
8c25f81b 51from .utils import (
f8271158 52 DEFAULT_OUTTMPL,
7b2c3f47 53 IDENTITY,
f8271158 54 LINK_TEMPLATES,
55 NO_DEFAULT,
1d485a1a 56 NUMBER_RE,
f8271158 57 OUTTMPL_TYPES,
58 POSTPROCESS_WHEN,
59 STR_FORMAT_RE_TMPL,
60 STR_FORMAT_TYPES,
61 ContentTooShortError,
62 DateRange,
63 DownloadCancelled,
64 DownloadError,
65 EntryNotInPlaylist,
66 ExistingVideoReached,
67 ExtractorError,
68 GeoRestrictedError,
69 HEADRequest,
f8271158 70 ISO3166Utils,
71 LazyList,
72 MaxDownloadsReached,
19a03940 73 Namespace,
f8271158 74 PagedList,
75 PerRequestProxyHandler,
7e88d7d7 76 PlaylistEntries,
f8271158 77 Popen,
78 PostProcessingError,
79 ReExtractInfo,
80 RejectedVideoReached,
81 SameFileError,
82 UnavailableVideoError,
83 YoutubeDLCookieProcessor,
84 YoutubeDLHandler,
85 YoutubeDLRedirectHandler,
eedb7ba5
S
86 age_restricted,
87 args_to_str,
ce02ed60 88 date_from_str,
ce02ed60 89 determine_ext,
b5559424 90 determine_protocol,
c0384f22 91 encode_compat_str,
ce02ed60 92 encodeFilename,
a06916d9 93 error_to_compat_str,
590bc6f6 94 expand_path,
90137ca4 95 filter_dict,
e29663c6 96 float_or_none,
02dbf93f 97 format_bytes,
e0fd9573 98 format_decimal_suffix,
f8271158 99 format_field,
525ef922 100 formatSeconds,
0bb322b9 101 get_domain,
c9969434 102 int_or_none,
732044af 103 iri_to_uri,
34921b43 104 join_nonempty,
ce02ed60 105 locked_file,
0202b52a 106 make_dir,
dca08720 107 make_HTTPS_handler,
8b7539d2 108 merge_headers,
3158150c 109 network_exceptions,
ec11a9f4 110 number_of_digits,
cd6fc19e 111 orderedSet,
083c9df9 112 parse_filesize,
dca08720 113 platform_name,
ce02ed60 114 preferredencoding,
eedb7ba5 115 prepend_extension,
51fb4995 116 register_socks_protocols,
3efb96a6 117 remove_terminal_sequences,
cfb56d1a 118 render_table,
eedb7ba5 119 replace_extension,
ce02ed60 120 sanitize_filename,
1bb5c511 121 sanitize_path,
dcf77cf1 122 sanitize_url,
67dda517 123 sanitized_Request,
e5660ee6 124 std_headers,
1211bb6d 125 str_or_none,
e29663c6 126 strftime_or_none,
ce02ed60 127 subtitles_filename,
819e0531 128 supports_terminal_sequences,
f2ebc5c7 129 timetuple_from_msec,
732044af 130 to_high_limit_path,
324ad820 131 traverse_obj,
6033d980 132 try_get,
29eb5174 133 url_basename,
7d1eb38a 134 variadic,
58b1f00d 135 version_tuple,
53973b4d 136 windows_enable_vt_mode,
ce02ed60
PH
137 write_json_file,
138 write_string,
4f026faf 139)
f8271158 140from .version import RELEASE_GIT_HEAD, __version__
8222d8de 141
e9c0cdd3
YCH
142if compat_os_name == 'nt':
143 import ctypes
144
2459b6e1 145
86e5f3ed 146class YoutubeDL:
8222d8de
JMF
147 """YoutubeDL class.
148
149 YoutubeDL objects are the ones responsible for downloading the
150 actual video file and writing it to disk if the user has requested
151 it, among some other tasks. In most cases there should be one per
152 program. Since, given a video URL, the downloader doesn't know how to
153 extract all the needed information (a task that the InfoExtractors do),
154 it has to pass the URL to one of them.
155
156 For this, YoutubeDL objects have a method that allows
157 InfoExtractors to be registered in a given order. When it is passed
158 a URL, the YoutubeDL object hands it to the first InfoExtractor it
159 finds that reports being able to handle it. The InfoExtractor extracts
160 all the information about the video or videos the URL refers to, and
161 YoutubeDL processes the extracted information, possibly using a File
162 Downloader to download the video.
163
164 YoutubeDL objects accept a lot of parameters. In order not to saturate
165 the object constructor with arguments, it receives a dictionary of
166 options instead. These options are available through the params
167 attribute for the InfoExtractors to use. The YoutubeDL also
168 registers itself as the downloader in charge of the InfoExtractors
169 that are added to it, so this is a "mutual registration".
170
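
    A minimal usage sketch (illustrative only; the URL is a placeholder and
    every option shown is documented in the list below):

        from yt_dlp import YoutubeDL

        ydl_opts = {
            'format': 'bestvideo*+bestaudio/best',
            'outtmpl': {'default': '%(title)s [%(id)s].%(ext)s'},
            'quiet': True,
        }
        with YoutubeDL(ydl_opts) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=XXXXXXXXXXX'])
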
171 Available options:
172
173 username: Username for authentication purposes.
174 password: Password for authentication purposes.
180940e0 175 videopassword: Password for accessing a video.
1da50aa3
S
176 ap_mso: Adobe Pass multiple-system operator identifier.
177 ap_username: Multiple-system operator account username.
178 ap_password: Multiple-system operator account password.
8222d8de
JMF
179 usenetrc: Use netrc for authentication instead.
180 verbose: Print additional info to stdout.
181 quiet: Do not print messages to stdout.
ad8915b7 182 no_warnings: Do not print out anything for warnings.
bb66c247 183 forceprint: A dict with keys WHEN mapped to a list of templates to
184 print to stdout. The allowed keys are 'video' or any of the
185 items in utils.POSTPROCESS_WHEN.
ca30f449 186 For compatibility, a single list is also accepted
bb66c247 187 print_to_file: A dict with keys WHEN (same as forceprint) mapped to
188 a list of tuples with (template, filename)
8694c600 189 forcejson: Force printing info_dict as JSON.
63e0be34
PH
190 dump_single_json: Force printing the info_dict of the whole playlist
191 (or video) as a single JSON line.
c25228e5 192 force_write_download_archive: Force writing download archive regardless
193 of 'skip_download' or 'simulate'.
b7b04c78 194 simulate: Do not download the video files. If unset (or None),
195 simulate only if listsubtitles, listformats or list_thumbnails is used
eb8a4433 196 format: Video format code. see "FORMAT SELECTION" for more details.
093a1710 197 You can also pass a function. The function takes 'ctx' as
198 argument and returns the formats to download.
199 See "build_format_selector" for an implementation
63ad4d43 200 allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
b7da73eb 201 ignore_no_formats_error: Ignore "No video formats" error. Useful for
202 extracting metadata even if the video is not actually
203 available for download (experimental)
0930b11f 204 format_sort: A list of fields by which to sort the video formats.
205 See "Sorting Formats" for more details.
c25228e5 206 format_sort_force: Force the given format_sort. see "Sorting Formats"
207 for more details.
08d30158 208 prefer_free_formats: Whether to prefer video formats with free containers
209 over non-free ones of same quality.
c25228e5 210 allow_multiple_video_streams: Allow multiple video streams to be merged
211 into a single file
212 allow_multiple_audio_streams: Allow multiple audio streams to be merged
213 into a single file
0ba692ac 214 check_formats Whether to test if the formats are downloadable.
9f1a1c36 215 Can be True (check all), False (check none),
216 'selected' (check selected formats),
0ba692ac 217 or None (check only if requested by extractor)
4524baf0 218 paths: Dictionary of output paths. The allowed keys are 'home',
219 'temp' and the keys of OUTTMPL_TYPES (in utils.py)
de6000d9 220 outtmpl: Dictionary of templates for output names. Allowed keys
4524baf0 221 are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
34488702 222 For compatibility with youtube-dl, a single string can also be used
a820dc72
RA
223 outtmpl_na_placeholder: Placeholder for unavailable meta fields.
224 restrictfilenames: Do not allow "&" and spaces in file names
225 trim_file_name: Limit length of filename (extension excluded)
4524baf0 226 windowsfilenames: Force the filenames to be windows compatible
b1940459 227 ignoreerrors: Do not stop on download/postprocessing errors.
228 Can be 'only_download' to ignore only download errors.
229 Default is 'only_download' for CLI, but False for API
26e2805c 230 skip_playlist_after_errors: Number of allowed failures until the rest of
231 the playlist is skipped
d22dec74 232 force_generic_extractor: Force downloader to use the generic extractor
0c3d0f51 233 overwrites: Overwrite all video and metadata files if True,
234 overwrite only non-video files if None
235 and don't overwrite any file if False
34488702 236 For compatibility with youtube-dl,
237 "nooverwrites" may also be used instead
c14e88f0 238 playlist_items: Specific indices of playlist to download.
75822ca7 239 playlistrandom: Download playlist items in random order.
7e9a6125 240 lazy_playlist: Process playlist entries as they are received.
8222d8de
JMF
241 matchtitle: Download only matching titles.
242 rejecttitle: Reject downloads for matching titles.
8bf9319e 243 logger: Log messages to a logging.Logger instance.
8222d8de 244 logtostderr: Log messages to stderr instead of stdout.
819e0531 245 consoletitle: Display progress in console window's titlebar.
8222d8de
JMF
246 writedescription: Write the video description to a .description file
247 writeinfojson: Write the video description to a .info.json file
75d43ca0 248 clean_infojson: Remove private fields from the infojson
34488702 249 getcomments: Extract video comments. This will not be written to disk
06167fbb 250 unless writeinfojson is also given
1fb07d10 251 writeannotations: Write the video annotations to a .annotations.xml file
8222d8de 252 writethumbnail: Write the thumbnail image to a file
c25228e5 253 allow_playlist_files: Whether to write playlists' description, infojson etc
254 also to disk when using the 'write*' options
ec82d85a 255 write_all_thumbnails: Write all thumbnail formats to files
732044af 256 writelink: Write an internet shortcut file, depending on the
257 current platform (.url/.webloc/.desktop)
258 writeurllink: Write a Windows internet shortcut file (.url)
259 writewebloclink: Write a macOS internet shortcut file (.webloc)
260 writedesktoplink: Write a Linux internet shortcut file (.desktop)
8222d8de 261 writesubtitles: Write the video subtitles to a file
741dd8ea 262 writeautomaticsub: Write the automatically generated subtitles to a file
8222d8de 263 listsubtitles: Lists all available subtitles for the video
a504ced0 264 subtitlesformat: The format code for subtitles
c32b0aab 265 subtitleslangs: List of languages of the subtitles to download (can be regex).
266 The list may contain "all" to refer to all the available
267 subtitles. The language can be prefixed with a "-" to
268 exclude it from the requested languages. Eg: ['all', '-live_chat']
8222d8de
JMF
269 keepvideo: Keep the video file after post-processing
270 daterange: A DateRange object, download only if the upload_date is in the range.
271 skip_download: Skip the actual download of the video file
c35f9e72 272 cachedir: Location of the cache files in the filesystem.
a0e07d31 273 False to disable filesystem cache.
47192f92 274 noplaylist: Download single video instead of a playlist if in doubt.
8dbe9899
PH
275 age_limit: An integer representing the user's age in years.
276 Videos unsuitable for the given age are skipped.
5fe18bdb
PH
277 min_views: An integer representing the minimum view count the video
278 must have in order to not be skipped.
279 Videos without view count information are always
280 downloaded. None for no limit.
281 max_views: An integer representing the maximum view count.
282 Videos that are more popular than that are not
283 downloaded.
284 Videos without view count information are always
285 downloaded. None for no limit.
286 download_archive: File name of a file where all downloads are recorded.
c1c9a79c
PH
287 Videos already present in the file are not downloaded
288 again.
8a51f564 289 break_on_existing: Stop the download process after attempting to download a
290 file that is in the archive.
291 break_on_reject: Stop the download process when encountering a video that
292 has been filtered out.
b222c271 293 break_per_url: Whether break_on_reject and break_on_existing
294 should act on each input URL as opposed to for the entire queue
d76fa1f3 295 cookiefile: File name or text stream from where cookies should be read and dumped to
f59f5ef8
MB
296 cookiesfrombrowser: A tuple containing the name of the browser, the profile
297 name/path from where cookies are loaded, and the name of the
298 keyring. Eg: ('chrome', ) or ('vivaldi', 'default', 'BASICTEXT')
f81c62a6 299 legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
300 support RFC 5746 secure renegotiation
f59f5ef8 301 nocheckcertificate: Do not verify SSL certificates
bb58c9ed 302 client_certificate: Path to client certificate file in PEM format. May include the private key
303 client_certificate_key: Path to private key file for client certificate
304 client_certificate_password: Password for client certificate private key, if encrypted.
305 If not provided and the key is encrypted, yt-dlp will ask interactively
7e8c0af0
PH
306 prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
307 At the moment, this is only supported by YouTube.
8b7539d2 308 http_headers: A dictionary of custom headers to be used for all requests
a1ee09e8 309 proxy: URL of the proxy server to use
38cce791 310 geo_verification_proxy: URL of the proxy to use for IP address verification
504f20dd 311 on geo-restricted sites.
e344693b 312 socket_timeout: Time to wait for unresponsive hosts, in seconds
0783b09b
PH
313 bidi_workaround: Work around buggy terminals without bidirectional text
314 support, using fribidi
a0ddb8a2 315 debug_printtraffic:Print out sent and received HTTP traffic
04b4d394
PH
316 default_search: Prepend this string if an input url is not valid.
317 'auto' for elaborate guessing
62fec3b2 318 encoding: Use this encoding instead of the system-specified.
e8ee972c 319 extract_flat: Do not resolve URLs, return the immediate result.
057a5206
PH
320 Pass in 'in_playlist' to only show this behavior for
321 playlist items.
f2ebc5c7 322 wait_for_video: If given, wait for scheduled streams to become available.
323 The value should be a tuple containing the range
324 (min_secs, max_secs) to wait between retries
4f026faf 325 postprocessors: A list of dictionaries, each with an entry
71b640cc 326 * key: The name of the postprocessor. See
7a5c1cfe 327 yt_dlp/postprocessor/__init__.py for a list.
bb66c247 328 * when: When to run the postprocessor. Allowed values are
329 the entries of utils.POSTPROCESS_WHEN
56d868db 330 Assumed to be 'post_process' if not given
71b640cc
PH
331 progress_hooks: A list of functions that get called on download
332 progress, with a dictionary with the entries
5cda4eda 333 * status: One of "downloading", "error", or "finished".
ee69b99a 334 Check this first and ignore unknown values.
3ba7740d 335 * info_dict: The extracted info_dict
71b640cc 336
5cda4eda 337 If status is one of "downloading" or "finished", the
ee69b99a
PH
338 following properties may also be present:
339 * filename: The final filename (always present)
5cda4eda 340 * tmpfilename: The filename we're currently writing to
71b640cc
PH
341 * downloaded_bytes: Bytes on disk
342 * total_bytes: Size of the whole file, None if unknown
5cda4eda
PH
343 * total_bytes_estimate: Guess of the eventual file size,
344 None if unavailable.
345 * elapsed: The number of seconds since download started.
71b640cc
PH
346 * eta: The estimated time in seconds, None if unknown
347 * speed: The download speed in bytes/second, None if
348 unknown
5cda4eda
PH
349 * fragment_index: The counter of the currently
350 downloaded video fragment.
351 * fragment_count: The number of fragments (= individual
352 files that will be merged)
71b640cc
PH
353
354 Progress hooks are guaranteed to be called at least once
355 (with status "finished") if the download is successful.
819e0531 356 postprocessor_hooks: A list of functions that get called on postprocessing
357 progress, with a dictionary with the entries
358 * status: One of "started", "processing", or "finished".
359 Check this first and ignore unknown values.
360 * postprocessor: Name of the postprocessor
361 * info_dict: The extracted info_dict
362
363 Progress hooks are guaranteed to be called at least twice
364 (with status "started" and "finished") if the processing is successful.
45598f15 365 merge_output_format: Extension to use when merging formats.
6b591b29 366 final_ext: Expected final extension; used to detect when the file was
59a7a13e 367 already downloaded and converted
6271f1ca
PH
368 fixup: Automatically correct known faults of the file.
369 One of:
370 - "never": do nothing
371 - "warn": only emit a warning
372 - "detect_or_warn": check whether we can do anything
62cd676c 373 about it, warn otherwise (default)
504f20dd 374 source_address: Client-side IP address to bind to.
1cf376f5 375 sleep_interval_requests: Number of seconds to sleep between requests
376 during extraction
7aa589a5
S
377 sleep_interval: Number of seconds to sleep before each download when
378 used alone or a lower bound of a range for randomized
379 sleep before each download (minimum possible number
380 of seconds to sleep) when used along with
381 max_sleep_interval.
382 max_sleep_interval:Upper bound of a range for randomized sleep before each
383 download (maximum possible number of seconds to sleep).
384 Must only be used along with sleep_interval.
385 Actual sleep time will be a random float from range
386 [sleep_interval; max_sleep_interval].
1cf376f5 387 sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
cfb56d1a
PH
388 listformats: Print an overview of available video formats and exit.
389 list_thumbnails: Print a table of all thumbnails and exit.
0a41f331 390 match_filter: A function that gets called for every video with the signature
391 (info_dict, *, incomplete: bool) -> Optional[str]
392 For backward compatibility with youtube-dl, the signature
393 (info_dict) -> Optional[str] is also allowed.
394 - If it returns a message, the video is ignored.
395 - If it returns None, the video is downloaded.
396 - If it returns utils.NO_DEFAULT, the user is interactively
397 asked whether to download the video.
347de493 398 match_filter_func in utils.py is one example for this.
7e5db8c9 399 no_color: Do not emit color codes in output.
0a840f58 400 geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
504f20dd 401 HTTP header
0a840f58 402 geo_bypass_country:
773f291d
S
403 Two-letter ISO 3166-2 country code that will be used for
404 explicit geographic restriction bypassing via faking
504f20dd 405 X-Forwarded-For HTTP header
5f95927a
S
406 geo_bypass_ip_block:
407 IP range in CIDR notation that will be used similarly to
504f20dd 408 geo_bypass_country
52a8a1e1 409 external_downloader: A dictionary of protocol keys and the executable of the
410 external downloader to use for it. The allowed protocols
411 are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
412 Set the value to 'native' to use the native downloader
53ed7066 413 compat_opts: Compatibility options. See "Differences in default behavior".
3acf6d38 414 The following options do not work when used through the API:
b5ae35ee 415 filename, abort-on-error, multistreams, no-live-chat, format-sort,
dac5df5a 416 no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
e4f02757 417 Refer to __init__.py for their implementation
819e0531 418 progress_template: Dictionary of templates for progress outputs.
419 Allowed keys are 'download', 'postprocess',
420 'download-title' (console title) and 'postprocess-title'.
421 The template is mapped on a dictionary with keys 'progress' and 'info'
23326151 422 retry_sleep_functions: Dictionary of functions that take the number of attempts
423 as argument and returns the time to sleep in seconds.
424 Allowed keys are 'http', 'fragment', 'file_access'
5ec1b6b7 425 download_ranges: A function that gets called for every video with the signature
426 (info_dict, *, ydl) -> Iterable[Section].
427 Only the returned sections will be downloaded. Each Section contains:
428 * start_time: Start time of the section in seconds
429 * end_time: End time of the section in seconds
430 * title: Section title (Optional)
431 * index: Section number (Optional)
fe7e0c98 432
8222d8de 433 The following parameters are not used by YoutubeDL itself, they are used by
7a5c1cfe 434 the downloader (see yt_dlp/downloader/common.py):
51d9739f 435 nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
205a0654
EH
436 max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
437 continuedl, noprogress, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
59a7a13e 438 external_downloader_args, concurrent_fragment_downloads.
76b1bd67
JMF
439
440 The following options are used by the post processors:
c0b7d117
S
441 ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
442 to the binary or its containing directory.
43820c03 443 postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
34488702 444 and a list of additional command-line arguments for the
445 postprocessor/executable. The dict can also have "PP+EXE" keys
446 which are used when the given exe is used by the given PP.
447 Use 'default' as the name for arguments to passed to all PP
448 For compatibility with youtube-dl, a single list of args
449 can also be used
e409895f 450
451 The following options are used by the extractors:
62bff2c1 452 extractor_retries: Number of times to retry for known errors
453 dynamic_mpd: Whether to process dynamic DASH manifests (default: True)
e409895f 454 hls_split_discontinuity: Split HLS playlists to different formats at
62bff2c1 455 discontinuities such as ad breaks (default: False)
5d3a0e79 456 extractor_args: A dictionary of arguments to be passed to the extractors.
457 See "EXTRACTOR ARGUMENTS" for details.
458 Eg: {'youtube': {'skip': ['dash', 'hls']}}
88f23a18 459 mark_watched: Mark videos watched (even with --simulate). Only for YouTube
1890fc63 460
461 The following options are deprecated and may be removed in the future:
462
7e9a6125 463 playliststart: - Use playlist_items
464 Playlist item to start at.
465 playlistend: - Use playlist_items
466 Playlist item to end at.
467 playlistreverse: - Use playlist_items
468 Download playlist items in reverse order.
1890fc63 469 forceurl: - Use forceprint
470 Force printing final URL.
471 forcetitle: - Use forceprint
472 Force printing title.
473 forceid: - Use forceprint
474 Force printing ID.
475 forcethumbnail: - Use forceprint
476 Force printing thumbnail URL.
477 forcedescription: - Use forceprint
478 Force printing description.
479 forcefilename: - Use forceprint
480 Force printing final filename.
481 forceduration: - Use forceprint
482 Force printing duration.
483 allsubtitles: - Use subtitleslangs = ['all']
484 Downloads all the subtitles of the video
485 (requires writesubtitles or writeautomaticsub)
486 include_ads: - Doesn't work
487 Download ads as well
488 call_home: - Not implemented
489 Boolean, true iff we are allowed to contact the
490 yt-dlp servers for debugging.
491 post_hooks: - Register a custom postprocessor
492 A list of functions that get called as the final step
493 for each video file, after all postprocessors have been
494 called. The filename will be passed as the only argument.
495 hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
496 Use the native HLS downloader if True, ffmpeg/avconv
497 if False, or the downloader suggested by the extractor
498 if None.
499 prefer_ffmpeg: - avconv support is deprecated
500 If False, use avconv instead of ffmpeg if both are available,
501 otherwise prefer ffmpeg.
502 youtube_include_dash_manifest: - Use extractor_args
5d3a0e79 503 If True (default), DASH manifests and related
62bff2c1 504 data will be downloaded and processed by extractor.
505 You can reduce network I/O by disabling it if you don't
506 care about DASH. (only for youtube)
1890fc63 507 youtube_include_hls_manifest: - Use extractor_args
5d3a0e79 508 If True (default), HLS manifests and related
62bff2c1 509 data will be downloaded and processed by extractor.
510 You can reduce network I/O by disabling it if you don't
511 care about HLS. (only for youtube)
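
    As a further sketch (hook and filter bodies are only placeholders), the
    dict- and callable-valued options described above fit together like this:

        def progress_hook(d):
            if d['status'] == 'finished':
                print('Done downloading', d['filename'])

        def match_filter(info_dict, *, incomplete):
            duration = info_dict.get('duration')
            if duration and duration > 3600:
                return 'Skipping videos longer than an hour'  # message => skipped
            return None                                       # None => downloaded

        params = {
            'paths': {'home': '~/Videos', 'temp': 'tmp'},
            'outtmpl': {'default': '%(uploader)s/%(title)s [%(id)s].%(ext)s'},
            'progress_hooks': [progress_hook],
            'match_filter': match_filter,
            'forceprint': {'video': ['%(title)s - %(webpage_url)s']},
        }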
8222d8de
JMF
512 """
513
86e5f3ed 514 _NUMERIC_FIELDS = {
c9969434 515 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
e6f21b3d 516 'timestamp', 'release_timestamp',
c9969434
S
517 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
518 'average_rating', 'comment_count', 'age_limit',
519 'start_time', 'end_time',
520 'chapter_number', 'season_number', 'episode_number',
521 'track_number', 'disc_number', 'release_year',
86e5f3ed 522 }
c9969434 523
6db9c4d5 524 _format_fields = {
525 # NB: Keep in sync with the docstring of extractor/common.py
a44ca5a4 526 'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
6db9c4d5 527 'width', 'height', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr',
528 'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx',
529 'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
530 'preference', 'language', 'language_preference', 'quality', 'source_preference',
531 'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'downloader_options',
532 'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
533 }
48ee10ee 534 _format_selection_exts = {
535 'audio': {'m4a', 'mp3', 'ogg', 'aac'},
536 'video': {'mp4', 'flv', 'webm', '3gp'},
537 'storyboards': {'mhtml'},
538 }
539
3511266b 540 def __init__(self, params=None, auto_init=True):
883d4b1e 541 """Create a FileDownloader object with the given options.
542 @param auto_init Whether to load the default extractors and print header (if verbose).
49a57e70 543 Set to 'no_verbose_header' to not print the header
883d4b1e 544 """
e9f9a10f
JMF
545 if params is None:
546 params = {}
592b7485 547 self.params = params
8b7491c8 548 self._ies = {}
56c73665 549 self._ies_instances = {}
1e43a6f7 550 self._pps = {k: [] for k in POSTPROCESS_WHEN}
b35496d8 551 self._printed_messages = set()
1cf376f5 552 self._first_webpage_request = True
ab8e5e51 553 self._post_hooks = []
933605d7 554 self._progress_hooks = []
819e0531 555 self._postprocessor_hooks = []
8222d8de
JMF
556 self._download_retcode = 0
557 self._num_downloads = 0
9c906919 558 self._num_videos = 0
592b7485 559 self._playlist_level = 0
560 self._playlist_urls = set()
a0e07d31 561 self.cache = Cache(self)
34308b30 562
819e0531 563 windows_enable_vt_mode()
591bb9d3 564 stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
565 self._out_files = Namespace(
566 out=stdout,
567 error=sys.stderr,
568 screen=sys.stderr if self.params.get('quiet') else stdout,
569 console=None if compat_os_name == 'nt' else next(
cf4f42cb 570 filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
591bb9d3 571 )
572 self._allow_colors = Namespace(**{
573 type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
64fa820c 574 for type_, stream in self._out_files.items_ if type_ != 'console'
591bb9d3 575 })
819e0531 576
eff42759 577 MIN_SUPPORTED, MIN_RECOMMENDED = (3, 6), (3, 7)
578 current_version = sys.version_info[:2]
579 if current_version < MIN_RECOMMENDED:
580 msg = 'Support for Python version %d.%d has been deprecated and will break in future versions of yt-dlp'
581 if current_version < MIN_SUPPORTED:
582 msg = 'Python version %d.%d is no longer supported'
583 self.deprecation_warning(
584 f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))
a61f4b28 585
88acdbc2 586 if self.params.get('allow_unplayable_formats'):
587 self.report_warning(
ec11a9f4 588 f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
819e0531 589 'This is a developer option intended for debugging. \n'
590 ' If you experience any issues while using this option, '
ec11a9f4 591 f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')
88acdbc2 592
be5df5ee
S
593 def check_deprecated(param, option, suggestion):
594 if self.params.get(param) is not None:
86e5f3ed 595 self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
be5df5ee
S
596 return True
597 return False
598
599 if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
38cce791
YCH
600 if self.params.get('geo_verification_proxy') is None:
601 self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']
602
0d1bb027 603 check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
604 check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
53ed7066 605 check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')
0d1bb027 606
49a57e70 607 for msg in self.params.get('_warnings', []):
0d1bb027 608 self.report_warning(msg)
ee8dd27a 609 for msg in self.params.get('_deprecation_warnings', []):
610 self.deprecation_warning(msg)
0d1bb027 611
8a82af35 612 self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
613 if not compat_has_legacy:
614 self.params['compat_opts'].add('no-compat-legacy')
615 if 'list-formats' in self.params['compat_opts']:
ec11a9f4 616 self.params['listformats_table'] = False
617
b5ae35ee 618 if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
b868936c 619 # nooverwrites was unnecessarily changed to overwrites
620 # in 0c3d0f51778b153f65c21906031c2e091fcfb641
621 # This ensures compatibility with both keys
622 self.params['overwrites'] = not self.params['nooverwrites']
b5ae35ee 623 elif self.params.get('overwrites') is None:
624 self.params.pop('overwrites', None)
b868936c 625 else:
626 self.params['nooverwrites'] = not self.params['overwrites']
b9d973be 627
455a15e2 628 self.params.setdefault('forceprint', {})
629 self.params.setdefault('print_to_file', {})
bb66c247 630
631 # Compatibility with older syntax
ca30f449 632 if not isinstance(params['forceprint'], dict):
455a15e2 633 self.params['forceprint'] = {'video': params['forceprint']}
ca30f449 634
455a15e2 635 if self.params.get('bidi_workaround', False):
1c088fa8
PH
636 try:
637 import pty
638 master, slave = pty.openpty()
ac668111 639 width = shutil.get_terminal_size().columns
591bb9d3 640 width_args = [] if width is None else ['-w', str(width)]
641 sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
5d681e96 642 try:
d3c93ec2 643 self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
5d681e96 644 except OSError:
d3c93ec2 645 self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
5d681e96 646 self._output_channel = os.fdopen(master, 'rb')
1c088fa8 647 except OSError as ose:
66e7ace1 648 if ose.errno == errno.ENOENT:
49a57e70 649 self.report_warning(
650 'Could not find fribidi executable, ignoring --bidi-workaround. '
651 'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
1c088fa8
PH
652 else:
653 raise
0783b09b 654
97ec5bc5 655 if auto_init:
656 if auto_init != 'no_verbose_header':
657 self.print_debug_header()
658 self.add_default_info_extractors()
659
3089bc74
S
660 if (sys.platform != 'win32'
661 and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
455a15e2 662 and not self.params.get('restrictfilenames', False)):
e9137224 663 # Unicode filesystem API will throw errors (#1474, #13027)
34308b30 664 self.report_warning(
6febd1c1 665 'Assuming --restrict-filenames since file system encoding '
1b725173 666 'cannot encode all characters. '
6febd1c1 667 'Set the LC_ALL environment variable to fix this.')
4a98cdbf 668 self.params['restrictfilenames'] = True
34308b30 669
bf1824b3 670 self._parse_outtmpl()
486dd09e 671
187986a8 672 # Creating format selector here allows us to catch syntax errors before the extraction
673 self.format_selector = (
fa9f30b8 674 self.params.get('format') if self.params.get('format') in (None, '-')
093a1710 675 else self.params['format'] if callable(self.params['format'])
187986a8 676 else self.build_format_selector(self.params['format']))
677
8b7539d2 678 # Set http_headers defaults according to std_headers
679 self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))
680
013b50b7 681 hooks = {
682 'post_hooks': self.add_post_hook,
683 'progress_hooks': self.add_progress_hook,
684 'postprocessor_hooks': self.add_postprocessor_hook,
685 }
686 for opt, fn in hooks.items():
687 for ph in self.params.get(opt, []):
688 fn(ph)
71b640cc 689
5bfc8bee 690 for pp_def_raw in self.params.get('postprocessors', []):
691 pp_def = dict(pp_def_raw)
692 when = pp_def.pop('when', 'post_process')
693 self.add_post_processor(
f9934b96 694 get_postprocessor(pp_def.pop('key'))(self, **pp_def),
5bfc8bee 695 when=when)
696
97ec5bc5 697 self._setup_opener()
51fb4995
YCH
698 register_socks_protocols()
699
ed39cac5 700 def preload_download_archive(fn):
701 """Preload the archive, if any is specified"""
702 if fn is None:
703 return False
49a57e70 704 self.write_debug(f'Loading archive file {fn!r}')
ed39cac5 705 try:
706 with locked_file(fn, 'r', encoding='utf-8') as archive_file:
707 for line in archive_file:
708 self.archive.add(line.strip())
86e5f3ed 709 except OSError as ioe:
ed39cac5 710 if ioe.errno != errno.ENOENT:
711 raise
712 return False
713 return True
714
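        # Each archive line is expected to be '<extractor key in lower case> <video id>'
        # (e.g. 'youtube dQw4w9WgXcQ'); preload_download_archive() below simply adds the
        # stripped lines to self.archive so already-downloaded videos can be skipped later.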
715 self.archive = set()
716 preload_download_archive(self.params.get('download_archive'))
717
7d4111ed
PH
718 def warn_if_short_id(self, argv):
719 # short YouTube ID starting with dash?
720 idxs = [
721 i for i, a in enumerate(argv)
722 if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
723 if idxs:
724 correct_argv = (
7a5c1cfe 725 ['yt-dlp']
3089bc74
S
726 + [a for i, a in enumerate(argv) if i not in idxs]
727 + ['--'] + [argv[i] for i in idxs]
7d4111ed
PH
728 )
729 self.report_warning(
730 'Long argument string detected. '
49a57e70 731 'Use -- to separate parameters and URLs, like this:\n%s' %
7d4111ed
PH
732 args_to_str(correct_argv))
733
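    # Illustration with a made-up ID: for argv == ['-AbCdEfGhIj'] the regex above
    # matches (a dash followed by 10 ID characters), so the warning suggests
    # rerunning as `yt-dlp -- -AbCdEfGhIj` to keep the ID from being parsed as an option.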
8222d8de
JMF
734 def add_info_extractor(self, ie):
735 """Add an InfoExtractor object to the end of the list."""
8b7491c8 736 ie_key = ie.ie_key()
737 self._ies[ie_key] = ie
e52d7f85 738 if not isinstance(ie, type):
8b7491c8 739 self._ies_instances[ie_key] = ie
e52d7f85 740 ie.set_downloader(self)
8222d8de 741
8b7491c8 742 def _get_info_extractor_class(self, ie_key):
743 ie = self._ies.get(ie_key)
744 if ie is None:
745 ie = get_info_extractor(ie_key)
746 self.add_info_extractor(ie)
747 return ie
748
56c73665
JMF
749 def get_info_extractor(self, ie_key):
750 """
751 Get an instance of an IE with name ie_key, it will try to get one from
752 the _ies list, if there's no instance it will create a new one and add
753 it to the extractor list.
754 """
755 ie = self._ies_instances.get(ie_key)
756 if ie is None:
757 ie = get_info_extractor(ie_key)()
758 self.add_info_extractor(ie)
759 return ie
760
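    # For example, get_info_extractor('Youtube') returns the already-registered
    # YoutubeIE instance if there is one, otherwise it instantiates and registers
    # a new one ('Youtube' being the ie_key, i.e. the class name minus 'IE').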
023fa8c4
JMF
761 def add_default_info_extractors(self):
762 """
763 Add the InfoExtractors returned by gen_extractors to the end of the list
764 """
e52d7f85 765 for ie in gen_extractor_classes():
023fa8c4
JMF
766 self.add_info_extractor(ie)
767
56d868db 768 def add_post_processor(self, pp, when='post_process'):
8222d8de 769 """Add a PostProcessor object to the end of the chain."""
8aa0e7cd 770 assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
5bfa4862 771 self._pps[when].append(pp)
8222d8de
JMF
772 pp.set_downloader(self)
773
ab8e5e51
AM
774 def add_post_hook(self, ph):
775 """Add the post hook"""
776 self._post_hooks.append(ph)
777
933605d7 778 def add_progress_hook(self, ph):
819e0531 779 """Add the download progress hook"""
933605d7 780 self._progress_hooks.append(ph)
8ab470f1 781
819e0531 782 def add_postprocessor_hook(self, ph):
783 """Add the postprocessing progress hook"""
784 self._postprocessor_hooks.append(ph)
5bfc8bee 785 for pps in self._pps.values():
786 for pp in pps:
787 pp.add_progress_hook(ph)
819e0531 788
1c088fa8 789 def _bidi_workaround(self, message):
5d681e96 790 if not hasattr(self, '_output_channel'):
1c088fa8
PH
791 return message
792
5d681e96 793 assert hasattr(self, '_output_process')
11b85ce6 794 assert isinstance(message, compat_str)
6febd1c1 795 line_count = message.count('\n') + 1
0f06bcd7 796 self._output_process.stdin.write((message + '\n').encode())
5d681e96 797 self._output_process.stdin.flush()
0f06bcd7 798 res = ''.join(self._output_channel.readline().decode()
9e1a5b84 799 for _ in range(line_count))
6febd1c1 800 return res[:-len('\n')]
1c088fa8 801
b35496d8 802 def _write_string(self, message, out=None, only_once=False):
803 if only_once:
804 if message in self._printed_messages:
805 return
806 self._printed_messages.add(message)
807 write_string(message, out=out, encoding=self.params.get('encoding'))
734f90bb 808
cf4f42cb 809 def to_stdout(self, message, skip_eol=False, quiet=None):
0760b0a7 810 """Print message to stdout"""
cf4f42cb 811 if quiet is not None:
ae6a1b95 812 self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. Use "YoutubeDL.to_screen" instead')
8a82af35 813 if skip_eol is not False:
814 self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. Use "YoutubeDL.to_screen" instead')
0bf9dc1e 815 self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)
cf4f42cb 816
817 def to_screen(self, message, skip_eol=False, quiet=None):
818 """Print message to screen if not in quiet mode"""
8bf9319e 819 if self.params.get('logger'):
43afe285 820 self.params['logger'].debug(message)
cf4f42cb 821 return
822 if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
823 return
824 self._write_string(
825 '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
591bb9d3 826 self._out_files.screen)
8222d8de 827
b35496d8 828 def to_stderr(self, message, only_once=False):
0760b0a7 829 """Print message to stderr"""
11b85ce6 830 assert isinstance(message, compat_str)
8bf9319e 831 if self.params.get('logger'):
43afe285
IB
832 self.params['logger'].error(message)
833 else:
5792c950 834 self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)
cf4f42cb 835
836 def _send_console_code(self, code):
591bb9d3 837 if compat_os_name == 'nt' or not self._out_files.console:
cf4f42cb 838 return
591bb9d3 839 self._write_string(code, self._out_files.console)
8222d8de 840
1e5b9a95
PH
841 def to_console_title(self, message):
842 if not self.params.get('consoletitle', False):
843 return
3efb96a6 844 message = remove_terminal_sequences(message)
4bede0d8
C
845 if compat_os_name == 'nt':
846 if ctypes.windll.kernel32.GetConsoleWindow():
847 # c_wchar_p() might not be necessary if `message` is
848 # already of type unicode()
849 ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
cf4f42cb 850 else:
851 self._send_console_code(f'\033]0;{message}\007')
1e5b9a95 852
bdde425c 853 def save_console_title(self):
cf4f42cb 854 if not self.params.get('consoletitle') or self.params.get('simulate'):
bdde425c 855 return
592b7485 856 self._send_console_code('\033[22;0t') # Save the title on stack
bdde425c
PH
857
858 def restore_console_title(self):
cf4f42cb 859 if not self.params.get('consoletitle') or self.params.get('simulate'):
bdde425c 860 return
592b7485 861 self._send_console_code('\033[23;0t') # Restore the title from stack
bdde425c
PH
862
863 def __enter__(self):
864 self.save_console_title()
865 return self
866
867 def __exit__(self, *args):
868 self.restore_console_title()
f89197d7 869
dca08720 870 if self.params.get('cookiefile') is not None:
1bab3437 871 self.cookiejar.save(ignore_discard=True, ignore_expires=True)
bdde425c 872
fa9f30b8 873 def trouble(self, message=None, tb=None, is_error=True):
8222d8de
JMF
874 """Determine action to take when a download problem appears.
875
876 Depending on whether the downloader has been configured to ignore
877 download errors or not, this method may throw an exception or
878 not when errors are found, after printing the message.
879
fa9f30b8 880 @param tb If given, is additional traceback information
881 @param is_error Whether to raise error according to ignoreerrors
8222d8de
JMF
882 """
883 if message is not None:
884 self.to_stderr(message)
885 if self.params.get('verbose'):
886 if tb is None:
887 if sys.exc_info()[0]: # if .trouble has been called from an except block
6febd1c1 888 tb = ''
8222d8de 889 if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
6febd1c1 890 tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
c0384f22 891 tb += encode_compat_str(traceback.format_exc())
8222d8de
JMF
892 else:
893 tb_data = traceback.format_list(traceback.extract_stack())
6febd1c1 894 tb = ''.join(tb_data)
c19bc311 895 if tb:
896 self.to_stderr(tb)
fa9f30b8 897 if not is_error:
898 return
b1940459 899 if not self.params.get('ignoreerrors'):
8222d8de
JMF
900 if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
901 exc_info = sys.exc_info()[1].exc_info
902 else:
903 exc_info = sys.exc_info()
904 raise DownloadError(message, exc_info)
905 self._download_retcode = 1
906
19a03940 907 Styles = Namespace(
908 HEADERS='yellow',
909 EMPHASIS='light blue',
492272fe 910 FILENAME='green',
19a03940 911 ID='green',
912 DELIM='blue',
913 ERROR='red',
914 WARNING='yellow',
915 SUPPRESS='light black',
916 )
ec11a9f4 917
7578d77d 918 def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
e5a998f3 919 text = str(text)
ec11a9f4 920 if test_encoding:
921 original_text = text
5c104538 922 # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
923 encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
ec11a9f4 924 text = text.encode(encoding, 'ignore').decode(encoding)
925 if fallback is not None and text != original_text:
926 text = fallback
7578d77d 927 return format_text(text, f) if allow_colors else text if fallback is None else fallback
ec11a9f4 928
591bb9d3 929 def _format_out(self, *args, **kwargs):
930 return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)
931
ec11a9f4 932 def _format_screen(self, *args, **kwargs):
591bb9d3 933 return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)
ec11a9f4 934
935 def _format_err(self, *args, **kwargs):
591bb9d3 936 return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)
819e0531 937
c84aeac6 938 def report_warning(self, message, only_once=False):
8222d8de
JMF
939 '''
940 Print the message to stderr; it will be prefixed with 'WARNING:'.
941 If stderr is a tty file, the 'WARNING:' will be colored.
942 '''
6d07ce01
JMF
943 if self.params.get('logger') is not None:
944 self.params['logger'].warning(message)
8222d8de 945 else:
ad8915b7
PH
946 if self.params.get('no_warnings'):
947 return
ec11a9f4 948 self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)
8222d8de 949
ee8dd27a 950 def deprecation_warning(self, message):
951 if self.params.get('logger') is not None:
a44ca5a4 952 self.params['logger'].warning(f'DeprecationWarning: {message}')
ee8dd27a 953 else:
954 self.to_stderr(f'{self._format_err("DeprecationWarning:", self.Styles.ERROR)} {message}', True)
955
fa9f30b8 956 def report_error(self, message, *args, **kwargs):
8222d8de
JMF
957 '''
958 Do the same as trouble, but prefixes the message with 'ERROR:', colored
959 in red if stderr is a tty file.
960 '''
fa9f30b8 961 self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)
8222d8de 962
b35496d8 963 def write_debug(self, message, only_once=False):
0760b0a7 964 '''Log debug message or print message to stderr'''
965 if not self.params.get('verbose', False):
966 return
8a82af35 967 message = f'[debug] {message}'
0760b0a7 968 if self.params.get('logger'):
969 self.params['logger'].debug(message)
970 else:
b35496d8 971 self.to_stderr(message, only_once)
0760b0a7 972
8222d8de
JMF
973 def report_file_already_downloaded(self, file_name):
974 """Report file has already been fully downloaded."""
975 try:
6febd1c1 976 self.to_screen('[download] %s has already been downloaded' % file_name)
ce02ed60 977 except UnicodeEncodeError:
6febd1c1 978 self.to_screen('[download] The file has already been downloaded')
8222d8de 979
0c3d0f51 980 def report_file_delete(self, file_name):
981 """Report that existing file will be deleted."""
982 try:
c25228e5 983 self.to_screen('Deleting existing file %s' % file_name)
0c3d0f51 984 except UnicodeEncodeError:
c25228e5 985 self.to_screen('Deleting existing file')
0c3d0f51 986
319b6059 987 def raise_no_formats(self, info, forced=False, *, msg=None):
0a5a191a 988 has_drm = info.get('_has_drm')
319b6059 989 ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
990 msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
991 if forced or not ignored:
1151c407 992 raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
319b6059 993 expected=has_drm or ignored or expected)
88acdbc2 994 else:
995 self.report_warning(msg)
996
de6000d9 997 def parse_outtmpl(self):
bf1824b3 998 self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
999 self._parse_outtmpl()
1000 return self.params['outtmpl']
1001
1002 def _parse_outtmpl(self):
7b2c3f47 1003 sanitize = IDENTITY
bf1824b3 1004 if self.params.get('restrictfilenames'): # Remove spaces in the default template
71ce444a 1005 sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')
bf1824b3 1006
1007 outtmpl = self.params.setdefault('outtmpl', {})
1008 if not isinstance(outtmpl, dict):
1009 self.params['outtmpl'] = outtmpl = {'default': outtmpl}
1010 outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})
de6000d9 1011
21cd8fae 1012 def get_output_path(self, dir_type='', filename=None):
1013 paths = self.params.get('paths', {})
1014 assert isinstance(paths, dict)
1015 path = os.path.join(
1016 expand_path(paths.get('home', '').strip()),
1017 expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
1018 filename or '')
21cd8fae 1019 return sanitize_path(path, force=self.params.get('windowsfilenames'))
1020
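    # Example with hypothetical params: for paths={'home': '/data/videos', 'temp': 'tmp'},
    # get_output_path('temp', 'clip.f137.mp4') joins the pieces into
    # '/data/videos/tmp/clip.f137.mp4' (then passes it through sanitize_path).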
76a264ac 1021 @staticmethod
901130bb 1022 def _outtmpl_expandpath(outtmpl):
1023 # expand_path translates '%%' into '%' and '$$' into '$'
1024 # correspondingly that is not what we want since we need to keep
1025 # '%%' intact for template dict substitution step. Working around
1026 # with boundary-alike separator hack.
1027 sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
86e5f3ed 1028 outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')
901130bb 1029
1030 # outtmpl should be expand_path'ed before template dict substitution
1031 # because meta fields may contain env variables we don't want to
1032 # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
1033 # title "Hello $PATH", we don't want `$PATH` to be expanded.
1034 return expand_path(outtmpl).replace(sep, '')
1035
1036 @staticmethod
1037 def escape_outtmpl(outtmpl):
1038 ''' Escape any remaining strings like %s, %abc% etc. '''
1039 return re.sub(
1040 STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
1041 lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
1042 outtmpl)
1043
1044 @classmethod
1045 def validate_outtmpl(cls, outtmpl):
76a264ac 1046 ''' @return None or Exception object '''
7d1eb38a 1047 outtmpl = re.sub(
37893bb0 1048 STR_FORMAT_RE_TMPL.format('[^)]*', '[ljqBUDS]'),
7d1eb38a 1049 lambda mobj: f'{mobj.group(0)[:-1]}s',
1050 cls._outtmpl_expandpath(outtmpl))
76a264ac 1051 try:
7d1eb38a 1052 cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
76a264ac 1053 return None
1054 except ValueError as err:
1055 return err
1056
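    # Usage sketch: YoutubeDL.validate_outtmpl('%(title)s [%(id)s].%(ext)s') returns
    # None, while a template that cannot be %-formatted returns the ValueError
    # instead of raising it, so callers can surface it as a normal error message.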
03b4de72 1057 @staticmethod
1058 def _copy_infodict(info_dict):
1059 info_dict = dict(info_dict)
09b49e1f 1060 info_dict.pop('__postprocessors', None)
415f8d51 1061 info_dict.pop('__pending_error', None)
03b4de72 1062 return info_dict
1063
e0fd9573 1064 def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
1065 """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
1066 @param sanitize Whether to sanitize the output as a filename.
1067 For backward compatibility, a function can also be passed
1068 """
1069
6e84b215 1070 info_dict.setdefault('epoch', int(time.time())) # keep epoch consistent once set
143db31d 1071
03b4de72 1072 info_dict = self._copy_infodict(info_dict)
752cda38 1073 info_dict['duration_string'] = ( # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
53c18592 1074 formatSeconds(info_dict['duration'], '-' if sanitize else ':')
143db31d 1075 if info_dict.get('duration', None) is not None
1076 else None)
1d485a1a 1077 info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
9c906919 1078 info_dict['video_autonumber'] = self._num_videos
752cda38 1079 if info_dict.get('resolution') is None:
1080 info_dict['resolution'] = self.format_resolution(info_dict, default=None)
143db31d 1081
e6f21b3d 1082 # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
143db31d 1083 # of %(field)s to %(field)0Nd for backward compatibility
1084 field_size_compat_map = {
0a5a191a 1085 'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
ec11a9f4 1086 'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
752cda38 1087 'autonumber': self.params.get('autonumber_size') or 5,
143db31d 1088 }
752cda38 1089
385a27fa 1090 TMPL_DICT = {}
37893bb0 1091 EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljqBUDS]'))
385a27fa 1092 MATH_FUNCTIONS = {
1093 '+': float.__add__,
1094 '-': float.__sub__,
1095 }
e625be0d 1096 # Field is of the form key1.key2...
1097 # where keys (except first) can be string, int or slice
2b8a2973 1098 FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
1d485a1a 1099 MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
385a27fa 1100 MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
1d485a1a 1101 INTERNAL_FORMAT_RE = re.compile(rf'''(?x)
e625be0d 1102 (?P<negate>-)?
1d485a1a 1103 (?P<fields>{FIELD_RE})
1104 (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
e625be0d 1105 (?:>(?P<strf_format>.+?))?
34baa9fd 1106 (?P<remaining>
1107 (?P<alternate>(?<!\\),[^|&)]+)?
1108 (?:&(?P<replacement>.*?))?
1109 (?:\|(?P<default>.*?))?
1d485a1a 1110 )$''')
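        # A few shapes this grammar accepts (illustrative; see the output template
        # documentation for the full syntax):
        #   %(title)s                                     plain field
        #   %(duration>%H-%M-%S)s                         strftime-style formatting
        #   %(release_date>%Y,upload_date>%Y|Unknown)s    alternate fields with a default
        #   %(chapters&has chapters|no chapters)s         replacement text if non-empty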
752cda38 1111
2b8a2973 1112 def _traverse_infodict(k):
1113 k = k.split('.')
1114 if k[0] == '':
1115 k.pop(0)
1116 return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)
76a264ac 1117
752cda38 1118 def get_value(mdict):
1119 # Object traversal
2b8a2973 1120 value = _traverse_infodict(mdict['fields'])
752cda38 1121 # Negative
1122 if mdict['negate']:
1123 value = float_or_none(value)
1124 if value is not None:
1125 value *= -1
1126 # Do maths
385a27fa 1127 offset_key = mdict['maths']
1128 if offset_key:
752cda38 1129 value = float_or_none(value)
1130 operator = None
385a27fa 1131 while offset_key:
1132 item = re.match(
1133 MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
1134 offset_key).group(0)
1135 offset_key = offset_key[len(item):]
1136 if operator is None:
752cda38 1137 operator = MATH_FUNCTIONS[item]
385a27fa 1138 continue
1139 item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
1140 offset = float_or_none(item)
1141 if offset is None:
2b8a2973 1142 offset = float_or_none(_traverse_infodict(item))
385a27fa 1143 try:
1144 value = operator(value, multiplier * offset)
1145 except (TypeError, ZeroDivisionError):
1146 return None
1147 operator = None
752cda38 1148 # Datetime formatting
1149 if mdict['strf_format']:
7c37ff97 1150 value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))
752cda38 1151
1152 return value
1153
b868936c 1154 na = self.params.get('outtmpl_na_placeholder', 'NA')
1155
e0fd9573 1156 def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
5c3895ff 1157 return sanitize_filename(str(value), restricted=restricted, is_id=(
1158 bool(re.search(r'(^|[_.])id(\.|$)', key))
8a82af35 1159 if 'filename-sanitization' in self.params['compat_opts']
5c3895ff 1160 else NO_DEFAULT))
e0fd9573 1161
1162 sanitizer = sanitize if callable(sanitize) else filename_sanitizer
1163 sanitize = bool(sanitize)
1164
6e84b215 1165 def _dumpjson_default(obj):
1166 if isinstance(obj, (set, LazyList)):
1167 return list(obj)
adbc4ec4 1168 return repr(obj)
6e84b215 1169
752cda38 1170 def create_key(outer_mobj):
1171 if not outer_mobj.group('has_key'):
b836dc94 1172 return outer_mobj.group(0)
752cda38 1173 key = outer_mobj.group('key')
752cda38 1174 mobj = re.match(INTERNAL_FORMAT_RE, key)
e0fd9573 1175 initial_field = mobj.group('fields') if mobj else ''
e978789f 1176 value, replacement, default = None, None, na
7c37ff97 1177 while mobj:
e625be0d 1178 mobj = mobj.groupdict()
7c37ff97 1179 default = mobj['default'] if mobj['default'] is not None else default
752cda38 1180 value = get_value(mobj)
e978789f 1181 replacement = mobj['replacement']
7c37ff97 1182 if value is None and mobj['alternate']:
34baa9fd 1183 mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
7c37ff97 1184 else:
1185 break
752cda38 1186
b868936c 1187 fmt = outer_mobj.group('format')
752cda38 1188 if fmt == 's' and value is not None and key in field_size_compat_map.keys():
86e5f3ed 1189 fmt = f'0{field_size_compat_map[key]:d}d'
752cda38 1190
e978789f 1191 value = default if value is None else value if replacement is None else replacement
752cda38 1192
4476d2c7 1193 flags = outer_mobj.group('conversion') or ''
7d1eb38a 1194 str_fmt = f'{fmt[:-1]}s'
524e2e4f 1195 if fmt[-1] == 'l': # list
4476d2c7 1196 delim = '\n' if '#' in flags else ', '
9e907ebd 1197 value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
524e2e4f 1198 elif fmt[-1] == 'j': # json
4476d2c7 1199 value, fmt = json.dumps(value, default=_dumpjson_default, indent=4 if '#' in flags else None), str_fmt
524e2e4f 1200 elif fmt[-1] == 'q': # quoted
4476d2c7 1201 value = map(str, variadic(value) if '#' in flags else [value])
1202 value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
524e2e4f 1203 elif fmt[-1] == 'B': # bytes
0f06bcd7 1204 value = f'%{str_fmt}'.encode() % str(value).encode()
f5aa5cfb 1205 value, fmt = value.decode('utf-8', 'ignore'), 's'
524e2e4f 1206 elif fmt[-1] == 'U': # unicode normalized
524e2e4f 1207 value, fmt = unicodedata.normalize(
1208 # "+" = compatibility equivalence, "#" = NFD
4476d2c7 1209 'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
524e2e4f 1210 value), str_fmt
e0fd9573 1211 elif fmt[-1] == 'D': # decimal suffix
abbeeebc 1212 num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
1213 value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
1214 factor=1024 if '#' in flags else 1000)
37893bb0 1215 elif fmt[-1] == 'S': # filename sanitization
e0fd9573 1216 value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
7d1eb38a 1217 elif fmt[-1] == 'c':
524e2e4f 1218 if value:
1219 value = str(value)[0]
76a264ac 1220 else:
524e2e4f 1221 fmt = str_fmt
76a264ac 1222 elif fmt[-1] not in 'rs': # numeric
a439a3a4 1223 value = float_or_none(value)
752cda38 1224 if value is None:
1225 value, fmt = default, 's'
901130bb 1226
752cda38 1227 if sanitize:
1228 if fmt[-1] == 'r':
1229 # If value is an object, sanitize might convert it to a string
1230 # So we convert it to repr first
7d1eb38a 1231 value, fmt = repr(value), str_fmt
639f1cea 1232 if fmt[-1] in 'csr':
e0fd9573 1233 value = sanitizer(initial_field, value)
901130bb 1234
b868936c 1235 key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
385a27fa 1236 TMPL_DICT[key] = value
b868936c 1237 return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))
752cda38 1238
385a27fa 1239 return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT
143db31d 1240
819e0531 1241 def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
1242 outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
1243 return self.escape_outtmpl(outtmpl) % info_dict
1244
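# Illustrative sketch of the template engine above (hypothetical field values):
#   '%(title)s [%(id)s].%(ext)s'   -> 'Some Title [abc123].mp4'
#   '%(view_count)D views'         -> e.g. '2M views' (decimal-suffixed via the 'D' conversion)
#   '%(formats)#j'                 -> pretty-printed JSON (the '#' flag selects indent=4)
#   '%(tags.0,title|Unknown)S'     -> first tag, else title, else 'Unknown', filename-sanitized
# Fields that remain None are replaced by the 'outtmpl_na_placeholder' param ('NA' by default).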
5127e92a 1245 def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
1246 assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
1247 if outtmpl is None:
bf1824b3 1248 outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
8222d8de 1249 try:
5127e92a 1250 outtmpl = self._outtmpl_expandpath(outtmpl)
e0fd9573 1251 filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
6a0546e3 1252 if not filename:
1253 return None
15da37c7 1254
5127e92a 1255 if tmpl_type in ('', 'temp'):
6a0546e3 1256 final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
1257 if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
1258 filename = replace_extension(filename, ext, final_ext)
5127e92a 1259 elif tmpl_type:
6a0546e3 1260 force_ext = OUTTMPL_TYPES[tmpl_type]
1261 if force_ext:
1262 filename = replace_extension(filename, force_ext, info_dict.get('ext'))
de6000d9 1263
bdc3fd2f
U
1264 # https://github.com/blackjack4494/youtube-dlc/issues/85
1265 trim_file_name = self.params.get('trim_file_name', False)
1266 if trim_file_name:
5c22c63d 1267 no_ext, *ext = filename.rsplit('.', 2)
1268 filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')
bdc3fd2f 1269
0202b52a 1270 return filename
8222d8de 1271 except ValueError as err:
6febd1c1 1272 self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
8222d8de
JMF
1273 return None
1274
5127e92a 1275 def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
1276 """Generate the output filename"""
1277 if outtmpl:
1278 assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
1279 dir_type = None
1280 filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
80c03fa9 1281 if not filename and dir_type not in ('', 'temp'):
1282 return ''
de6000d9 1283
c84aeac6 1284 if warn:
21cd8fae 1285 if not self.params.get('paths'):
de6000d9 1286 pass
1287 elif filename == '-':
c84aeac6 1288 self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
de6000d9 1289 elif os.path.isabs(filename):
c84aeac6 1290 self.report_warning('--paths is ignored since an absolute path is given in the output template', only_once=True)
de6000d9 1291 if filename == '-' or not filename:
1292 return filename
1293
21cd8fae 1294 return self.get_output_path(dir_type, filename)
0202b52a 1295
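# Rough usage sketch (hypothetical values): for info_dict = {'id': 'abc123', 'title': 'My Video', 'ext': 'mp4'}
# and the default template '%(title)s [%(id)s].%(ext)s', prepare_filename() yields
# 'My Video [abc123].mp4', which get_output_path() then places under the matching --paths entry.
# A dir_type such as 'thumbnail' or 'infojson' selects the corresponding template from
# self.params['outtmpl'] and forces that type's extension (see OUTTMPL_TYPES).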
120fe513 1296 def _match_entry(self, info_dict, incomplete=False, silent=False):
ecdec191 1297 """ Returns None if the file should be downloaded """
8222d8de 1298
c77495e3 1299 video_title = info_dict.get('title', info_dict.get('id', 'video'))
1300
8b0d7497 1301 def check_filter():
8b0d7497 1302 if 'title' in info_dict:
1303 # This can happen when we're just evaluating the playlist
1304 title = info_dict['title']
1305 matchtitle = self.params.get('matchtitle', False)
1306 if matchtitle:
1307 if not re.search(matchtitle, title, re.IGNORECASE):
1308 return '"' + title + '" title did not match pattern "' + matchtitle + '"'
1309 rejecttitle = self.params.get('rejecttitle', False)
1310 if rejecttitle:
1311 if re.search(rejecttitle, title, re.IGNORECASE):
1312 return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
1313 date = info_dict.get('upload_date')
1314 if date is not None:
1315 dateRange = self.params.get('daterange', DateRange())
1316 if date not in dateRange:
86e5f3ed 1317 return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
8b0d7497 1318 view_count = info_dict.get('view_count')
1319 if view_count is not None:
1320 min_views = self.params.get('min_views')
1321 if min_views is not None and view_count < min_views:
1322 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
1323 max_views = self.params.get('max_views')
1324 if max_views is not None and view_count > max_views:
1325 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
1326 if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
1327 return 'Skipping "%s" because it is age restricted' % video_title
8b0d7497 1328
8f18aca8 1329 match_filter = self.params.get('match_filter')
1330 if match_filter is not None:
1331 try:
1332 ret = match_filter(info_dict, incomplete=incomplete)
1333 except TypeError:
1334 # For backward compatibility
1335 ret = None if incomplete else match_filter(info_dict)
492272fe 1336 if ret is NO_DEFAULT:
1337 while True:
1338 filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
1339 reply = input(self._format_screen(
1340 f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
1341 if reply in {'y', ''}:
1342 return None
1343 elif reply == 'n':
1344 return f'Skipping {video_title}'
492272fe 1345 elif ret is not None:
8f18aca8 1346 return ret
8b0d7497 1347 return None
1348
c77495e3 1349 if self.in_download_archive(info_dict):
1350 reason = '%s has already been recorded in the archive' % video_title
1351 break_opt, break_err = 'break_on_existing', ExistingVideoReached
1352 else:
1353 reason = check_filter()
1354 break_opt, break_err = 'break_on_reject', RejectedVideoReached
8b0d7497 1355 if reason is not None:
120fe513 1356 if not silent:
1357 self.to_screen('[download] ' + reason)
c77495e3 1358 if self.params.get(break_opt, False):
1359 raise break_err()
8b0d7497 1360 return reason
fe7e0c98 1361
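# Minimal sketch of a custom 'match_filter' param implementing the interface consumed above:
#   def skip_long_videos(info_dict, *, incomplete=False):
#       if incomplete:
#           return None  # metadata not complete yet - do not reject
#       if (info_dict.get('duration') or 0) > 600:
#           return 'Skipping: longer than 10 minutes'  # any string is reported and skips the video
#       return None  # None means "download"
# Returning NO_DEFAULT instead triggers the interactive 'Download "..."? (Y/n)' prompt above.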
b6c45014
JMF
1362 @staticmethod
1363 def add_extra_info(info_dict, extra_info):
1364 '''Set the keys from extra_info in info dict if they are missing'''
1365 for key, value in extra_info.items():
1366 info_dict.setdefault(key, value)
1367
409e1828 1368 def extract_info(self, url, download=True, ie_key=None, extra_info=None,
61aa5ba3 1369 process=True, force_generic_extractor=False):
41d1cca3 1370 """
1371 Return a list with a dictionary for each video extracted.
1372
1373 Arguments:
1374 url -- URL to extract
1375
1376 Keyword arguments:
1377 download -- whether to download videos during extraction
1378 ie_key -- extractor key hint
1379 extra_info -- dictionary containing the extra values to add to each result
1380 process -- whether to resolve all unresolved references (URLs, playlist items),
1381 must be True for download to work.
1382 force_generic_extractor -- force using the generic extractor
1383 """
fe7e0c98 1384
409e1828 1385 if extra_info is None:
1386 extra_info = {}
1387
61aa5ba3 1388 if not ie_key and force_generic_extractor:
d22dec74
S
1389 ie_key = 'Generic'
1390
8222d8de 1391 if ie_key:
8b7491c8 1392 ies = {ie_key: self._get_info_extractor_class(ie_key)}
8222d8de
JMF
1393 else:
1394 ies = self._ies
1395
8b7491c8 1396 for ie_key, ie in ies.items():
8222d8de
JMF
1397 if not ie.suitable(url):
1398 continue
1399
1400 if not ie.working():
6febd1c1
PH
1401 self.report_warning('The program functionality for this site has been marked as broken, '
1402 'and will probably not work.')
8222d8de 1403
1151c407 1404 temp_id = ie.get_temp_id(url)
a0566bbf 1405 if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
5e5be0c0 1406 self.to_screen(f'[{ie_key}] {temp_id}: has already been recorded in the archive')
1407 if self.params.get('break_on_existing', False):
1408 raise ExistingVideoReached()
a0566bbf 1409 break
8b7491c8 1410 return self.__extract_info(url, self.get_info_extractor(ie_key), download, extra_info, process)
a0566bbf 1411 else:
1412 self.report_error('no suitable InfoExtractor for URL %s' % url)
1413
7e88d7d7 1414 def _handle_extraction_exceptions(func):
b5ae35ee 1415 @functools.wraps(func)
a0566bbf 1416 def wrapper(self, *args, **kwargs):
6da22e7d 1417 while True:
1418 try:
1419 return func(self, *args, **kwargs)
1420 except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
8222d8de 1421 raise
6da22e7d 1422 except ReExtractInfo as e:
1423 if e.expected:
1424 self.to_screen(f'{e}; Re-extracting data')
1425 else:
1426 self.to_stderr('\r')
1427 self.report_warning(f'{e}; Re-extracting data')
1428 continue
1429 except GeoRestrictedError as e:
1430 msg = e.msg
1431 if e.countries:
1432 msg += '\nThis video is available in %s.' % ', '.join(
1433 map(ISO3166Utils.short2full, e.countries))
1434 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around this.'
1435 self.report_error(msg)
1436 except ExtractorError as e: # An error we somewhat expected
1437 self.report_error(str(e), e.format_traceback())
1438 except Exception as e:
1439 if self.params.get('ignoreerrors'):
1440 self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
1441 else:
1442 raise
1443 break
a0566bbf 1444 return wrapper
1445
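# The decorator above wraps the extraction entry points below (__extract_info,
# __process_iterable_entry): ReExtractInfo restarts the wrapped call, DownloadCancelled and the
# lazy-index errors propagate unchanged, extractor/geo-restriction errors are reported, and any
# other exception re-raises unless 'ignoreerrors' is set.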
f2ebc5c7 1446 def _wait_for_video(self, ie_result):
1447 if (not self.params.get('wait_for_video')
1448 or ie_result.get('_type', 'video') != 'video'
1449 or ie_result.get('formats') or ie_result.get('url')):
1450 return
1451
1452 format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
1453 last_msg = ''
1454
1455 def progress(msg):
1456 nonlocal last_msg
1457 self.to_screen(msg + ' ' * (len(last_msg) - len(msg)) + '\r', skip_eol=True)
1458 last_msg = msg
1459
1460 min_wait, max_wait = self.params.get('wait_for_video')
1461 diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
1462 if diff is None and ie_result.get('live_status') == 'is_upcoming':
16c620bc 1463 diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
f2ebc5c7 1464 self.report_warning('Release time of video is not known')
1465 elif (diff or 0) <= 0:
1466 self.report_warning('Video should already be available according to extracted info')
38d79fd1 1467 diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
f2ebc5c7 1468 self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')
1469
1470 wait_till = time.time() + diff
1471 try:
1472 while True:
1473 diff = wait_till - time.time()
1474 if diff <= 0:
1475 progress('')
1476 raise ReExtractInfo('[wait] Wait period ended', expected=True)
1477 progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
1478 time.sleep(1)
1479 except KeyboardInterrupt:
1480 progress('')
1481 raise ReExtractInfo('[wait] Interrupted by user', expected=True)
1482 except BaseException as e:
1483 if not isinstance(e, ReExtractInfo):
1484 self.to_screen('')
1485 raise
1486
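# Example of the 'wait_for_video' param consumed above - a (min, max) tuple of seconds:
#   YoutubeDL({'wait_for_video': (60, 600)})
# When 'release_timestamp' is known, the wait is clamped into that window; for an upcoming live
# without a known release time, a random duration between the two bounds is used instead.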
7e88d7d7 1487 @_handle_extraction_exceptions
58f197b7 1488 def __extract_info(self, url, ie, download, extra_info, process):
a0566bbf 1489 ie_result = ie.extract(url)
1490 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
1491 return
1492 if isinstance(ie_result, list):
1493 # Backwards compatibility: old IE result format
1494 ie_result = {
1495 '_type': 'compat_list',
1496 'entries': ie_result,
1497 }
e37d0efb 1498 if extra_info.get('original_url'):
1499 ie_result.setdefault('original_url', extra_info['original_url'])
a0566bbf 1500 self.add_default_extra_info(ie_result, ie, url)
1501 if process:
f2ebc5c7 1502 self._wait_for_video(ie_result)
a0566bbf 1503 return self.process_ie_result(ie_result, download, extra_info)
8222d8de 1504 else:
a0566bbf 1505 return ie_result
fe7e0c98 1506
ea38e55f 1507 def add_default_extra_info(self, ie_result, ie, url):
6033d980 1508 if url is not None:
1509 self.add_extra_info(ie_result, {
1510 'webpage_url': url,
1511 'original_url': url,
57ebfca3 1512 })
1513 webpage_url = ie_result.get('webpage_url')
1514 if webpage_url:
1515 self.add_extra_info(ie_result, {
1516 'webpage_url_basename': url_basename(webpage_url),
1517 'webpage_url_domain': get_domain(webpage_url),
6033d980 1518 })
1519 if ie is not None:
1520 self.add_extra_info(ie_result, {
1521 'extractor': ie.IE_NAME,
1522 'extractor_key': ie.ie_key(),
1523 })
ea38e55f 1524
58adec46 1525 def process_ie_result(self, ie_result, download=True, extra_info=None):
8222d8de
JMF
1526 """
1527 Take the result of the ie (may be modified) and resolve all unresolved
1528 references (URLs, playlist items).
1529
1530 It will also download the videos if 'download'.
1531 Returns the resolved ie_result.
1532 """
58adec46 1533 if extra_info is None:
1534 extra_info = {}
e8ee972c
PH
1535 result_type = ie_result.get('_type', 'video')
1536
057a5206 1537 if result_type in ('url', 'url_transparent'):
134c6ea8 1538 ie_result['url'] = sanitize_url(ie_result['url'])
e37d0efb 1539 if ie_result.get('original_url'):
1540 extra_info.setdefault('original_url', ie_result['original_url'])
1541
057a5206 1542 extract_flat = self.params.get('extract_flat', False)
3089bc74
S
1543 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1544 or extract_flat is True):
ecb54191 1545 info_copy = ie_result.copy()
6033d980 1546 ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
360167b9 1547 if ie and not ie_result.get('id'):
4614bc22 1548 info_copy['id'] = ie.get_temp_id(ie_result['url'])
6033d980 1549 self.add_default_extra_info(info_copy, ie, ie_result['url'])
4614bc22 1550 self.add_extra_info(info_copy, extra_info)
b5475f11 1551 info_copy, _ = self.pre_process(info_copy)
ecb54191 1552 self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
415f8d51 1553 self._raise_pending_errors(info_copy)
4614bc22 1554 if self.params.get('force_write_download_archive', False):
1555 self.record_download_archive(info_copy)
e8ee972c
PH
1556 return ie_result
1557
8222d8de 1558 if result_type == 'video':
b6c45014 1559 self.add_extra_info(ie_result, extra_info)
9c2b75b5 1560 ie_result = self.process_video_result(ie_result, download=download)
415f8d51 1561 self._raise_pending_errors(ie_result)
28b0eb0f 1562 additional_urls = (ie_result or {}).get('additional_urls')
9c2b75b5 1563 if additional_urls:
e9f4ccd1 1564 # TODO: Improve MetadataParserPP to allow setting a list
9c2b75b5 1565 if isinstance(additional_urls, compat_str):
1566 additional_urls = [additional_urls]
1567 self.to_screen(
1568 '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
1569 self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
1570 ie_result['additional_entries'] = [
1571 self.extract_info(
b69fd25c 1572 url, download, extra_info=extra_info,
9c2b75b5 1573 force_generic_extractor=self.params.get('force_generic_extractor'))
1574 for url in additional_urls
1575 ]
1576 return ie_result
8222d8de
JMF
1577 elif result_type == 'url':
1578 # We have to add extra_info to the results because it may be
1579 # contained in a playlist
07cce701 1580 return self.extract_info(
1581 ie_result['url'], download,
1582 ie_key=ie_result.get('ie_key'),
1583 extra_info=extra_info)
7fc3fa05
PH
1584 elif result_type == 'url_transparent':
1585 # Use the information from the embedding page
1586 info = self.extract_info(
1587 ie_result['url'], ie_key=ie_result.get('ie_key'),
1588 extra_info=extra_info, download=False, process=False)
1589
1640eb09
S
1590 # extract_info may return None when ignoreerrors is enabled and
1591 # extraction failed with an error, don't crash and return early
1592 # in this case
1593 if not info:
1594 return info
1595
3975b4d2 1596 exempted_fields = {'_type', 'url', 'ie_key'}
1597 if not ie_result.get('section_end') and ie_result.get('section_start') is None:
1598 # For video clips, the id etc of the clip extractor should be used
1599 exempted_fields |= {'id', 'extractor', 'extractor_key'}
1600
412c617d 1601 new_result = info.copy()
3975b4d2 1602 new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))
7fc3fa05 1603
0563f7ac
S
1604 # Extracted info may not be a video result (i.e.
1605 # info.get('_type', 'video') != 'video') but rather a URL or
1606 # url_transparent. In such cases outer metadata (from ie_result)
1607 # should be propagated to the inner one (info). For this to happen,
1608 # _type of info should be overridden with url_transparent. This
067aa17e 1609 # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
0563f7ac
S
1610 if new_result.get('_type') == 'url':
1611 new_result['_type'] = 'url_transparent'
7fc3fa05
PH
1612
1613 return self.process_ie_result(
1614 new_result, download=download, extra_info=extra_info)
40fcba5e 1615 elif result_type in ('playlist', 'multi_video'):
30a074c2 1616 # Protect from infinite recursion due to recursively nested playlists
1617 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
1618 webpage_url = ie_result['webpage_url']
1619 if webpage_url in self._playlist_urls:
7e85e872 1620 self.to_screen(
30a074c2 1621 '[download] Skipping already downloaded playlist: %s'
1622 % (ie_result.get('title') or ie_result.get('id')))
1623 return
7e85e872 1624
30a074c2 1625 self._playlist_level += 1
1626 self._playlist_urls.add(webpage_url)
03f83004 1627 self._fill_common_fields(ie_result, False)
bc516a3f 1628 self._sanitize_thumbnails(ie_result)
30a074c2 1629 try:
1630 return self.__process_playlist(ie_result, download)
1631 finally:
1632 self._playlist_level -= 1
1633 if not self._playlist_level:
1634 self._playlist_urls.clear()
8222d8de 1635 elif result_type == 'compat_list':
c9bf4114
PH
1636 self.report_warning(
1637 'Extractor %s returned a compat_list result. '
1638 'It needs to be updated.' % ie_result.get('extractor'))
5f6a1245 1639
8222d8de 1640 def _fixup(r):
b868936c 1641 self.add_extra_info(r, {
1642 'extractor': ie_result['extractor'],
1643 'webpage_url': ie_result['webpage_url'],
1644 'webpage_url_basename': url_basename(ie_result['webpage_url']),
0bb322b9 1645 'webpage_url_domain': get_domain(ie_result['webpage_url']),
b868936c 1646 'extractor_key': ie_result['extractor_key'],
1647 })
8222d8de
JMF
1648 return r
1649 ie_result['entries'] = [
b6c45014 1650 self.process_ie_result(_fixup(r), download, extra_info)
8222d8de
JMF
1651 for r in ie_result['entries']
1652 ]
1653 return ie_result
1654 else:
1655 raise Exception('Invalid result type: %s' % result_type)
1656
e92caff5 1657 def _ensure_dir_exists(self, path):
1658 return make_dir(path, self.report_error)
1659
3b603dbd 1660 @staticmethod
1661 def _playlist_infodict(ie_result, **kwargs):
1662 return {
1663 **ie_result,
1664 'playlist': ie_result.get('title') or ie_result.get('id'),
1665 'playlist_id': ie_result.get('id'),
1666 'playlist_title': ie_result.get('title'),
1667 'playlist_uploader': ie_result.get('uploader'),
1668 'playlist_uploader_id': ie_result.get('uploader_id'),
1669 'playlist_index': 0,
1670 **kwargs,
1671 }
1672
30a074c2 1673 def __process_playlist(self, ie_result, download):
7e88d7d7 1674 """Process each entry in the playlist"""
1675 title = ie_result.get('title') or ie_result.get('id') or '<Untitled>'
1676 self.to_screen(f'[download] Downloading playlist: {title}')
f0d785d3 1677
7e88d7d7 1678 all_entries = PlaylistEntries(self, ie_result)
7e9a6125 1679 entries = orderedSet(all_entries.get_requested_items(), lazy=True)
1680
1681 lazy = self.params.get('lazy_playlist')
1682 if lazy:
1683 resolved_entries, n_entries = [], 'N/A'
1684 ie_result['requested_entries'], ie_result['entries'] = None, None
1685 else:
1686 entries = resolved_entries = list(entries)
1687 n_entries = len(resolved_entries)
1688 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
1689 if not ie_result.get('playlist_count'):
1690 # Better to do this after potentially exhausting entries
1691 ie_result['playlist_count'] = all_entries.get_full_count()
498f5606 1692
e08a85d8 1693 _infojson_written = False
0bfc53d0 1694 write_playlist_files = self.params.get('allow_playlist_files', True)
1695 if write_playlist_files and self.params.get('list_thumbnails'):
1696 self.list_thumbnails(ie_result)
1697 if write_playlist_files and not self.params.get('simulate'):
7e9a6125 1698 ie_copy = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
e08a85d8 1699 _infojson_written = self._write_info_json(
1700 'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
1701 if _infojson_written is None:
80c03fa9 1702 return
1703 if self._write_description('playlist', ie_result,
1704 self.prepare_filename(ie_copy, 'pl_description')) is None:
1705 return
681de68e 1706 # TODO: This should be passed to ThumbnailsConvertor if necessary
80c03fa9 1707 self._write_thumbnails('playlist', ie_copy, self.prepare_filename(ie_copy, 'pl_thumbnail'))
30a074c2 1708
7e9a6125 1709 if lazy:
1710 if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
1711 self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
1712 elif self.params.get('playlistreverse'):
1713 entries.reverse()
1714 elif self.params.get('playlistrandom'):
30a074c2 1715 random.shuffle(entries)
1716
7e88d7d7 1717 self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} videos'
1718 f'{format_field(ie_result, "playlist_count", " of %s")}')
30a074c2 1719
26e2805c 1720 failures = 0
1721 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
7e9a6125 1722 for i, (playlist_index, entry) in enumerate(entries):
1723 if lazy:
1724 resolved_entries.append((playlist_index, entry))
1725
7e88d7d7 1726 # TODO: Add auto-generated fields
1ac4fd80 1727 if not entry or self._match_entry(entry, incomplete=True) is not None:
7e88d7d7 1728 continue
1729
19a03940 1730 self.to_screen('[download] Downloading video %s of %s' % (
7e9a6125 1731 self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
7e88d7d7 1732
1733 entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
7e9a6125 1734 if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
1735 playlist_index = ie_result['requested_entries'][i]
1736
7e88d7d7 1737 entry_result = self.__process_iterable_entry(entry, download, {
7e9a6125 1738 'n_entries': int_or_none(n_entries),
1739 '__last_playlist_index': max(ie_result['requested_entries'] or (0, 0)),
f0d785d3 1740 'playlist_count': ie_result.get('playlist_count'),
71729754 1741 'playlist_index': playlist_index,
7e9a6125 1742 'playlist_autonumber': i + 1,
7e88d7d7 1743 'playlist': title,
30a074c2 1744 'playlist_id': ie_result.get('id'),
1745 'playlist_title': ie_result.get('title'),
1746 'playlist_uploader': ie_result.get('uploader'),
1747 'playlist_uploader_id': ie_result.get('uploader_id'),
30a074c2 1748 'extractor': ie_result['extractor'],
1749 'webpage_url': ie_result['webpage_url'],
1750 'webpage_url_basename': url_basename(ie_result['webpage_url']),
0bb322b9 1751 'webpage_url_domain': get_domain(ie_result['webpage_url']),
30a074c2 1752 'extractor_key': ie_result['extractor_key'],
7e88d7d7 1753 })
26e2805c 1754 if not entry_result:
1755 failures += 1
1756 if failures >= max_failures:
1757 self.report_error(
7e88d7d7 1758 f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
26e2805c 1759 break
7e9a6125 1760 resolved_entries[i] = (playlist_index, entry_result)
7e88d7d7 1761
1762 # Update with processed data
7e9a6125 1763 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
e08a85d8 1764
1765 # Write the updated info to json
cb96c5be 1766 if _infojson_written is True and self._write_info_json(
e08a85d8 1767 'updated playlist', ie_result,
1768 self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
1769 return
ca30f449 1770
ed5835b4 1771 ie_result = self.run_all_pps('playlist', ie_result)
7e88d7d7 1772 self.to_screen(f'[download] Finished downloading playlist: {title}')
30a074c2 1773 return ie_result
1774
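# A hedged example of the playlist-related params consumed above:
#   {'lazy_playlist': True,            # resolve entries on demand (n_entries stays 'N/A')
#    'playlistreverse': False,         # reverse/random order is not supported with lazy_playlist
#    'skip_playlist_after_errors': 3,  # abort the playlist after this many failed entries
#    'allow_playlist_files': True}     # write the playlist's infojson/description/thumbnails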
7e88d7d7 1775 @_handle_extraction_exceptions
a0566bbf 1776 def __process_iterable_entry(self, entry, download, extra_info):
1777 return self.process_ie_result(
1778 entry, download=download, extra_info=extra_info)
1779
67134eab
JMF
1780 def _build_format_filter(self, filter_spec):
1781 " Returns a function to filter the formats according to the filter_spec "
083c9df9
PH
1782
1783 OPERATORS = {
1784 '<': operator.lt,
1785 '<=': operator.le,
1786 '>': operator.gt,
1787 '>=': operator.ge,
1788 '=': operator.eq,
1789 '!=': operator.ne,
1790 }
67134eab 1791 operator_rex = re.compile(r'''(?x)\s*
187986a8 1792 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
1793 (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1794 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
083c9df9 1795 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
187986a8 1796 m = operator_rex.fullmatch(filter_spec)
9ddb6925
S
1797 if m:
1798 try:
1799 comparison_value = int(m.group('value'))
1800 except ValueError:
1801 comparison_value = parse_filesize(m.group('value'))
1802 if comparison_value is None:
1803 comparison_value = parse_filesize(m.group('value') + 'B')
1804 if comparison_value is None:
1805 raise ValueError(
1806 'Invalid value %r in format specification %r' % (
67134eab 1807 m.group('value'), filter_spec))
9ddb6925
S
1808 op = OPERATORS[m.group('op')]
1809
083c9df9 1810 if not m:
9ddb6925
S
1811 STR_OPERATORS = {
1812 '=': operator.eq,
10d33b34
YCH
1813 '^=': lambda attr, value: attr.startswith(value),
1814 '$=': lambda attr, value: attr.endswith(value),
1815 '*=': lambda attr, value: value in attr,
1ce9a3cb 1816 '~=': lambda attr, value: value.search(attr) is not None
9ddb6925 1817 }
187986a8 1818 str_operator_rex = re.compile(r'''(?x)\s*
1819 (?P<key>[a-zA-Z0-9._-]+)\s*
1ce9a3cb
LF
1820 (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
1821 (?P<quote>["'])?
1822 (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
1823 (?(quote)(?P=quote))\s*
9ddb6925 1824 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
187986a8 1825 m = str_operator_rex.fullmatch(filter_spec)
9ddb6925 1826 if m:
1ce9a3cb
LF
1827 if m.group('op') == '~=':
1828 comparison_value = re.compile(m.group('value'))
1829 else:
1830 comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
2cc779f4
S
1831 str_op = STR_OPERATORS[m.group('op')]
1832 if m.group('negation'):
e118a879 1833 op = lambda attr, value: not str_op(attr, value)
2cc779f4
S
1834 else:
1835 op = str_op
083c9df9 1836
9ddb6925 1837 if not m:
187986a8 1838 raise SyntaxError('Invalid filter specification %r' % filter_spec)
083c9df9
PH
1839
1840 def _filter(f):
1841 actual_value = f.get(m.group('key'))
1842 if actual_value is None:
1843 return m.group('none_inclusive')
1844 return op(actual_value, comparison_value)
67134eab
JMF
1845 return _filter
1846
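# Examples of filter specs accepted by the parser above (used inside [...] of a format selector):
#   'height<=720'       numeric comparison on the 'height' field
#   'filesize>100M'     numeric values may carry size suffixes (parsed via parse_filesize)
#   'ext=mp4'           string equality; ^= $= *= ~= match prefix/suffix/substring/regex
#   'vcodec!^=av01'     '!' negates a string operator
#   'fps>?30'           a trailing '?' after the operator also keeps formats missing the field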
9f1a1c36 1847 def _check_formats(self, formats):
1848 for f in formats:
1849 self.to_screen('[info] Testing format %s' % f['format_id'])
75689fe5 1850 path = self.get_output_path('temp')
1851 if not self._ensure_dir_exists(f'{path}/'):
1852 continue
1853 temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
9f1a1c36 1854 temp_file.close()
1855 try:
1856 success, _ = self.dl(temp_file.name, f, test=True)
8a82af35 1857 except (DownloadError, OSError, ValueError) + network_exceptions:
9f1a1c36 1858 success = False
1859 finally:
1860 if os.path.exists(temp_file.name):
1861 try:
1862 os.remove(temp_file.name)
1863 except OSError:
1864 self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
1865 if success:
1866 yield f
1867 else:
1868 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
1869
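# The 'check_formats' param drives this: True test-downloads every format (and thumbnail) up
# front, 'selected' only tests the formats picked by the selector, and a falsy value skips testing.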
0017d9ad 1870 def _default_format_spec(self, info_dict, download=True):
0017d9ad 1871
af0f7428
S
1872 def can_merge():
1873 merger = FFmpegMergerPP(self)
1874 return merger.available and merger.can_merge()
1875
91ebc640 1876 prefer_best = (
b7b04c78 1877 not self.params.get('simulate')
91ebc640 1878 and download
1879 and (
1880 not can_merge()
21633673 1881 or info_dict.get('is_live') and not self.params.get('live_from_start')
bf1824b3 1882 or self.params['outtmpl']['default'] == '-'))
53ed7066 1883 compat = (
1884 prefer_best
1885 or self.params.get('allow_multiple_audio_streams', False)
8a82af35 1886 or 'format-spec' in self.params['compat_opts'])
91ebc640 1887
1888 return (
53ed7066 1889 'best/bestvideo+bestaudio' if prefer_best
1890 else 'bestvideo*+bestaudio/best' if not compat
91ebc640 1891 else 'bestvideo+bestaudio/best')
0017d9ad 1892
67134eab
JMF
1893 def build_format_selector(self, format_spec):
1894 def syntax_error(note, start):
1895 message = (
1896 'Invalid format specification: '
86e5f3ed 1897 '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
67134eab
JMF
1898 return SyntaxError(message)
1899
1900 PICKFIRST = 'PICKFIRST'
1901 MERGE = 'MERGE'
1902 SINGLE = 'SINGLE'
0130afb7 1903 GROUP = 'GROUP'
67134eab
JMF
1904 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
1905
91ebc640 1906 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
1907 'video': self.params.get('allow_multiple_video_streams', False)}
909d24dd 1908
9f1a1c36 1909 check_formats = self.params.get('check_formats') == 'selected'
e8e73840 1910
67134eab
JMF
1911 def _parse_filter(tokens):
1912 filter_parts = []
1913 for type, string, start, _, _ in tokens:
1914 if type == tokenize.OP and string == ']':
1915 return ''.join(filter_parts)
1916 else:
1917 filter_parts.append(string)
1918
232541df 1919 def _remove_unused_ops(tokens):
17cc1534 1920 # Remove operators that we don't use and join them with the surrounding strings
232541df
JMF
1921 # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
1922 ALLOWED_OPS = ('/', '+', ',', '(', ')')
1923 last_string, last_start, last_end, last_line = None, None, None, None
1924 for type, string, start, end, line in tokens:
1925 if type == tokenize.OP and string == '[':
1926 if last_string:
1927 yield tokenize.NAME, last_string, last_start, last_end, last_line
1928 last_string = None
1929 yield type, string, start, end, line
1930 # everything inside brackets will be handled by _parse_filter
1931 for type, string, start, end, line in tokens:
1932 yield type, string, start, end, line
1933 if type == tokenize.OP and string == ']':
1934 break
1935 elif type == tokenize.OP and string in ALLOWED_OPS:
1936 if last_string:
1937 yield tokenize.NAME, last_string, last_start, last_end, last_line
1938 last_string = None
1939 yield type, string, start, end, line
1940 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
1941 if not last_string:
1942 last_string = string
1943 last_start = start
1944 last_end = end
1945 else:
1946 last_string += string
1947 if last_string:
1948 yield tokenize.NAME, last_string, last_start, last_end, last_line
1949
cf2ac6df 1950 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
67134eab
JMF
1951 selectors = []
1952 current_selector = None
1953 for type, string, start, _, _ in tokens:
1954 # ENCODING is only defined in python 3.x
1955 if type == getattr(tokenize, 'ENCODING', None):
1956 continue
1957 elif type in [tokenize.NAME, tokenize.NUMBER]:
1958 current_selector = FormatSelector(SINGLE, string, [])
1959 elif type == tokenize.OP:
cf2ac6df
JMF
1960 if string == ')':
1961 if not inside_group:
1962 # ')' will be handled by the parentheses group
1963 tokens.restore_last_token()
67134eab 1964 break
cf2ac6df 1965 elif inside_merge and string in ['/', ',']:
0130afb7
JMF
1966 tokens.restore_last_token()
1967 break
cf2ac6df
JMF
1968 elif inside_choice and string == ',':
1969 tokens.restore_last_token()
1970 break
1971 elif string == ',':
0a31a350
JMF
1972 if not current_selector:
1973 raise syntax_error('"," must follow a format selector', start)
67134eab
JMF
1974 selectors.append(current_selector)
1975 current_selector = None
1976 elif string == '/':
d96d604e
JMF
1977 if not current_selector:
1978 raise syntax_error('"/" must follow a format selector', start)
67134eab 1979 first_choice = current_selector
cf2ac6df 1980 second_choice = _parse_format_selection(tokens, inside_choice=True)
f5f4a27a 1981 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
67134eab
JMF
1982 elif string == '[':
1983 if not current_selector:
1984 current_selector = FormatSelector(SINGLE, 'best', [])
1985 format_filter = _parse_filter(tokens)
1986 current_selector.filters.append(format_filter)
0130afb7
JMF
1987 elif string == '(':
1988 if current_selector:
1989 raise syntax_error('Unexpected "("', start)
cf2ac6df
JMF
1990 group = _parse_format_selection(tokens, inside_group=True)
1991 current_selector = FormatSelector(GROUP, group, [])
67134eab 1992 elif string == '+':
d03cfdce 1993 if not current_selector:
1994 raise syntax_error('Unexpected "+"', start)
1995 selector_1 = current_selector
1996 selector_2 = _parse_format_selection(tokens, inside_merge=True)
1997 if not selector_2:
1998 raise syntax_error('Expected a selector', start)
1999 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
67134eab 2000 else:
86e5f3ed 2001 raise syntax_error(f'Operator not recognized: "{string}"', start)
67134eab
JMF
2002 elif type == tokenize.ENDMARKER:
2003 break
2004 if current_selector:
2005 selectors.append(current_selector)
2006 return selectors
2007
f8d4ad9a 2008 def _merge(formats_pair):
2009 format_1, format_2 = formats_pair
2010
2011 formats_info = []
2012 formats_info.extend(format_1.get('requested_formats', (format_1,)))
2013 formats_info.extend(format_2.get('requested_formats', (format_2,)))
2014
2015 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
551f9388 2016 get_no_more = {'video': False, 'audio': False}
f8d4ad9a 2017 for (i, fmt_info) in enumerate(formats_info):
551f9388 2018 if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
2019 formats_info.pop(i)
2020 continue
2021 for aud_vid in ['audio', 'video']:
f8d4ad9a 2022 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
2023 if get_no_more[aud_vid]:
2024 formats_info.pop(i)
f5510afe 2025 break
f8d4ad9a 2026 get_no_more[aud_vid] = True
2027
2028 if len(formats_info) == 1:
2029 return formats_info[0]
2030
2031 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
2032 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
2033
2034 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
2035 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
2036
2037 output_ext = self.params.get('merge_output_format')
2038 if not output_ext:
2039 if the_only_video:
2040 output_ext = the_only_video['ext']
2041 elif the_only_audio and not video_fmts:
2042 output_ext = the_only_audio['ext']
2043 else:
2044 output_ext = 'mkv'
2045
975a0d0d 2046 filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
2047
f8d4ad9a 2048 new_dict = {
2049 'requested_formats': formats_info,
975a0d0d 2050 'format': '+'.join(filtered('format')),
2051 'format_id': '+'.join(filtered('format_id')),
f8d4ad9a 2052 'ext': output_ext,
975a0d0d 2053 'protocol': '+'.join(map(determine_protocol, formats_info)),
093a1710 2054 'language': '+'.join(orderedSet(filtered('language'))) or None,
2055 'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
2056 'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
975a0d0d 2057 'tbr': sum(filtered('tbr', 'vbr', 'abr')),
f8d4ad9a 2058 }
2059
2060 if the_only_video:
2061 new_dict.update({
2062 'width': the_only_video.get('width'),
2063 'height': the_only_video.get('height'),
2064 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
2065 'fps': the_only_video.get('fps'),
49a57e70 2066 'dynamic_range': the_only_video.get('dynamic_range'),
f8d4ad9a 2067 'vcodec': the_only_video.get('vcodec'),
2068 'vbr': the_only_video.get('vbr'),
2069 'stretched_ratio': the_only_video.get('stretched_ratio'),
2070 })
2071
2072 if the_only_audio:
2073 new_dict.update({
2074 'acodec': the_only_audio.get('acodec'),
2075 'abr': the_only_audio.get('abr'),
975a0d0d 2076 'asr': the_only_audio.get('asr'),
f8d4ad9a 2077 })
2078
2079 return new_dict
2080
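# Rough sketch of the result: merging a video-only and an audio-only format yields a synthetic
# dict whose 'requested_formats' lists both parts, whose 'ext' comes from --merge-output-format
# (else the video/audio ext, falling back to 'mkv'), and whose 'tbr' is the sum of the component bitrates.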
e8e73840 2081 def _check_formats(formats):
981052c9 2082 if not check_formats:
2083 yield from formats
b5ac45b1 2084 return
9f1a1c36 2085 yield from self._check_formats(formats)
e8e73840 2086
67134eab 2087 def _build_selector_function(selector):
909d24dd 2088 if isinstance(selector, list): # ,
67134eab
JMF
2089 fs = [_build_selector_function(s) for s in selector]
2090
317f7ab6 2091 def selector_function(ctx):
67134eab 2092 for f in fs:
981052c9 2093 yield from f(ctx)
67134eab 2094 return selector_function
909d24dd 2095
2096 elif selector.type == GROUP: # ()
0130afb7 2097 selector_function = _build_selector_function(selector.selector)
909d24dd 2098
2099 elif selector.type == PICKFIRST: # /
67134eab
JMF
2100 fs = [_build_selector_function(s) for s in selector.selector]
2101
317f7ab6 2102 def selector_function(ctx):
67134eab 2103 for f in fs:
317f7ab6 2104 picked_formats = list(f(ctx))
67134eab
JMF
2105 if picked_formats:
2106 return picked_formats
2107 return []
67134eab 2108
981052c9 2109 elif selector.type == MERGE: # +
2110 selector_1, selector_2 = map(_build_selector_function, selector.selector)
2111
2112 def selector_function(ctx):
adbc4ec4 2113 for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
981052c9 2114 yield _merge(pair)
2115
909d24dd 2116 elif selector.type == SINGLE: # atom
598d185d 2117 format_spec = selector.selector or 'best'
909d24dd 2118
f8d4ad9a 2119 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
909d24dd 2120 if format_spec == 'all':
2121 def selector_function(ctx):
9222c381 2122 yield from _check_formats(ctx['formats'][::-1])
f8d4ad9a 2123 elif format_spec == 'mergeall':
2124 def selector_function(ctx):
316f2650 2125 formats = list(_check_formats(
2126 f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
e01d6aa4 2127 if not formats:
2128 return
921b76ca 2129 merged_format = formats[-1]
2130 for f in formats[-2::-1]:
f8d4ad9a 2131 merged_format = _merge((merged_format, f))
2132 yield merged_format
909d24dd 2133
2134 else:
85e801a9 2135 format_fallback, seperate_fallback, format_reverse, format_idx = False, None, True, 1
eff63539 2136 mobj = re.match(
2137 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
2138 format_spec)
2139 if mobj is not None:
2140 format_idx = int_or_none(mobj.group('n'), default=1)
e8e73840 2141 format_reverse = mobj.group('bw')[0] == 'b'
eff63539 2142 format_type = (mobj.group('type') or [None])[0]
2143 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
2144 format_modified = mobj.group('mod') is not None
909d24dd 2145
2146 format_fallback = not format_type and not format_modified # for b, w
8326b00a 2147 _filter_f = (
eff63539 2148 (lambda f: f.get('%scodec' % format_type) != 'none')
2149 if format_type and format_modified # bv*, ba*, wv*, wa*
2150 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
2151 if format_type # bv, ba, wv, wa
2152 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
2153 if not format_modified # b, w
8326b00a 2154 else lambda f: True) # b*, w*
2155 filter_f = lambda f: _filter_f(f) and (
2156 f.get('vcodec') != 'none' or f.get('acodec') != 'none')
67134eab 2157 else:
48ee10ee 2158 if format_spec in self._format_selection_exts['audio']:
b11c04a8 2159 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
48ee10ee 2160 elif format_spec in self._format_selection_exts['video']:
b11c04a8 2161 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
85e801a9 2162 seperate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
48ee10ee 2163 elif format_spec in self._format_selection_exts['storyboards']:
b11c04a8 2164 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
2165 else:
b5ae35ee 2166 filter_f = lambda f: f.get('format_id') == format_spec # id
909d24dd 2167
2168 def selector_function(ctx):
2169 formats = list(ctx['formats'])
909d24dd 2170 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
85e801a9 2171 if not matches:
2172 if format_fallback and ctx['incomplete_formats']:
2173 # for extractors with incomplete formats (audio only (soundcloud)
2174 # or video only (imgur)) best/worst will fall back to
2175 # best/worst {video,audio}-only format
2176 matches = formats
2177 elif seperate_fallback and not ctx['has_merged_format']:
2178 # for compatibility with youtube-dl when there is no pre-merged format
2179 matches = list(filter(seperate_fallback, formats))
981052c9 2180 matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
2181 try:
e8e73840 2182 yield matches[format_idx - 1]
4abea8ca 2183 except LazyList.IndexError:
981052c9 2184 return
083c9df9 2185
67134eab 2186 filters = [self._build_format_filter(f) for f in selector.filters]
083c9df9 2187
317f7ab6 2188 def final_selector(ctx):
adbc4ec4 2189 ctx_copy = dict(ctx)
67134eab 2190 for _filter in filters:
317f7ab6
S
2191 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
2192 return selector_function(ctx_copy)
67134eab 2193 return final_selector
083c9df9 2194
0f06bcd7 2195 stream = io.BytesIO(format_spec.encode())
0130afb7 2196 try:
f9934b96 2197 tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
0130afb7
JMF
2198 except tokenize.TokenError:
2199 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
2200
86e5f3ed 2201 class TokenIterator:
0130afb7
JMF
2202 def __init__(self, tokens):
2203 self.tokens = tokens
2204 self.counter = 0
2205
2206 def __iter__(self):
2207 return self
2208
2209 def __next__(self):
2210 if self.counter >= len(self.tokens):
2211 raise StopIteration()
2212 value = self.tokens[self.counter]
2213 self.counter += 1
2214 return value
2215
2216 next = __next__
2217
2218 def restore_last_token(self):
2219 self.counter -= 1
2220
2221 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
67134eab 2222 return _build_selector_function(parsed_selector)
a9c58ad9 2223
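# Hedged usage sketch of the selector built above:
#   selector = ydl.build_format_selector('bv*[height<=720]+ba/b[height<=720]')
#   chosen = list(selector({'formats': formats, 'has_merged_format': False, 'incomplete_formats': False}))
# '/' (PICKFIRST) returns the first alternative that yields formats, '+' (MERGE) pairs video and
# audio via _merge(), '(...)' groups and ',' requests multiple downloads, and '[...]' applies
# the filters from _build_format_filter().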
e5660ee6 2224 def _calc_headers(self, info_dict):
8b7539d2 2225 res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})
e5660ee6 2226
c487cf00 2227 cookies = self._calc_cookies(info_dict['url'])
e5660ee6
JMF
2228 if cookies:
2229 res['Cookie'] = cookies
2230
0016b84e
S
2231 if 'X-Forwarded-For' not in res:
2232 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
2233 if x_forwarded_for_ip:
2234 res['X-Forwarded-For'] = x_forwarded_for_ip
2235
e5660ee6
JMF
2236 return res
2237
c487cf00 2238 def _calc_cookies(self, url):
2239 pr = sanitized_Request(url)
e5660ee6 2240 self.cookiejar.add_cookie_header(pr)
662435f7 2241 return pr.get_header('Cookie')
e5660ee6 2242
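# Sketch of the intent: _calc_headers() merges the global 'http_headers' param with per-format
# headers and injects 'Cookie' / 'X-Forwarded-For', so the final values survive in --dump-json
# output and can be replayed by external downloaders.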
9f1a1c36 2243 def _sort_thumbnails(self, thumbnails):
2244 thumbnails.sort(key=lambda t: (
2245 t.get('preference') if t.get('preference') is not None else -1,
2246 t.get('width') if t.get('width') is not None else -1,
2247 t.get('height') if t.get('height') is not None else -1,
2248 t.get('id') if t.get('id') is not None else '',
2249 t.get('url')))
2250
b0249bca 2251 def _sanitize_thumbnails(self, info_dict):
bc516a3f 2252 thumbnails = info_dict.get('thumbnails')
2253 if thumbnails is None:
2254 thumbnail = info_dict.get('thumbnail')
2255 if thumbnail:
2256 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
9f1a1c36 2257 if not thumbnails:
2258 return
2259
2260 def check_thumbnails(thumbnails):
2261 for t in thumbnails:
2262 self.to_screen(f'[info] Testing thumbnail {t["id"]}')
2263 try:
2264 self.urlopen(HEADRequest(t['url']))
2265 except network_exceptions as err:
2266 self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
2267 continue
2268 yield t
2269
2270 self._sort_thumbnails(thumbnails)
2271 for i, t in enumerate(thumbnails):
2272 if t.get('id') is None:
2273 t['id'] = '%d' % i
2274 if t.get('width') and t.get('height'):
2275 t['resolution'] = '%dx%d' % (t['width'], t['height'])
2276 t['url'] = sanitize_url(t['url'])
2277
2278 if self.params.get('check_formats') is True:
282f5709 2279 info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
9f1a1c36 2280 else:
2281 info_dict['thumbnails'] = thumbnails
bc516a3f 2282
03f83004
LNO
2283 def _fill_common_fields(self, info_dict, is_video=True):
2284 # TODO: move sanitization here
2285 if is_video:
2286 # playlists are allowed to lack "title"
d4736fdb 2287 title = info_dict.get('title', NO_DEFAULT)
2288 if title is NO_DEFAULT:
03f83004
LNO
2289 raise ExtractorError('Missing "title" field in extractor result',
2290 video_id=info_dict['id'], ie=info_dict['extractor'])
d4736fdb 2291 info_dict['fulltitle'] = title
2292 if not title:
2293 if title == '':
2294 self.write_debug('Extractor gave empty title. Creating a generic title')
2295 else:
2296 self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
1d485a1a 2297 info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'
03f83004
LNO
2298
2299 if info_dict.get('duration') is not None:
2300 info_dict['duration_string'] = formatSeconds(info_dict['duration'])
2301
2302 for ts_key, date_key in (
2303 ('timestamp', 'upload_date'),
2304 ('release_timestamp', 'release_date'),
2305 ('modified_timestamp', 'modified_date'),
2306 ):
2307 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
2308 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2309 # see http://bugs.python.org/issue1646728)
19a03940 2310 with contextlib.suppress(ValueError, OverflowError, OSError):
03f83004
LNO
2311 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
2312 info_dict[date_key] = upload_date.strftime('%Y%m%d')
03f83004
LNO
2313
2314 live_keys = ('is_live', 'was_live')
2315 live_status = info_dict.get('live_status')
2316 if live_status is None:
2317 for key in live_keys:
2318 if info_dict.get(key) is False:
2319 continue
2320 if info_dict.get(key):
2321 live_status = key
2322 break
2323 if all(info_dict.get(key) is False for key in live_keys):
2324 live_status = 'not_live'
2325 if live_status:
2326 info_dict['live_status'] = live_status
2327 for key in live_keys:
2328 if info_dict.get(key) is None:
2329 info_dict[key] = (live_status == key)
2330
2331 # Auto generate title fields corresponding to the *_number fields when missing
2332 # in order to always have clean titles. This is very common for TV series.
2333 for field in ('chapter', 'season', 'episode'):
2334 if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
2335 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
2336
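# Example of the auto-fill above (hypothetical values): 'timestamp': 1577836800 yields
# 'upload_date': '20200101'; 'season_number': 2 with no 'season' yields 'season': 'Season 2';
# and live_status is reconciled with is_live/was_live in both directions.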
415f8d51 2337 def _raise_pending_errors(self, info):
2338 err = info.pop('__pending_error', None)
2339 if err:
2340 self.report_error(err, tb=False)
2341
dd82ffea
JMF
2342 def process_video_result(self, info_dict, download=True):
2343 assert info_dict.get('_type', 'video') == 'video'
9c906919 2344 self._num_videos += 1
dd82ffea 2345
bec1fad2 2346 if 'id' not in info_dict:
fc08bdd6 2347 raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
2348 elif not info_dict.get('id'):
2349 raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])
455a15e2 2350
c9969434
S
2351 def report_force_conversion(field, field_not, conversion):
2352 self.report_warning(
2353 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2354 % (field, field_not, conversion))
2355
2356 def sanitize_string_field(info, string_field):
2357 field = info.get(string_field)
2358 if field is None or isinstance(field, compat_str):
2359 return
2360 report_force_conversion(string_field, 'a string', 'string')
2361 info[string_field] = compat_str(field)
2362
2363 def sanitize_numeric_fields(info):
2364 for numeric_field in self._NUMERIC_FIELDS:
2365 field = info.get(numeric_field)
f9934b96 2366 if field is None or isinstance(field, (int, float)):
c9969434
S
2367 continue
2368 report_force_conversion(numeric_field, 'numeric', 'int')
2369 info[numeric_field] = int_or_none(field)
2370
2371 sanitize_string_field(info_dict, 'id')
2372 sanitize_numeric_fields(info_dict)
3975b4d2 2373 if info_dict.get('section_end') and info_dict.get('section_start') is not None:
2374 info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
4c3f8c3f 2375 if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
50e93e03 2376 self.report_warning('"duration" field is negative, there is an error in extractor')
be6217b2 2377
dd82ffea
JMF
2378 if 'playlist' not in info_dict:
2379 # It isn't part of a playlist
2380 info_dict['playlist'] = None
2381 info_dict['playlist_index'] = None
2382
bc516a3f 2383 self._sanitize_thumbnails(info_dict)
d5519808 2384
536a55da 2385 thumbnail = info_dict.get('thumbnail')
bc516a3f 2386 thumbnails = info_dict.get('thumbnails')
536a55da
S
2387 if thumbnail:
2388 info_dict['thumbnail'] = sanitize_url(thumbnail)
2389 elif thumbnails:
d5519808
PH
2390 info_dict['thumbnail'] = thumbnails[-1]['url']
2391
ae30b840 2392 if info_dict.get('display_id') is None and 'id' in info_dict:
0afef30b
PH
2393 info_dict['display_id'] = info_dict['id']
2394
03f83004 2395 self._fill_common_fields(info_dict)
33d2fc2f 2396
05108a49
S
2397 for cc_kind in ('subtitles', 'automatic_captions'):
2398 cc = info_dict.get(cc_kind)
2399 if cc:
2400 for _, subtitle in cc.items():
2401 for subtitle_format in subtitle:
2402 if subtitle_format.get('url'):
2403 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
2404 if subtitle_format.get('ext') is None:
2405 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
2406
2407 automatic_captions = info_dict.get('automatic_captions')
4bba3716 2408 subtitles = info_dict.get('subtitles')
4bba3716 2409
360e1ca5 2410 info_dict['requested_subtitles'] = self.process_subtitles(
05108a49 2411 info_dict['id'], subtitles, automatic_captions)
a504ced0 2412
dd82ffea
JMF
2413 if info_dict.get('formats') is None:
2414 # There's only one format available
2415 formats = [info_dict]
2416 else:
2417 formats = info_dict['formats']
2418
0a5a191a 2419 # or None ensures --clean-infojson removes it
2420 info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
88acdbc2 2421 if not self.params.get('allow_unplayable_formats'):
2422 formats = [f for f in formats if not f.get('has_drm')]
0a5a191a 2423 if info_dict['_has_drm'] and all(
c0b6e5c7 2424 f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
2425 self.report_warning(
2426 'This video is DRM protected and only images are available for download. '
2427 'Use --list-formats to see them')
88acdbc2 2428
319b6059 2429 get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
2430 if not get_from_start:
2431 info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
2432 if info_dict.get('is_live') and formats:
adbc4ec4 2433 formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
319b6059 2434 if get_from_start and not formats:
a44ca5a4 2435 self.raise_no_formats(info_dict, msg=(
2436 '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
2437 'If you want to download from the current time, use --no-live-from-start'))
adbc4ec4 2438
db95dc13 2439 if not formats:
1151c407 2440 self.raise_no_formats(info_dict)
db95dc13 2441
73af5cc8
S
2442 def is_wellformed(f):
2443 url = f.get('url')
a5ac0c47 2444 if not url:
73af5cc8
S
2445 self.report_warning(
2446 '"url" field is missing or empty - skipping format, '
2447 'there is an error in extractor')
a5ac0c47
S
2448 return False
2449 if isinstance(url, bytes):
2450 sanitize_string_field(f, 'url')
2451 return True
73af5cc8
S
2452
2453 # Filter out malformed formats for better extraction robustness
2454 formats = list(filter(is_wellformed, formats))
2455
181c7053
S
2456 formats_dict = {}
2457
dd82ffea 2458 # We check that all the formats have the format and format_id fields
db95dc13 2459 for i, format in enumerate(formats):
c9969434
S
2460 sanitize_string_field(format, 'format_id')
2461 sanitize_numeric_fields(format)
dcf77cf1 2462 format['url'] = sanitize_url(format['url'])
e74e3b63 2463 if not format.get('format_id'):
8016c922 2464 format['format_id'] = compat_str(i)
e2effb08
S
2465 else:
2466 # Sanitize format_id from characters used in format selector expression
ec85ded8 2467 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
181c7053
S
2468 format_id = format['format_id']
2469 if format_id not in formats_dict:
2470 formats_dict[format_id] = []
2471 formats_dict[format_id].append(format)
2472
2473 # Make sure all formats have unique format_id
03b4de72 2474 common_exts = set(itertools.chain(*self._format_selection_exts.values()))
181c7053 2475 for format_id, ambiguous_formats in formats_dict.items():
48ee10ee 2476 ambiguous_id = len(ambiguous_formats) > 1
2477 for i, format in enumerate(ambiguous_formats):
2478 if ambiguous_id:
181c7053 2479 format['format_id'] = '%s-%d' % (format_id, i)
48ee10ee 2480 if format.get('ext') is None:
2481 format['ext'] = determine_ext(format['url']).lower()
2482 # Ensure there is no conflict between id and ext in format selection
2483 # See https://github.com/yt-dlp/yt-dlp/issues/1282
2484 if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
2485 format['format_id'] = 'f%s' % format['format_id']
181c7053
S
2486
2487 for i, format in enumerate(formats):
8c51aa65 2488 if format.get('format') is None:
6febd1c1 2489 format['format'] = '{id} - {res}{note}'.format(
8c51aa65
JMF
2490 id=format['format_id'],
2491 res=self.format_resolution(format),
b868936c 2492 note=format_field(format, 'format_note', ' (%s)'),
8c51aa65 2493 )
6f0be937 2494 if format.get('protocol') is None:
b5559424 2495 format['protocol'] = determine_protocol(format)
239df021 2496 if format.get('resolution') is None:
2497 format['resolution'] = self.format_resolution(format, default=None)
176f1866 2498 if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
2499 format['dynamic_range'] = 'SDR'
f2fe69c7 2500 if (info_dict.get('duration') and format.get('tbr')
2501 and not format.get('filesize') and not format.get('filesize_approx')):
56ba69e4 2502 format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
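# e.g. (rough estimate) a 300 s stream at tbr=1000 Kbit/s gives 300 * 1000 * 1024 / 8
# = 38,400,000 bytes, i.e. about 36.6 MiB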
f2fe69c7 2503
e5660ee6
JMF
2504 # Add HTTP headers, so that external programs can use them from the
2505 # json output
2506 full_format_info = info_dict.copy()
2507 full_format_info.update(format)
2508 format['http_headers'] = self._calc_headers(full_format_info)
0016b84e
S
2509 # Remove private housekeeping stuff
2510 if '__x_forwarded_for_ip' in info_dict:
2511 del info_dict['__x_forwarded_for_ip']
dd82ffea 2512
9f1a1c36 2513 if self.params.get('check_formats') is True:
282f5709 2514 formats = LazyList(self._check_formats(formats[::-1]), reverse=True)
9f1a1c36 2515
88acdbc2 2516 if not formats or formats[0] is not info_dict:
b3d9ef88
JMF
2517 # only set the 'formats' field if the original info_dict lists the formats;
2518 # otherwise we end up with a circular reference, the first (and unique)
f89197d7 2519 # element in the 'formats' field in info_dict is info_dict itself,
dfb1b146 2520 # which can't be exported to json
b3d9ef88 2521 info_dict['formats'] = formats
4ec82a72 2522
2523 info_dict, _ = self.pre_process(info_dict)
2524
6db9c4d5 2525 if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
09b49e1f 2526 return info_dict
2527
2528 self.post_extract(info_dict)
2529 info_dict, _ = self.pre_process(info_dict, 'after_filter')
2530
093a1710 2531 # The pre-processors may have modified the formats
2532 formats = info_dict.get('formats', [info_dict])
2533
fa9f30b8 2534 list_only = self.params.get('simulate') is None and (
2535 self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
2536 interactive_format_selection = not list_only and self.format_selector == '-'
b7b04c78 2537 if self.params.get('list_thumbnails'):
2538 self.list_thumbnails(info_dict)
b7b04c78 2539 if self.params.get('listsubtitles'):
2540 if 'automatic_captions' in info_dict:
2541 self.list_subtitles(
2542 info_dict['id'], automatic_captions, 'automatic captions')
2543 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
fa9f30b8 2544 if self.params.get('listformats') or interactive_format_selection:
b69fd25c 2545 self.list_formats(info_dict)
169dbde9 2546 if list_only:
b7b04c78 2547 # Without this printing, -F --print-json will not work
169dbde9 2548 self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
c487cf00 2549 return info_dict
bfaae0a7 2550
187986a8 2551 format_selector = self.format_selector
2552 if format_selector is None:
0017d9ad 2553 req_format = self._default_format_spec(info_dict, download=download)
0760b0a7 2554 self.write_debug('Default format spec: %s' % req_format)
187986a8 2555 format_selector = self.build_format_selector(req_format)
317f7ab6 2556
fa9f30b8 2557 while True:
2558 if interactive_format_selection:
2559 req_format = input(
2560 self._format_screen('\nEnter format selector: ', self.Styles.EMPHASIS))
2561 try:
2562 format_selector = self.build_format_selector(req_format)
2563 except SyntaxError as err:
2564 self.report_error(err, tb=False, is_error=False)
2565 continue
2566
85e801a9 2567 formats_to_download = list(format_selector({
fa9f30b8 2568 'formats': formats,
85e801a9 2569 'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
2570 'incomplete_formats': (
2571 # All formats are video-only or
2572 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
2573 # all formats are audio-only
2574 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
2575 }))
fa9f30b8 2576 if interactive_format_selection and not formats_to_download:
2577 self.report_error('Requested format is not available', tb=False, is_error=False)
2578 continue
2579 break
317f7ab6 2580
dd82ffea 2581 if not formats_to_download:
b7da73eb 2582 if not self.params.get('ignore_no_formats_error'):
c0b6e5c7 2583 raise ExtractorError(
2584 'Requested format is not available. Use --list-formats for a list of available formats',
2585 expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
b62fa6d7 2586 self.report_warning('Requested format is not available')
2587 # Process what we can, even without any available formats.
2588 formats_to_download = [{}]
a13e6848 2589
5ec1b6b7 2590 requested_ranges = self.params.get('download_ranges')
2591 if requested_ranges:
2592 requested_ranges = tuple(requested_ranges(info_dict, self))
2593
2594 best_format, downloaded_formats = formats_to_download[-1], []
b62fa6d7 2595 if download:
2596 if best_format:
5ec1b6b7 2597 def to_screen(*msg):
2598 self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')
2599
2600 to_screen(f'Downloading {len(formats_to_download)} format(s):',
2601 (f['format_id'] for f in formats_to_download))
2602 if requested_ranges:
2603 to_screen(f'Downloading {len(requested_ranges)} time ranges:',
2604 (f'{int(c["start_time"])}-{int(c["end_time"])}' for c in requested_ranges))
a13e6848 2605 max_downloads_reached = False
5ec1b6b7 2606
2607 for fmt, chapter in itertools.product(formats_to_download, requested_ranges or [{}]):
2608 new_info = self._copy_infodict(info_dict)
b7da73eb 2609 new_info.update(fmt)
3975b4d2 2610 offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
2611 if chapter or offset:
5ec1b6b7 2612 new_info.update({
3975b4d2 2613 'section_start': offset + chapter.get('start_time', 0),
bc401608 2614 'section_end': offset + min(chapter.get('end_time', duration), duration),
5ec1b6b7 2615 'section_title': chapter.get('title'),
2616 'section_number': chapter.get('index'),
2617 })
2618 downloaded_formats.append(new_info)
a13e6848 2619 try:
2620 self.process_info(new_info)
2621 except MaxDownloadsReached:
2622 max_downloads_reached = True
415f8d51 2623 self._raise_pending_errors(new_info)
f46e2f9d 2624 # Remove copied info
2625 for key, val in tuple(new_info.items()):
2626 if info_dict.get(key) == val:
2627 new_info.pop(key)
a13e6848 2628 if max_downloads_reached:
2629 break
ebed8b37 2630
5ec1b6b7 2631 write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
a13e6848 2632 assert write_archive.issubset({True, False, 'ignore'})
2633 if True in write_archive and False not in write_archive:
2634 self.record_download_archive(info_dict)
be72c624 2635
5ec1b6b7 2636 info_dict['requested_downloads'] = downloaded_formats
ed5835b4 2637 info_dict = self.run_all_pps('after_video', info_dict)
a13e6848 2638 if max_downloads_reached:
2639 raise MaxDownloadsReached()
ebed8b37 2640
49a57e70 2641 # We update the info dict with the selected best quality format (backwards compatibility)
be72c624 2642 info_dict.update(best_format)
dd82ffea
JMF
2643 return info_dict
2644
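# A small sketch of the download_ranges arithmetic used above: the chapter's
# start/end are shifted by any pre-existing section offset and the end is
# clamped to the duration (all values below are assumptions).
offset, duration = 30, 120.0
chapter = {'start_time': 10, 'end_time': 200, 'title': 'Intro', 'index': 1}
section = {
    'section_start': offset + chapter.get('start_time', 0),                    # 40
    'section_end': offset + min(chapter.get('end_time', duration), duration),  # 30 + 120 = 150
    'section_title': chapter.get('title'),
    'section_number': chapter.get('index'),
}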
98c70d6f 2645 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
a504ced0 2646 """Select the requested subtitles and their format"""
d8a58ddc 2647 available_subs, normal_sub_langs = {}, []
98c70d6f
JMF
2648 if normal_subtitles and self.params.get('writesubtitles'):
2649 available_subs.update(normal_subtitles)
d8a58ddc 2650 normal_sub_langs = tuple(normal_subtitles.keys())
98c70d6f
JMF
2651 if automatic_captions and self.params.get('writeautomaticsub'):
2652 for lang, cap_info in automatic_captions.items():
360e1ca5
JMF
2653 if lang not in available_subs:
2654 available_subs[lang] = cap_info
2655
4d171848
JMF
2656 if (not self.params.get('writesubtitles') and not
2657 self.params.get('writeautomaticsub') or not
2658 available_subs):
2659 return None
a504ced0 2660
d8a58ddc 2661 all_sub_langs = tuple(available_subs.keys())
a504ced0 2662 if self.params.get('allsubtitles', False):
c32b0aab 2663 requested_langs = all_sub_langs
2664 elif self.params.get('subtitleslangs', False):
77c4a9ef 2665 # A list is used so that the order of languages will be the same as
2666 # given in subtitleslangs. See https://github.com/yt-dlp/yt-dlp/issues/1041
2667 requested_langs = []
2668 for lang_re in self.params.get('subtitleslangs'):
77c4a9ef 2669 discard = lang_re[0] == '-'
c32b0aab 2670 if discard:
77c4a9ef 2671 lang_re = lang_re[1:]
3aa91540 2672 if lang_re == 'all':
2673 if discard:
2674 requested_langs = []
2675 else:
2676 requested_langs.extend(all_sub_langs)
2677 continue
77c4a9ef 2678 current_langs = filter(re.compile(lang_re + '$').match, all_sub_langs)
c32b0aab 2679 if discard:
2680 for lang in current_langs:
77c4a9ef 2681 while lang in requested_langs:
2682 requested_langs.remove(lang)
c32b0aab 2683 else:
77c4a9ef 2684 requested_langs.extend(current_langs)
2685 requested_langs = orderedSet(requested_langs)
d8a58ddc 2686 elif normal_sub_langs:
2687 requested_langs = ['en'] if 'en' in normal_sub_langs else normal_sub_langs[:1]
a504ced0 2688 else:
d8a58ddc 2689 requested_langs = ['en'] if 'en' in all_sub_langs else all_sub_langs[:1]
ad3dc496 2690 if requested_langs:
2691 self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))
a504ced0
JMF
2692
2693 formats_query = self.params.get('subtitlesformat', 'best')
2694 formats_preference = formats_query.split('/') if formats_query else []
2695 subs = {}
2696 for lang in requested_langs:
2697 formats = available_subs.get(lang)
2698 if formats is None:
86e5f3ed 2699 self.report_warning(f'{lang} subtitles not available for {video_id}')
a504ced0 2700 continue
a504ced0
JMF
2701 for ext in formats_preference:
2702 if ext == 'best':
2703 f = formats[-1]
2704 break
2705 matches = list(filter(lambda f: f['ext'] == ext, formats))
2706 if matches:
2707 f = matches[-1]
2708 break
2709 else:
2710 f = formats[-1]
2711 self.report_warning(
2712 'No subtitle format found matching "%s" for language %s, '
2713 'using %s' % (formats_query, lang, f['ext']))
2714 subs[lang] = f
2715 return subs
2716
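# A simplified, standalone version of the language-selection loop above
# (orderedSet replaced by dict.fromkeys): a '-' prefix discards matches and
# 'all' expands to every available language. Sample language lists are assumptions.
import re

def resolve_sub_langs(requested, all_sub_langs):
    requested_langs = []
    for lang_re in requested:
        discard = lang_re[0] == '-'
        if discard:
            lang_re = lang_re[1:]
        if lang_re == 'all':
            requested_langs = [] if discard else [*requested_langs, *all_sub_langs]
            continue
        current = list(filter(re.compile(lang_re + '$').match, all_sub_langs))
        if discard:
            requested_langs = [lang for lang in requested_langs if lang not in current]
        else:
            requested_langs.extend(current)
    return list(dict.fromkeys(requested_langs))

assert resolve_sub_langs(['all', '-live_chat'], ('en', 'en-GB', 'de', 'live_chat')) == ['en', 'en-GB', 'de']
assert resolve_sub_langs(['en.*'], ('en', 'en-GB', 'de')) == ['en', 'en-GB']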
bb66c247 2717 def _forceprint(self, key, info_dict):
2718 if info_dict is None:
2719 return
2720 info_copy = info_dict.copy()
2721 info_copy['formats_table'] = self.render_formats_table(info_dict)
2722 info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
2723 info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
2724 info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))
2725
2726 def format_tmpl(tmpl):
2727 mobj = re.match(r'\w+(=?)$', tmpl)
2728 if mobj and mobj.group(1):
2729 return f'{tmpl[:-1]} = %({tmpl[:-1]})r'
2730 elif mobj:
2731 return f'%({tmpl})s'
2732 return tmpl
8130779d 2733
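# What the helper above produces for a few representative --print arguments
# (derived directly from the regex and branches above):
#   format_tmpl('title')              -> '%(title)s'
#   format_tmpl('duration=')          -> 'duration = %(duration)r'
#   format_tmpl('%(id)s - %(title)s') -> '%(id)s - %(title)s'   (returned as-is)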
bb66c247 2734 for tmpl in self.params['forceprint'].get(key, []):
2735 self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))
2736
2737 for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
5127e92a 2738 filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
bb66c247 2739 tmpl = format_tmpl(tmpl)
2740 self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
8d93e69d 2741 if self._ensure_dir_exists(filename):
86e5f3ed 2742 with open(filename, 'a', encoding='utf-8') as f:
8d93e69d 2743 f.write(self.evaluate_outtmpl(tmpl, info_copy) + '\n')
ca30f449 2744
d06daf23 2745 def __forced_printings(self, info_dict, filename, incomplete):
53c18592 2746 def print_mandatory(field, actual_field=None):
2747 if actual_field is None:
2748 actual_field = field
d06daf23 2749 if (self.params.get('force%s' % field, False)
53c18592 2750 and (not incomplete or info_dict.get(actual_field) is not None)):
2751 self.to_stdout(info_dict[actual_field])
d06daf23
S
2752
2753 def print_optional(field):
2754 if (self.params.get('force%s' % field, False)
2755 and info_dict.get(field) is not None):
2756 self.to_stdout(info_dict[field])
2757
53c18592 2758 info_dict = info_dict.copy()
2759 if filename is not None:
2760 info_dict['filename'] = filename
2761 if info_dict.get('requested_formats') is not None:
2762 # For RTMP URLs, also include the playpath
2763 info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
10331a26 2764 elif info_dict.get('url'):
53c18592 2765 info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')
2766
bb66c247 2767 if (self.params.get('forcejson')
2768 or self.params['forceprint'].get('video')
2769 or self.params['print_to_file'].get('video')):
2b8a2973 2770 self.post_extract(info_dict)
bb66c247 2771 self._forceprint('video', info_dict)
53c18592 2772
d06daf23
S
2773 print_mandatory('title')
2774 print_mandatory('id')
53c18592 2775 print_mandatory('url', 'urls')
d06daf23
S
2776 print_optional('thumbnail')
2777 print_optional('description')
53c18592 2778 print_optional('filename')
b868936c 2779 if self.params.get('forceduration') and info_dict.get('duration') is not None:
d06daf23
S
2780 self.to_stdout(formatSeconds(info_dict['duration']))
2781 print_mandatory('format')
53c18592 2782
2b8a2973 2783 if self.params.get('forcejson'):
6e84b215 2784 self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
d06daf23 2785
e8e73840 2786 def dl(self, name, info, subtitle=False, test=False):
88acdbc2 2787 if not info.get('url'):
1151c407 2788 self.raise_no_formats(info, True)
e8e73840 2789
2790 if test:
2791 verbose = self.params.get('verbose')
2792 params = {
2793 'test': True,
a169858f 2794 'quiet': self.params.get('quiet') or not verbose,
e8e73840 2795 'verbose': verbose,
2796 'noprogress': not verbose,
2797 'nopart': True,
2798 'skip_unavailable_fragments': False,
2799 'keep_fragments': False,
2800 'overwrites': True,
2801 '_no_ytdl_file': True,
2802 }
2803 else:
2804 params = self.params
96fccc10 2805 fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
e8e73840 2806 if not test:
2807 for ph in self._progress_hooks:
2808 fd.add_progress_hook(ph)
42676437
M
2809 urls = '", "'.join(
2810 (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
2811 for f in info.get('requested_formats', []) or [info])
3a408f9d 2812 self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')
03b4de72 2813
adbc4ec4
THD
2814 # Note: Ideally, info should be deep-copied so that hooks cannot modify it.
2815 # But it may contain objects that are not deep-copyable
2816 new_info = self._copy_infodict(info)
e8e73840 2817 if new_info.get('http_headers') is None:
2818 new_info['http_headers'] = self._calc_headers(new_info)
2819 return fd.download(name, new_info, subtitle)
2820
e04938ab 2821 def existing_file(self, filepaths, *, default_overwrite=True):
2822 existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
2823 if existing_files and not self.params.get('overwrites', default_overwrite):
2824 return existing_files[0]
2825
2826 for file in existing_files:
2827 self.report_file_delete(file)
2828 os.remove(file)
2829 return None
2830
8222d8de 2831 def process_info(self, info_dict):
09b49e1f 2832 """Process a single resolved IE result. (Modifies it in-place)"""
8222d8de
JMF
2833
2834 assert info_dict.get('_type', 'video') == 'video'
f46e2f9d 2835 original_infodict = info_dict
fd288278 2836
4513a41a 2837 if 'format' not in info_dict and 'ext' in info_dict:
8222d8de
JMF
2838 info_dict['format'] = info_dict['ext']
2839
09b49e1f 2840 # This is mostly just for backward compatibility of process_info
2841 # As a side-effect, this allows for format-specific filters
c77495e3 2842 if self._match_entry(info_dict) is not None:
9e907ebd 2843 info_dict['__write_download_archive'] = 'ignore'
8222d8de
JMF
2844 return
2845
09b49e1f 2846 # Does nothing under normal operation - for backward compatibility of process_info
277d6ff5 2847 self.post_extract(info_dict)
0c14d66a 2848 self._num_downloads += 1
8222d8de 2849
dcf64d43 2850 # info_dict['_filename'] needs to be set for backward compatibility
de6000d9 2851 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2852 temp_filename = self.prepare_filename(info_dict, 'temp')
0202b52a 2853 files_to_move = {}
8222d8de
JMF
2854
2855 # Forced printings
4513a41a 2856 self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
8222d8de 2857
ca6d59d2 2858 def check_max_downloads():
2859 if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
2860 raise MaxDownloadsReached()
2861
b7b04c78 2862 if self.params.get('simulate'):
9e907ebd 2863 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
ca6d59d2 2864 check_max_downloads()
8222d8de
JMF
2865 return
2866
de6000d9 2867 if full_filename is None:
8222d8de 2868 return
e92caff5 2869 if not self._ensure_dir_exists(encodeFilename(full_filename)):
0202b52a 2870 return
e92caff5 2871 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
8222d8de
JMF
2872 return
2873
80c03fa9 2874 if self._write_description('video', info_dict,
2875 self.prepare_filename(info_dict, 'description')) is None:
2876 return
2877
2878 sub_files = self._write_subtitles(info_dict, temp_filename)
2879 if sub_files is None:
2880 return
2881 files_to_move.update(dict(sub_files))
2882
2883 thumb_files = self._write_thumbnails(
2884 'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
2885 if thumb_files is None:
2886 return
2887 files_to_move.update(dict(thumb_files))
8222d8de 2888
80c03fa9 2889 infofn = self.prepare_filename(info_dict, 'infojson')
2890 _infojson_written = self._write_info_json('video', info_dict, infofn)
2891 if _infojson_written:
dac5df5a 2892 info_dict['infojson_filename'] = infofn
e75bb0d6 2893 # For backward compatibility, even though it was a private field
80c03fa9 2894 info_dict['__infojson_filename'] = infofn
2895 elif _infojson_written is None:
2896 return
2897
2898 # Note: Annotations are deprecated
2899 annofn = None
1fb07d10 2900 if self.params.get('writeannotations', False):
de6000d9 2901 annofn = self.prepare_filename(info_dict, 'annotation')
80c03fa9 2902 if annofn:
e92caff5 2903 if not self._ensure_dir_exists(encodeFilename(annofn)):
0202b52a 2904 return
0c3d0f51 2905 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
6febd1c1 2906 self.to_screen('[info] Video annotations are already present')
ffddb112
RA
2907 elif not info_dict.get('annotations'):
2908 self.report_warning('There are no annotations to write.')
7b6fefc9
PH
2909 else:
2910 try:
6febd1c1 2911 self.to_screen('[info] Writing video annotations to: ' + annofn)
86e5f3ed 2912 with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
7b6fefc9
PH
2913 annofile.write(info_dict['annotations'])
2914 except (KeyError, TypeError):
6febd1c1 2915 self.report_warning('There are no annotations to write.')
86e5f3ed 2916 except OSError:
6febd1c1 2917 self.report_error('Cannot write annotations file: ' + annofn)
7b6fefc9 2918 return
1fb07d10 2919
732044af 2920 # Write internet shortcut files
08438d2c 2921 def _write_link_file(link_type):
60f3e995 2922 url = try_get(info_dict['webpage_url'], iri_to_uri)
2923 if not url:
2924 self.report_warning(
2925 f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
2926 return True
08438d2c 2927 linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
0e6b018a
Z
2928 if not self._ensure_dir_exists(encodeFilename(linkfn)):
2929 return False
10e3742e 2930 if self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
08438d2c 2931 self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
2932 return True
2933 try:
2934 self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
86e5f3ed 2935 with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
2936 newline='\r\n' if link_type == 'url' else '\n') as linkfile:
60f3e995 2937 template_vars = {'url': url}
08438d2c 2938 if link_type == 'desktop':
2939 template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
2940 linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
86e5f3ed 2941 except OSError:
08438d2c 2942 self.report_error(f'Cannot write internet shortcut {linkfn}')
2943 return False
732044af 2944 return True
2945
08438d2c 2946 write_links = {
2947 'url': self.params.get('writeurllink'),
2948 'webloc': self.params.get('writewebloclink'),
2949 'desktop': self.params.get('writedesktoplink'),
2950 }
2951 if self.params.get('writelink'):
2952 link_type = ('webloc' if sys.platform == 'darwin'
2953 else 'desktop' if sys.platform.startswith('linux')
2954 else 'url')
2955 write_links[link_type] = True
2956
2957 if any(should_write and not _write_link_file(link_type)
2958 for link_type, should_write in write_links.items()):
2959 return
732044af 2960
f46e2f9d 2961 def replace_info_dict(new_info):
2962 nonlocal info_dict
2963 if new_info == info_dict:
2964 return
2965 info_dict.clear()
2966 info_dict.update(new_info)
2967
415f8d51 2968 new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
2969 replace_info_dict(new_info)
56d868db 2970
a13e6848 2971 if self.params.get('skip_download'):
56d868db 2972 info_dict['filepath'] = temp_filename
2973 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
2974 info_dict['__files_to_move'] = files_to_move
f46e2f9d 2975 replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
9e907ebd 2976 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
56d868db 2977 else:
2978 # Download
b868936c 2979 info_dict.setdefault('__postprocessors', [])
4340deca 2980 try:
0202b52a 2981
e04938ab 2982 def existing_video_file(*filepaths):
6b591b29 2983 ext = info_dict.get('ext')
e04938ab 2984 converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
2985 file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
2986 default_overwrite=False)
2987 if file:
2988 info_dict['ext'] = os.path.splitext(file)[1][1:]
2989 return file
0202b52a 2990
7b2c3f47 2991 fd, success = None, True
fccf90e7 2992 if info_dict.get('protocol') or info_dict.get('url'):
56ba69e4 2993 fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
2994 if fd is not FFmpegFD and (
2995 info_dict.get('section_start') or info_dict.get('section_end')):
7b2c3f47 2996 msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
56ba69e4 2997 else 'You have requested downloading the video partially, but ffmpeg is not installed')
2998 self.report_error(f'{msg}. Aborting')
5ec1b6b7 2999 return
5ec1b6b7 3000
4340deca 3001 if info_dict.get('requested_formats') is not None:
81cd954a
S
3002
3003 def compatible_formats(formats):
d03cfdce 3004 # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
3005 video_formats = [format for format in formats if format.get('vcodec') != 'none']
3006 audio_formats = [format for format in formats if format.get('acodec') != 'none']
3007 if len(video_formats) > 2 or len(audio_formats) > 2:
3008 return False
3009
81cd954a 3010 # Check extension
86e5f3ed 3011 exts = {format.get('ext') for format in formats}
d03cfdce 3012 COMPATIBLE_EXTS = (
86e5f3ed 3013 {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'},
3014 {'webm'},
d03cfdce 3015 )
3016 for ext_sets in COMPATIBLE_EXTS:
3017 if ext_sets.issuperset(exts):
3018 return True
81cd954a
S
3019 # TODO: Check acodec/vcodec
3020 return False
3021
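# Only the extension half of the check above, as a standalone sketch (the
# video/audio stream-count test is omitted): the requested container is kept
# only when every ext fits a single compatible set, otherwise mkv is used.
COMPATIBLE_EXTS = (
    {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'},
    {'webm'},
)

def exts_compatible(formats):
    exts = {f.get('ext') for f in formats}
    return any(ext_set.issuperset(exts) for ext_set in COMPATIBLE_EXTS)

assert exts_compatible([{'ext': 'mp4'}, {'ext': 'm4a'}])        # merged as requested
assert not exts_compatible([{'ext': 'webm'}, {'ext': 'm4a'}])   # falls back to mkv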
3022 requested_formats = info_dict['requested_formats']
0202b52a 3023 old_ext = info_dict['ext']
4e3b637d 3024 if self.params.get('merge_output_format') is None:
3025 if not compatible_formats(requested_formats):
3026 info_dict['ext'] = 'mkv'
3027 self.report_warning(
3028 'Requested formats are incompatible for merge and will be merged into mkv')
3029 if (info_dict['ext'] == 'webm'
3030 and info_dict.get('thumbnails')
3031 # check with type instead of pp_key, __name__, or isinstance
3032 # since we don't want any custom PPs to trigger this
c487cf00 3033 and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])): # noqa: E721
4e3b637d 3034 info_dict['ext'] = 'mkv'
3035 self.report_warning(
3036 'webm doesn\'t support embedding a thumbnail, mkv will be used')
124bc071 3037 new_ext = info_dict['ext']
0202b52a 3038
124bc071 3039 def correct_ext(filename, ext=new_ext):
96fccc10 3040 if filename == '-':
3041 return filename
0202b52a 3042 filename_real_ext = os.path.splitext(filename)[1][1:]
3043 filename_wo_ext = (
3044 os.path.splitext(filename)[0]
124bc071 3045 if filename_real_ext in (old_ext, new_ext)
0202b52a 3046 else filename)
86e5f3ed 3047 return f'{filename_wo_ext}.{ext}'
0202b52a 3048
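# The helper above only strips the current suffix when it matches the old or
# new extension, so unrelated suffixes survive. A standalone sketch with
# assumed extensions:
import os

def _correct_ext(filename, old_ext='mp4', new_ext='mkv'):
    if filename == '-':
        return filename
    filename_real_ext = os.path.splitext(filename)[1][1:]
    filename_wo_ext = (os.path.splitext(filename)[0]
                       if filename_real_ext in (old_ext, new_ext) else filename)
    return f'{filename_wo_ext}.{new_ext}'

assert _correct_ext('video.mp4') == 'video.mkv'
assert _correct_ext('video.f137.part') == 'video.f137.part.mkv'
assert _correct_ext('-') == '-'                  # stdout is left untouched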
38c6902b 3049 # Ensure filename always has a correct extension for successful merge
0202b52a 3050 full_filename = correct_ext(full_filename)
3051 temp_filename = correct_ext(temp_filename)
e04938ab 3052 dl_filename = existing_video_file(full_filename, temp_filename)
1ea24129 3053 info_dict['__real_download'] = False
18e674b4 3054
7b2c3f47 3055 merger = FFmpegMergerPP(self)
adbc4ec4 3056 downloaded = []
dbf5416a 3057 if dl_filename is not None:
6c7274ec 3058 self.report_file_already_downloaded(dl_filename)
adbc4ec4
THD
3059 elif fd:
3060 for f in requested_formats if fd != FFmpegFD else []:
3061 f['filepath'] = fname = prepend_extension(
3062 correct_ext(temp_filename, info_dict['ext']),
3063 'f%s' % f['format_id'], info_dict['ext'])
3064 downloaded.append(fname)
dbf5416a 3065 info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
3066 success, real_download = self.dl(temp_filename, info_dict)
3067 info_dict['__real_download'] = real_download
18e674b4 3068 else:
18e674b4 3069 if self.params.get('allow_unplayable_formats'):
3070 self.report_warning(
3071 'You have requested merging of multiple formats '
3072 'while also allowing unplayable formats to be downloaded. '
3073 'The formats won\'t be merged to prevent data corruption.')
3074 elif not merger.available:
e8969bda 3075 msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
3076 if not self.params.get('ignoreerrors'):
3077 self.report_error(f'{msg}. Aborting due to --abort-on-error')
3078 return
3079 self.report_warning(f'{msg}. The formats won\'t be merged')
18e674b4 3080
96fccc10 3081 if temp_filename == '-':
adbc4ec4 3082 reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
96fccc10 3083 else 'but the formats are incompatible for simultaneous download' if merger.available
3084 else 'but ffmpeg is not installed')
3085 self.report_warning(
3086 f'You have requested downloading multiple formats to stdout {reason}. '
3087 'The formats will be streamed one after the other')
3088 fname = temp_filename
dbf5416a 3089 for f in requested_formats:
3090 new_info = dict(info_dict)
3091 del new_info['requested_formats']
3092 new_info.update(f)
96fccc10 3093 if temp_filename != '-':
124bc071 3094 fname = prepend_extension(
3095 correct_ext(temp_filename, new_info['ext']),
3096 'f%s' % f['format_id'], new_info['ext'])
96fccc10 3097 if not self._ensure_dir_exists(fname):
3098 return
a21e0ab1 3099 f['filepath'] = fname
96fccc10 3100 downloaded.append(fname)
dbf5416a 3101 partial_success, real_download = self.dl(fname, new_info)
3102 info_dict['__real_download'] = info_dict['__real_download'] or real_download
3103 success = success and partial_success
adbc4ec4
THD
3104
3105 if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
3106 info_dict['__postprocessors'].append(merger)
3107 info_dict['__files_to_merge'] = downloaded
3108 # Even if nothing new was downloaded, the merge itself only happens now
3109 info_dict['__real_download'] = True
3110 else:
3111 for file in downloaded:
3112 files_to_move[file] = None
4340deca
P
3113 else:
3114 # Just a single file
e04938ab 3115 dl_filename = existing_video_file(full_filename, temp_filename)
6c7274ec 3116 if dl_filename is None or dl_filename == temp_filename:
3117 # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
3118 # So we should try to resume the download
e8e73840 3119 success, real_download = self.dl(temp_filename, info_dict)
0202b52a 3120 info_dict['__real_download'] = real_download
6c7274ec 3121 else:
3122 self.report_file_already_downloaded(dl_filename)
0202b52a 3123
0202b52a 3124 dl_filename = dl_filename or temp_filename
c571435f 3125 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
0202b52a 3126
3158150c 3127 except network_exceptions as err:
7960b056 3128 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
4340deca 3129 return
86e5f3ed 3130 except OSError as err:
4340deca
P
3131 raise UnavailableVideoError(err)
3132 except (ContentTooShortError, ) as err:
86e5f3ed 3133 self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
4340deca 3134 return
8222d8de 3135
415f8d51 3136 self._raise_pending_errors(info_dict)
de6000d9 3137 if success and full_filename != '-':
f17f8651 3138
fd7cfb64 3139 def fixup():
3140 do_fixup = True
3141 fixup_policy = self.params.get('fixup')
3142 vid = info_dict['id']
3143
3144 if fixup_policy in ('ignore', 'never'):
3145 return
3146 elif fixup_policy == 'warn':
3fe75fdc 3147 do_fixup = 'warn'
f89b3e2d 3148 elif fixup_policy != 'force':
3149 assert fixup_policy in ('detect_or_warn', None)
3150 if not info_dict.get('__real_download'):
3151 do_fixup = False
fd7cfb64 3152
3153 def ffmpeg_fixup(cndn, msg, cls):
3fe75fdc 3154 if not (do_fixup and cndn):
fd7cfb64 3155 return
3fe75fdc 3156 elif do_fixup == 'warn':
fd7cfb64 3157 self.report_warning(f'{vid}: {msg}')
3158 return
3159 pp = cls(self)
3160 if pp.available:
3161 info_dict['__postprocessors'].append(pp)
3162 else:
3163 self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
3164
3165 stretched_ratio = info_dict.get('stretched_ratio')
3166 ffmpeg_fixup(
3167 stretched_ratio not in (1, None),
3168 f'Non-uniform pixel ratio {stretched_ratio}',
3169 FFmpegFixupStretchedPP)
3170
3171 ffmpeg_fixup(
3172 (info_dict.get('requested_formats') is None
3173 and info_dict.get('container') == 'm4a_dash'
3174 and info_dict.get('ext') == 'm4a'),
3175 'writing DASH m4a. Only some players support this container',
3176 FFmpegFixupM4aPP)
3177
993191c0 3178 downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
24146491 3179 downloader = downloader.FD_NAME if downloader else None
adbc4ec4
THD
3180
3181 if info_dict.get('requested_formats') is None: # Not necessary if doing merger
24146491 3182 ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
494f5230 3183 or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
adbc4ec4
THD
3184 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
3185 FFmpegFixupM3u8PP)
3186 ffmpeg_fixup(info_dict.get('is_live') and downloader == 'DashSegmentsFD',
3187 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)
3188
24146491 3189 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
3190 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)
fd7cfb64 3191
3192 fixup()
8222d8de 3193 try:
f46e2f9d 3194 replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
af819c21 3195 except PostProcessingError as err:
3196 self.report_error('Postprocessing: %s' % str(err))
8222d8de 3197 return
ab8e5e51
AM
3198 try:
3199 for ph in self._post_hooks:
23c1a667 3200 ph(info_dict['filepath'])
ab8e5e51
AM
3201 except Exception as err:
3202 self.report_error('post hooks: %s' % str(err))
3203 return
9e907ebd 3204 info_dict['__write_download_archive'] = True
2d30509f 3205
c487cf00 3206 assert info_dict is original_infodict # Make sure the info_dict was modified in-place
a13e6848 3207 if self.params.get('force_write_download_archive'):
9e907ebd 3208 info_dict['__write_download_archive'] = True
ca6d59d2 3209 check_max_downloads()
8222d8de 3210
aa9369a2 3211 def __download_wrapper(self, func):
3212 @functools.wraps(func)
3213 def wrapper(*args, **kwargs):
3214 try:
3215 res = func(*args, **kwargs)
3216 except UnavailableVideoError as e:
3217 self.report_error(e)
b222c271 3218 except DownloadCancelled as e:
3219 self.to_screen(f'[info] {e}')
3220 if not self.params.get('break_per_url'):
3221 raise
aa9369a2 3222 else:
3223 if self.params.get('dump_single_json', False):
3224 self.post_extract(res)
3225 self.to_stdout(json.dumps(self.sanitize_info(res)))
3226 return wrapper
3227
8222d8de
JMF
3228 def download(self, url_list):
3229 """Download a given list of URLs."""
aa9369a2 3230 url_list = variadic(url_list) # Passing a single URL is a common mistake
bf1824b3 3231 outtmpl = self.params['outtmpl']['default']
3089bc74
S
3232 if (len(url_list) > 1
3233 and outtmpl != '-'
3234 and '%' not in outtmpl
3235 and self.params.get('max_downloads') != 1):
acd69589 3236 raise SameFileError(outtmpl)
8222d8de
JMF
3237
3238 for url in url_list:
aa9369a2 3239 self.__download_wrapper(self.extract_info)(
3240 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
8222d8de
JMF
3241
3242 return self._download_retcode
3243
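# A minimal usage sketch of the entry point above; the options shown are
# illustrative assumptions, not requirements.
from yt_dlp import YoutubeDL

opts = {
    'outtmpl': '%(title)s [%(id)s].%(ext)s',
    'ignoreerrors': True,
}
with YoutubeDL(opts) as ydl:
    # A list of URLs is expected; a single string also works because the
    # wrapper above passes it through variadic() first.
    retcode = ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])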
1dcc4c0c 3244 def download_with_info_file(self, info_filename):
31bd3925
JMF
3245 with contextlib.closing(fileinput.FileInput(
3246 [info_filename], mode='r',
3247 openhook=fileinput.hook_encoded('utf-8'))) as f:
3248 # FileInput doesn't have a read method, so we can't call json.load
8012d892 3249 info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
d4943898 3250 try:
aa9369a2 3251 self.__download_wrapper(self.process_ie_result)(info, download=True)
f2ebc5c7 3252 except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
bf5f605e 3253 if not isinstance(e, EntryNotInPlaylist):
3254 self.to_stderr('\r')
d4943898
JMF
3255 webpage_url = info.get('webpage_url')
3256 if webpage_url is not None:
aa9369a2 3257 self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
d4943898
JMF
3258 return self.download([webpage_url])
3259 else:
3260 raise
3261 return self._download_retcode
1dcc4c0c 3262
cb202fd2 3263 @staticmethod
8012d892 3264 def sanitize_info(info_dict, remove_private_keys=False):
3265 ''' Sanitize the infodict for converting to json '''
3ad56b42 3266 if info_dict is None:
3267 return info_dict
6e84b215 3268 info_dict.setdefault('epoch', int(time.time()))
6a5a30f9 3269 info_dict.setdefault('_type', 'video')
09b49e1f 3270
8012d892 3271 if remove_private_keys:
0a5a191a 3272 reject = lambda k, v: v is None or k.startswith('__') or k in {
f46e2f9d 3273 'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
0a5a191a 3274 'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber',
6e84b215 3275 }
ae8f99e6 3276 else:
09b49e1f 3277 reject = lambda k, v: False
adbc4ec4
THD
3278
3279 def filter_fn(obj):
3280 if isinstance(obj, dict):
3281 return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
3282 elif isinstance(obj, (list, tuple, set, LazyList)):
3283 return list(map(filter_fn, obj))
3284 elif obj is None or isinstance(obj, (str, int, float, bool)):
3285 return obj
3286 else:
3287 return repr(obj)
3288
5226731e 3289 return filter_fn(info_dict)
cb202fd2 3290
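# How filter_fn above coerces values into something json.dumps can handle:
# dicts/lists/tuples/sets are walked, JSON primitives pass through, and
# anything else is replaced by its repr(). A simplified standalone copy
# (LazyList and the reject callback omitted):
from datetime import date

def _filter_fn(obj):
    if isinstance(obj, dict):
        return {k: _filter_fn(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple, set)):
        return list(map(_filter_fn, obj))
    elif obj is None or isinstance(obj, (str, int, float, bool)):
        return obj
    return repr(obj)

sanitized = _filter_fn({'id': 'abc', 'tags': {'music'}, 'release_date': date(2022, 1, 1)})
# {'id': 'abc', 'tags': ['music'], 'release_date': 'datetime.date(2022, 1, 1)'}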
8012d892 3291 @staticmethod
3292 def filter_requested_info(info_dict, actually_filter=True):
3293 ''' Alias of sanitize_info for backward compatibility '''
3294 return YoutubeDL.sanitize_info(info_dict, actually_filter)
3295
43d7f5a5 3296 def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
3297 for filename in set(filter(None, files_to_delete)):
3298 if msg:
3299 self.to_screen(msg % filename)
3300 try:
3301 os.remove(filename)
3302 except OSError:
3303 self.report_warning(f'Unable to delete file {filename}')
3304 if filename in info.get('__files_to_move', []): # NB: Delete even if None
3305 del info['__files_to_move'][filename]
3306
ed5835b4 3307 @staticmethod
3308 def post_extract(info_dict):
3309 def actual_post_extract(info_dict):
3310 if info_dict.get('_type') in ('playlist', 'multi_video'):
3311 for video_dict in info_dict.get('entries', {}):
3312 actual_post_extract(video_dict or {})
3313 return
3314
09b49e1f 3315 post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
3316 info_dict.update(post_extractor())
ed5835b4 3317
3318 actual_post_extract(info_dict or {})
3319
dcf64d43 3320 def run_pp(self, pp, infodict):
5bfa4862 3321 files_to_delete = []
dcf64d43 3322 if '__files_to_move' not in infodict:
3323 infodict['__files_to_move'] = {}
b1940459 3324 try:
3325 files_to_delete, infodict = pp.run(infodict)
3326 except PostProcessingError as e:
3327 # Must be True and not 'only_download'
3328 if self.params.get('ignoreerrors') is True:
3329 self.report_error(e)
3330 return infodict
3331 raise
3332
5bfa4862 3333 if not files_to_delete:
dcf64d43 3334 return infodict
5bfa4862 3335 if self.params.get('keepvideo', False):
3336 for f in files_to_delete:
dcf64d43 3337 infodict['__files_to_move'].setdefault(f, '')
5bfa4862 3338 else:
43d7f5a5 3339 self._delete_downloaded_files(
3340 *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
dcf64d43 3341 return infodict
5bfa4862 3342
ed5835b4 3343 def run_all_pps(self, key, info, *, additional_pps=None):
bb66c247 3344 self._forceprint(key, info)
ed5835b4 3345 for pp in (additional_pps or []) + self._pps[key]:
dc5f409c 3346 info = self.run_pp(pp, info)
ed5835b4 3347 return info
277d6ff5 3348
56d868db 3349 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
5bfa4862 3350 info = dict(ie_info)
56d868db 3351 info['__files_to_move'] = files_to_move or {}
415f8d51 3352 try:
3353 info = self.run_all_pps(key, info)
3354 except PostProcessingError as err:
3355 msg = f'Preprocessing: {err}'
3356 info.setdefault('__pending_error', msg)
3357 self.report_error(msg, is_error=False)
56d868db 3358 return info, info.pop('__files_to_move', None)
5bfa4862 3359
f46e2f9d 3360 def post_process(self, filename, info, files_to_move=None):
8222d8de 3361 """Run all the postprocessors on the given file."""
8222d8de 3362 info['filepath'] = filename
dcf64d43 3363 info['__files_to_move'] = files_to_move or {}
ed5835b4 3364 info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
dcf64d43 3365 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
3366 del info['__files_to_move']
ed5835b4 3367 return self.run_all_pps('after_move', info)
c1c9a79c 3368
5db07df6 3369 def _make_archive_id(self, info_dict):
e9fef7ee
S
3370 video_id = info_dict.get('id')
3371 if not video_id:
3372 return
5db07df6
PH
3373 # Future-proof against any change in case
3374 # and keep backwards compatibility with prior versions
e9fef7ee 3375 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
7012b23c 3376 if extractor is None:
1211bb6d
S
3377 url = str_or_none(info_dict.get('url'))
3378 if not url:
3379 return
e9fef7ee 3380 # Try to find matching extractor for the URL and take its ie_key
8b7491c8 3381 for ie_key, ie in self._ies.items():
1211bb6d 3382 if ie.suitable(url):
8b7491c8 3383 extractor = ie_key
e9fef7ee
S
3384 break
3385 else:
3386 return
86e5f3ed 3387 return f'{extractor.lower()} {video_id}'
5db07df6
PH
3388
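# The archive key built above is just '<lowercased extractor key> <video id>',
# one such line per entry in the --download-archive file (hypothetical values):
extractor, video_id = 'Youtube', 'dQw4w9WgXcQ'
assert f'{extractor.lower()} {video_id}' == 'youtube dQw4w9WgXcQ'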
3389 def in_download_archive(self, info_dict):
3390 fn = self.params.get('download_archive')
3391 if fn is None:
3392 return False
3393
3394 vid_id = self._make_archive_id(info_dict)
e9fef7ee 3395 if not vid_id:
7012b23c 3396 return False # Incomplete video information
5db07df6 3397
a45e8619 3398 return vid_id in self.archive
c1c9a79c
PH
3399
3400 def record_download_archive(self, info_dict):
3401 fn = self.params.get('download_archive')
3402 if fn is None:
3403 return
5db07df6
PH
3404 vid_id = self._make_archive_id(info_dict)
3405 assert vid_id
a13e6848 3406 self.write_debug(f'Adding to archive: {vid_id}')
c1c9a79c 3407 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
6febd1c1 3408 archive_file.write(vid_id + '\n')
a45e8619 3409 self.archive.add(vid_id)
dd82ffea 3410
8c51aa65 3411 @staticmethod
8abeeb94 3412 def format_resolution(format, default='unknown'):
9359f3d4 3413 if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
fb04e403 3414 return 'audio only'
f49d89ee
PH
3415 if format.get('resolution') is not None:
3416 return format['resolution']
35615307 3417 if format.get('width') and format.get('height'):
ff51ed58 3418 return '%dx%d' % (format['width'], format['height'])
35615307 3419 elif format.get('height'):
ff51ed58 3420 return '%sp' % format['height']
35615307 3421 elif format.get('width'):
ff51ed58 3422 return '%dx?' % format['width']
3423 return default
8c51aa65 3424
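# Representative inputs and the strings the branches above produce
# (format_resolution is a staticmethod, so it can be called on the class):
from yt_dlp import YoutubeDL

assert YoutubeDL.format_resolution({'vcodec': 'none', 'acodec': 'mp4a.40.2'}) == 'audio only'
assert YoutubeDL.format_resolution({'width': 1920, 'height': 1080}) == '1920x1080'
assert YoutubeDL.format_resolution({'height': 720}) == '720p'
assert YoutubeDL.format_resolution({'width': 640}) == '640x?'
assert YoutubeDL.format_resolution({}) == 'unknown'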
8130779d 3425 def _list_format_headers(self, *headers):
3426 if self.params.get('listformats_table', True) is not False:
591bb9d3 3427 return [self._format_out(header, self.Styles.HEADERS) for header in headers]
8130779d 3428 return headers
3429
c57f7757
PH
3430 def _format_note(self, fdict):
3431 res = ''
3432 if fdict.get('ext') in ['f4f', 'f4m']:
f304da8a 3433 res += '(unsupported)'
32f90364
PH
3434 if fdict.get('language'):
3435 if res:
3436 res += ' '
f304da8a 3437 res += '[%s]' % fdict['language']
c57f7757 3438 if fdict.get('format_note') is not None:
f304da8a 3439 if res:
3440 res += ' '
3441 res += fdict['format_note']
c57f7757 3442 if fdict.get('tbr') is not None:
f304da8a 3443 if res:
3444 res += ', '
3445 res += '%4dk' % fdict['tbr']
c57f7757
PH
3446 if fdict.get('container') is not None:
3447 if res:
3448 res += ', '
3449 res += '%s container' % fdict['container']
3089bc74
S
3450 if (fdict.get('vcodec') is not None
3451 and fdict.get('vcodec') != 'none'):
c57f7757
PH
3452 if res:
3453 res += ', '
3454 res += fdict['vcodec']
91c7271a 3455 if fdict.get('vbr') is not None:
c57f7757
PH
3456 res += '@'
3457 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
3458 res += 'video@'
3459 if fdict.get('vbr') is not None:
3460 res += '%4dk' % fdict['vbr']
fbb21cf5 3461 if fdict.get('fps') is not None:
5d583bdf
S
3462 if res:
3463 res += ', '
3464 res += '%sfps' % fdict['fps']
c57f7757
PH
3465 if fdict.get('acodec') is not None:
3466 if res:
3467 res += ', '
3468 if fdict['acodec'] == 'none':
3469 res += 'video only'
3470 else:
3471 res += '%-5s' % fdict['acodec']
3472 elif fdict.get('abr') is not None:
3473 if res:
3474 res += ', '
3475 res += 'audio'
3476 if fdict.get('abr') is not None:
3477 res += '@%3dk' % fdict['abr']
3478 if fdict.get('asr') is not None:
3479 res += ' (%5dHz)' % fdict['asr']
3480 if fdict.get('filesize') is not None:
3481 if res:
3482 res += ', '
3483 res += format_bytes(fdict['filesize'])
9732d77e
PH
3484 elif fdict.get('filesize_approx') is not None:
3485 if res:
3486 res += ', '
3487 res += '~' + format_bytes(fdict['filesize_approx'])
c57f7757 3488 return res
91c7271a 3489
8130779d 3490 def render_formats_table(self, info_dict):
b69fd25c 3491 if not info_dict.get('formats') and not info_dict.get('url'):
8130779d 3492 return None
b69fd25c 3493
94badb25 3494 formats = info_dict.get('formats', [info_dict])
8130779d 3495 if self.params.get('listformats_table', True) is False:
76d321f6 3496 table = [
3497 [
3498 format_field(f, 'format_id'),
3499 format_field(f, 'ext'),
3500 self.format_resolution(f),
8130779d 3501 self._format_note(f)
3502 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3503 return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
3504
591bb9d3 3505 delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
8130779d 3506 table = [
3507 [
591bb9d3 3508 self._format_out(format_field(f, 'format_id'), self.Styles.ID),
8130779d 3509 format_field(f, 'ext'),
3510 format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
3511 format_field(f, 'fps', '\t%d'),
3512 format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
3513 delim,
3514 format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
3515 format_field(f, 'tbr', '\t%dk'),
3516 shorten_protocol_name(f.get('protocol', '')),
3517 delim,
3518 format_field(f, 'vcodec', default='unknown').replace(
3519 'none', 'images' if f.get('acodec') == 'none'
591bb9d3 3520 else self._format_out('audio only', self.Styles.SUPPRESS)),
8130779d 3521 format_field(f, 'vbr', '\t%dk'),
3522 format_field(f, 'acodec', default='unknown').replace(
3523 'none', '' if f.get('vcodec') == 'none'
591bb9d3 3524 else self._format_out('video only', self.Styles.SUPPRESS)),
8130779d 3525 format_field(f, 'abr', '\t%dk'),
3526 format_field(f, 'asr', '\t%dHz'),
3527 join_nonempty(
591bb9d3 3528 self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
8130779d 3529 format_field(f, 'language', '[%s]'),
3530 join_nonempty(format_field(f, 'format_note'),
3531 format_field(f, 'container', ignore=(None, f.get('ext'))),
3532 delim=', '),
3533 delim=' '),
3534 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3535 header_line = self._list_format_headers(
3536 'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', delim, '\tFILESIZE', '\tTBR', 'PROTO',
3537 delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')
3538
3539 return render_table(
3540 header_line, table, hide_empty=True,
591bb9d3 3541 delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
8130779d 3542
3543 def render_thumbnails_table(self, info_dict):
88f23a18 3544 thumbnails = list(info_dict.get('thumbnails') or [])
cfb56d1a 3545 if not thumbnails:
8130779d 3546 return None
3547 return render_table(
ec11a9f4 3548 self._list_format_headers('ID', 'Width', 'Height', 'URL'),
6970b600 3549 [[t.get('id'), t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])
2412044c 3550
8130779d 3551 def render_subtitles_table(self, video_id, subtitles):
2412044c 3552 def _row(lang, formats):
49c258e1 3553 exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
2412044c 3554 if len(set(names)) == 1:
7aee40c1 3555 names = [] if names[0] == 'unknown' else names[:1]
2412044c 3556 return [lang, ', '.join(names), ', '.join(exts)]
3557
8130779d 3558 if not subtitles:
3559 return None
3560 return render_table(
ec11a9f4 3561 self._list_format_headers('Language', 'Name', 'Formats'),
2412044c 3562 [_row(lang, formats) for lang, formats in subtitles.items()],
8130779d 3563 hide_empty=True)
3564
3565 def __list_table(self, video_id, name, func, *args):
3566 table = func(*args)
3567 if not table:
3568 self.to_screen(f'{video_id} has no {name}')
3569 return
3570 self.to_screen(f'[info] Available {name} for {video_id}:')
3571 self.to_stdout(table)
3572
3573 def list_formats(self, info_dict):
3574 self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)
3575
3576 def list_thumbnails(self, info_dict):
3577 self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)
3578
3579 def list_subtitles(self, video_id, subtitles, name='subtitles'):
3580 self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
a504ced0 3581
dca08720
PH
3582 def urlopen(self, req):
3583 """ Start an HTTP download """
f9934b96 3584 if isinstance(req, str):
67dda517 3585 req = sanitized_Request(req)
19a41fc6 3586 return self._opener.open(req, timeout=self._socket_timeout)
dca08720
PH
3587
3588 def print_debug_header(self):
3589 if not self.params.get('verbose'):
3590 return
49a57e70 3591
560738f3 3592 # These imports can be slow. So import them only as needed
3593 from .extractor.extractors import _LAZY_LOADER
3594 from .extractor.extractors import _PLUGIN_CLASSES as plugin_extractors
3595
49a57e70 3596 def get_encoding(stream):
2a938746 3597 ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
49a57e70 3598 if not supports_terminal_sequences(stream):
53973b4d 3599 from .utils import WINDOWS_VT_MODE # Must be imported locally
e3c7d495 3600 ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
49a57e70 3601 return ret
3602
591bb9d3 3603 encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
49a57e70 3604 locale.getpreferredencoding(),
3605 sys.getfilesystemencoding(),
591bb9d3 3606 self.get_encoding(),
3607 ', '.join(
64fa820c 3608 f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
591bb9d3 3609 if stream is not None and key != 'console')
3610 )
883d4b1e 3611
3612 logger = self.params.get('logger')
3613 if logger:
3614 write_debug = lambda msg: logger.debug(f'[debug] {msg}')
3615 write_debug(encoding_str)
3616 else:
96565c7e 3617 write_string(f'[debug] {encoding_str}\n', encoding=None)
49a57e70 3618 write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')
734f90bb 3619
4c88ff87 3620 source = detect_variant()
36eaf303 3621 write_debug(join_nonempty(
3622 'yt-dlp version', __version__,
3623 f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
3624 '' if source == 'unknown' else f'({source})',
3625 delim=' '))
6e21fdd2 3626 if not _LAZY_LOADER:
3627 if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
49a57e70 3628 write_debug('Lazy loading extractors is forcibly disabled')
6e21fdd2 3629 else:
49a57e70 3630 write_debug('Lazy loading extractors is disabled')
3ae5e797 3631 if plugin_extractors or plugin_postprocessors:
49a57e70 3632 write_debug('Plugins: %s' % [
3ae5e797 3633 '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
3634 for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
8a82af35 3635 if self.params['compat_opts']:
3636 write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
36eaf303 3637
3638 if source == 'source':
dca08720 3639 try:
f0c9fb96 3640 stdout, _, _ = Popen.run(
36eaf303 3641 ['git', 'rev-parse', '--short', 'HEAD'],
f0c9fb96 3642 text=True, cwd=os.path.dirname(os.path.abspath(__file__)),
3643 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
3644 if re.fullmatch('[0-9a-f]+', stdout.strip()):
3645 write_debug(f'Git HEAD: {stdout.strip()}')
70a1165b 3646 except Exception:
19a03940 3647 with contextlib.suppress(Exception):
36eaf303 3648 sys.exc_clear()
b300cda4
S
3649
3650 def python_implementation():
3651 impl_name = platform.python_implementation()
3652 if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
3653 return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
3654 return impl_name
3655
49a57e70 3656 write_debug('Python version %s (%s %s) - %s' % (
e5813e53 3657 platform.python_version(),
3658 python_implementation(),
3659 platform.architecture()[0],
b300cda4 3660 platform_name()))
d28b5171 3661
8913ef74 3662 exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
3663 ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
3664 if ffmpeg_features:
19a03940 3665 exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))
8913ef74 3666
4c83c967 3667 exe_versions['rtmpdump'] = rtmpdump_version()
feee8d32 3668 exe_versions['phantomjs'] = PhantomJSwrapper._version()
d28b5171 3669 exe_str = ', '.join(
2831b468 3670 f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
3671 ) or 'none'
49a57e70 3672 write_debug('exe versions: %s' % exe_str)
dca08720 3673
1d485a1a 3674 from .compat.compat_utils import get_package_info
9b8ee23b 3675 from .dependencies import available_dependencies
3676
3677 write_debug('Optional libraries: %s' % (', '.join(sorted({
1d485a1a 3678 join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
9b8ee23b 3679 })) or 'none'))
2831b468 3680
97ec5bc5 3681 self._setup_opener()
dca08720
PH
3682 proxy_map = {}
3683 for handler in self._opener.handlers:
3684 if hasattr(handler, 'proxies'):
3685 proxy_map.update(handler.proxies)
49a57e70 3686 write_debug(f'Proxy map: {proxy_map}')
dca08720 3687
49a57e70 3688 # Not implemented
3689 if False and self.params.get('call_home'):
0f06bcd7 3690 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
49a57e70 3691 write_debug('Public IP address: %s' % ipaddr)
58b1f00d 3692 latest_version = self.urlopen(
0f06bcd7 3693 'https://yt-dl.org/latest/version').read().decode()
58b1f00d
PH
3694 if version_tuple(latest_version) > version_tuple(__version__):
3695 self.report_warning(
3696 'You are using an outdated version (newest version: %s)! '
3697 'See https://yt-dl.org/update if you need help updating.' %
3698 latest_version)
3699
e344693b 3700 def _setup_opener(self):
97ec5bc5 3701 if hasattr(self, '_opener'):
3702 return
6ad14cab 3703 timeout_val = self.params.get('socket_timeout')
17bddf3e 3704 self._socket_timeout = 20 if timeout_val is None else float(timeout_val)
6ad14cab 3705
982ee69a 3706 opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
dca08720
PH
3707 opts_cookiefile = self.params.get('cookiefile')
3708 opts_proxy = self.params.get('proxy')
3709
982ee69a 3710 self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
dca08720 3711
6a3f4c3f 3712 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
dca08720
PH
3713 if opts_proxy is not None:
3714 if opts_proxy == '':
3715 proxies = {}
3716 else:
3717 proxies = {'http': opts_proxy, 'https': opts_proxy}
3718 else:
ac668111 3719 proxies = urllib.request.getproxies()
067aa17e 3720 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
dca08720
PH
3721 if 'http' in proxies and 'https' not in proxies:
3722 proxies['https'] = proxies['http']
91410c9b 3723 proxy_handler = PerRequestProxyHandler(proxies)
a0ddb8a2
PH
3724
3725 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
be4a824d
PH
3726 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3727 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
fca6dba8 3728 redirect_handler = YoutubeDLRedirectHandler()
f9934b96 3729 data_handler = urllib.request.DataHandler()
6240b0a2
JMF
3730
3731 # When passing our own FileHandler instance, build_opener won't add the
3732 # default FileHandler and allows us to disable the file protocol, which
3733 # can be used for malicious purposes (see
067aa17e 3734 # https://github.com/ytdl-org/youtube-dl/issues/8227)
ac668111 3735 file_handler = urllib.request.FileHandler()
6240b0a2
JMF
3736
3737 def file_open(*args, **kwargs):
ac668111 3738 raise urllib.error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
6240b0a2
JMF
3739 file_handler.file_open = file_open
3740
ac668111 3741 opener = urllib.request.build_opener(
fca6dba8 3742 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
2461f79d 3743
dca08720
PH
3744 # Delete the default user-agent header, which would otherwise apply in
3745 # cases where our custom HTTP handler doesn't come into play
067aa17e 3746 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
dca08720
PH
3747 opener.addheaders = []
3748 self._opener = opener
62fec3b2
PH
3749
3750 def encode(self, s):
3751 if isinstance(s, bytes):
3752 return s # Already encoded
3753
3754 try:
3755 return s.encode(self.get_encoding())
3756 except UnicodeEncodeError as err:
3757 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3758 raise
3759
3760 def get_encoding(self):
3761 encoding = self.params.get('encoding')
3762 if encoding is None:
3763 encoding = preferredencoding()
3764 return encoding
ec82d85a 3765
e08a85d8 3766 def _write_info_json(self, label, ie_result, infofn, overwrite=None):
cb96c5be 3767 ''' Write infojson and return True = written, 'exists' = already exists, False = skip, None = error '''
e08a85d8 3768 if overwrite is None:
3769 overwrite = self.params.get('overwrites', True)
80c03fa9 3770 if not self.params.get('writeinfojson'):
3771 return False
3772 elif not infofn:
3773 self.write_debug(f'Skipping writing {label} infojson')
3774 return False
3775 elif not self._ensure_dir_exists(infofn):
3776 return None
e08a85d8 3777 elif not overwrite and os.path.exists(infofn):
80c03fa9 3778 self.to_screen(f'[info] {label.title()} metadata is already present')
cb96c5be 3779 return 'exists'
3780
3781 self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
3782 try:
3783 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
3784 return True
86e5f3ed 3785 except OSError:
cb96c5be 3786 self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
3787 return None
80c03fa9 3788
3789 def _write_description(self, label, ie_result, descfn):
3790 ''' Write description and return True = written, False = skip, None = error '''
3791 if not self.params.get('writedescription'):
3792 return False
3793 elif not descfn:
3794 self.write_debug(f'Skipping writing {label} description')
3795 return False
3796 elif not self._ensure_dir_exists(descfn):
3797 return None
3798 elif not self.params.get('overwrites', True) and os.path.exists(descfn):
3799 self.to_screen(f'[info] {label.title()} description is already present')
3800 elif ie_result.get('description') is None:
3801 self.report_warning(f'There\'s no {label} description to write')
3802 return False
3803 else:
3804 try:
3805 self.to_screen(f'[info] Writing {label} description to: {descfn}')
86e5f3ed 3806 with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
80c03fa9 3807 descfile.write(ie_result['description'])
86e5f3ed 3808 except OSError:
80c03fa9 3809 self.report_error(f'Cannot write {label} description file {descfn}')
3810 return None
3811 return True
3812
3813 def _write_subtitles(self, info_dict, filename):
3814 ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
3815 ret = []
3816 subtitles = info_dict.get('requested_subtitles')
3817 if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
3818 # Subtitle download errors are already reported as troubles by the relevant IE,
3819 # so this silently carries on when used with an IE that does not support subtitles
3820 return ret
3821
3822 sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
3823 if not sub_filename_base:
3824 self.to_screen('[info] Skipping writing video subtitles')
3825 return ret
3826 for sub_lang, sub_info in subtitles.items():
3827 sub_format = sub_info['ext']
3828 sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
3829 sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
e04938ab 3830 existing_sub = self.existing_file((sub_filename_final, sub_filename))
3831 if existing_sub:
80c03fa9 3832 self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
e04938ab 3833 sub_info['filepath'] = existing_sub
3834 ret.append((existing_sub, sub_filename_final))
80c03fa9 3835 continue
3836
3837 self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
3838 if sub_info.get('data') is not None:
3839 try:
3840 # Use newline='' to prevent conversion of newline characters
3841 # See https://github.com/ytdl-org/youtube-dl/issues/10268
86e5f3ed 3842 with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
80c03fa9 3843 subfile.write(sub_info['data'])
3844 sub_info['filepath'] = sub_filename
3845 ret.append((sub_filename, sub_filename_final))
3846 continue
86e5f3ed 3847 except OSError:
80c03fa9 3848 self.report_error(f'Cannot write video subtitles file {sub_filename}')
3849 return None
3850
3851 try:
3852 sub_copy = sub_info.copy()
3853 sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
3854 self.dl(sub_filename, sub_copy, subtitle=True)
3855 sub_info['filepath'] = sub_filename
3856 ret.append((sub_filename, sub_filename_final))
6020e05d 3857 except (DownloadError, ExtractorError, OSError, ValueError) + network_exceptions as err:
c70c418d 3858 msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
6020e05d 3859 if self.params.get('ignoreerrors') is not True: # False or 'only_download'
c70c418d 3860 if not self.params.get('ignoreerrors'):
3861 self.report_error(msg)
3862 raise DownloadError(msg)
3863 self.report_warning(msg)
519804a9 3864 return ret
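# Note (added for clarity, not part of YoutubeDL.py): the except block above gives
# 'ignoreerrors' a three-way meaning for subtitle downloads:
#   True            -> report a warning and keep going
#   'only_download' -> raise DownloadError without an extra error report
#   False / unset   -> report the error, then raise DownloadError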
80c03fa9 3865
3866 def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
3867 ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
6c4fd172 3868 write_all = self.params.get('write_all_thumbnails', False)
80c03fa9 3869 thumbnails, ret = [], []
6c4fd172 3870 if write_all or self.params.get('writethumbnail', False):
0202b52a 3871 thumbnails = info_dict.get('thumbnails') or []
6c4fd172 3872 multiple = write_all and len(thumbnails) > 1
ec82d85a 3873
80c03fa9 3874 if thumb_filename_base is None:
3875 thumb_filename_base = filename
3876 if thumbnails and not thumb_filename_base:
3877 self.write_debug(f'Skipping writing {label} thumbnail')
3878 return ret
3879
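# Thumbnails are listed with the most preferred last, so iterate in reverse to try
# the best one first; unless write_all is set, the loop stops after the first
# thumbnail that is obtained successfully (see the break below)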
dd0228ce 3880 for idx, t in list(enumerate(thumbnails))[::-1]:
80c03fa9 3881 thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
aa9369a2 3882 thumb_display_id = f'{label} thumbnail {t["id"]}'
80c03fa9 3883 thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
3884 thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
ec82d85a 3885
e04938ab 3886 existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
3887 if existing_thumb:
aa9369a2 3888 self.to_screen('[info] %s is already present' % (
3889 thumb_display_id if multiple else f'{label} thumbnail').capitalize())
e04938ab 3890 t['filepath'] = existing_thumb
3891 ret.append((existing_thumb, thumb_filename_final))
ec82d85a 3892 else:
80c03fa9 3893 self.to_screen(f'[info] Downloading {thumb_display_id} ...')
ec82d85a 3894 try:
297e9952 3895 uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
80c03fa9 3896 self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
d3d89c32 3897 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
ec82d85a 3898 shutil.copyfileobj(uf, thumbf)
80c03fa9 3899 ret.append((thumb_filename, thumb_filename_final))
885cc0b7 3900 t['filepath'] = thumb_filename
3158150c 3901 except network_exceptions as err:
dd0228ce 3902 thumbnails.pop(idx)
80c03fa9 3903 self.report_warning(f'Unable to download {thumb_display_id}: {err}')
6c4fd172 3904 if ret and not write_all:
3905 break
0202b52a 3906 return ret
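# Illustrative sketch (not part of YoutubeDL.py): each returned pair maps the path
# written now to the final path it should end up at, so a caller can feed the list
# into a move/rename step. Names below are assumed:
#
#   for current_path, final_path in ydl._write_thumbnails('video', info_dict, temp_filename):
#       files_to_move[current_path] = final_path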