[yt-dlp.git] / yt_dlp / YoutubeDL.py
26e63931 1import collections
31bd3925 2import contextlib
9d2ecdbc 3import datetime
c1c9a79c 4import errno
31bd3925 5import fileinput
b5ae35ee 6import functools
8222d8de 7import io
b82f815f 8import itertools
8694c600 9import json
62fec3b2 10import locale
083c9df9 11import operator
8222d8de 12import os
f8271158 13import random
8222d8de
JMF
14import re
15import shutil
dca08720 16import subprocess
8222d8de 17import sys
21cd8fae 18import tempfile
8222d8de 19import time
67134eab 20import tokenize
8222d8de 21import traceback
524e2e4f 22import unicodedata
f9934b96 23import urllib.request
961ea474
S
24from string import ascii_letters
25
f8271158 26from .cache import Cache
ac668111 27from .compat import HAS_LEGACY as compat_has_legacy
14f25df2 28from .compat import compat_os_name, compat_shlex_quote
982ee69a 29from .cookies import load_cookies
f8271158 30from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
31from .downloader.rtmp import rtmpdump_version
f8271158 32from .extractor import gen_extractor_classes, get_info_extractor
33from .extractor.openload import PhantomJSwrapper
34from .minicurses import format_text
35from .postprocessor import _PLUGIN_CLASSES as plugin_postprocessors
36from .postprocessor import (
37 EmbedThumbnailPP,
38 FFmpegFixupDuplicateMoovPP,
39 FFmpegFixupDurationPP,
40 FFmpegFixupM3u8PP,
41 FFmpegFixupM4aPP,
42 FFmpegFixupStretchedPP,
43 FFmpegFixupTimestampPP,
44 FFmpegMergerPP,
45 FFmpegPostProcessor,
ca9def71 46 FFmpegVideoConvertorPP,
f8271158 47 MoveFilesAfterDownloadPP,
48 get_postprocessor,
49)
ca9def71 50from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
f8271158 51from .update import detect_variant
8c25f81b 52from .utils import (
f8271158 53 DEFAULT_OUTTMPL,
7b2c3f47 54 IDENTITY,
f8271158 55 LINK_TEMPLATES,
56 NO_DEFAULT,
1d485a1a 57 NUMBER_RE,
f8271158 58 OUTTMPL_TYPES,
59 POSTPROCESS_WHEN,
60 STR_FORMAT_RE_TMPL,
61 STR_FORMAT_TYPES,
62 ContentTooShortError,
63 DateRange,
64 DownloadCancelled,
65 DownloadError,
66 EntryNotInPlaylist,
67 ExistingVideoReached,
68 ExtractorError,
69 GeoRestrictedError,
70 HEADRequest,
f8271158 71 ISO3166Utils,
72 LazyList,
73 MaxDownloadsReached,
19a03940 74 Namespace,
f8271158 75 PagedList,
76 PerRequestProxyHandler,
7e88d7d7 77 PlaylistEntries,
f8271158 78 Popen,
79 PostProcessingError,
80 ReExtractInfo,
81 RejectedVideoReached,
82 SameFileError,
83 UnavailableVideoError,
84 YoutubeDLCookieProcessor,
85 YoutubeDLHandler,
86 YoutubeDLRedirectHandler,
eedb7ba5
S
87 age_restricted,
88 args_to_str,
ce02ed60 89 date_from_str,
ce02ed60 90 determine_ext,
b5559424 91 determine_protocol,
c0384f22 92 encode_compat_str,
ce02ed60 93 encodeFilename,
a06916d9 94 error_to_compat_str,
47cdc68e 95 escapeHTML,
590bc6f6 96 expand_path,
90137ca4 97 filter_dict,
e29663c6 98 float_or_none,
02dbf93f 99 format_bytes,
e0fd9573 100 format_decimal_suffix,
f8271158 101 format_field,
525ef922 102 formatSeconds,
0bb322b9 103 get_domain,
c9969434 104 int_or_none,
732044af 105 iri_to_uri,
34921b43 106 join_nonempty,
ce02ed60 107 locked_file,
0202b52a 108 make_dir,
dca08720 109 make_HTTPS_handler,
8b7539d2 110 merge_headers,
3158150c 111 network_exceptions,
ec11a9f4 112 number_of_digits,
cd6fc19e 113 orderedSet,
083c9df9 114 parse_filesize,
ce02ed60 115 preferredencoding,
eedb7ba5 116 prepend_extension,
51fb4995 117 register_socks_protocols,
3efb96a6 118 remove_terminal_sequences,
cfb56d1a 119 render_table,
eedb7ba5 120 replace_extension,
ce02ed60 121 sanitize_filename,
1bb5c511 122 sanitize_path,
dcf77cf1 123 sanitize_url,
67dda517 124 sanitized_Request,
e5660ee6 125 std_headers,
1211bb6d 126 str_or_none,
e29663c6 127 strftime_or_none,
ce02ed60 128 subtitles_filename,
819e0531 129 supports_terminal_sequences,
b1f94422 130 system_identifier,
f2ebc5c7 131 timetuple_from_msec,
732044af 132 to_high_limit_path,
324ad820 133 traverse_obj,
6033d980 134 try_get,
29eb5174 135 url_basename,
7d1eb38a 136 variadic,
58b1f00d 137 version_tuple,
53973b4d 138 windows_enable_vt_mode,
ce02ed60
PH
139 write_json_file,
140 write_string,
4f026faf 141)
f8271158 142from .version import RELEASE_GIT_HEAD, __version__
8222d8de 143
e9c0cdd3
YCH
144if compat_os_name == 'nt':
145 import ctypes
146
2459b6e1 147
86e5f3ed 148class YoutubeDL:
8222d8de
JMF
149 """YoutubeDL class.
150
151 YoutubeDL objects are the ones responsible for downloading the
152 actual video file and writing it to disk if the user has requested
153 it, among some other tasks. In most cases there should be one per
154 program. Since, given a video URL, the downloader doesn't know how to
155 extract all the needed information (a task the InfoExtractors do), it
156 has to pass the URL to one of them.
157
158 For this, YoutubeDL objects have a method that allows
159 InfoExtractors to be registered in a given order. When it is passed
160 a URL, the YoutubeDL object hands it to the first InfoExtractor it
161 finds that reports being able to handle it. The InfoExtractor extracts
162 all the information about the video or videos the URL refers to, and
163 YoutubeDL processes the extracted information, possibly using a File
164 Downloader to download the video.
165
166 YoutubeDL objects accept a lot of parameters. In order not to saturate
167 the object constructor with arguments, it receives a dictionary of
168 options instead. These options are available through the params
169 attribute for the InfoExtractors to use. The YoutubeDL also
170 registers itself as the downloader in charge of the InfoExtractors
171 that are added to it, so this is a "mutual registration".
172
173 Available options:
174
175 username: Username for authentication purposes.
176 password: Password for authentication purposes.
180940e0 177 videopassword: Password for accessing a video.
1da50aa3
S
178 ap_mso: Adobe Pass multiple-system operator identifier.
179 ap_username: Multiple-system operator account username.
180 ap_password: Multiple-system operator account password.
8222d8de
JMF
181 usenetrc: Use netrc for authentication instead.
182 verbose: Print additional info to stdout.
183 quiet: Do not print messages to stdout.
ad8915b7 184 no_warnings: Do not print out anything for warnings.
bb66c247 185 forceprint: A dict with keys WHEN mapped to a list of templates to
186 print to stdout. The allowed keys are video or any of the
187 items in utils.POSTPROCESS_WHEN.
ca30f449 188 For compatibility, a single list is also accepted
bb66c247 189 print_to_file: A dict with keys WHEN (same as forceprint) mapped to
190 a list of tuples with (template, filename)
8694c600 191 forcejson: Force printing info_dict as JSON.
63e0be34
PH
192 dump_single_json: Force printing the info_dict of the whole playlist
193 (or video) as a single JSON line.
c25228e5 194 force_write_download_archive: Force writing download archive regardless
195 of 'skip_download' or 'simulate'.
b7b04c78 196 simulate: Do not download the video files. If unset (or None),
197 simulate only if listsubtitles, listformats or list_thumbnails is used
eb8a4433 198 format: Video format code. see "FORMAT SELECTION" for more details.
093a1710 199 You can also pass a function. The function takes 'ctx' as
200 argument and returns the formats to download.
201 See "build_format_selector" for an implementation
63ad4d43 202 allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
b7da73eb 203 ignore_no_formats_error: Ignore "No video formats" error. Usefull for
204 extracting metadata even if the video is not actually
205 available for download (experimental)
0930b11f 206 format_sort: A list of fields by which to sort the video formats.
207 See "Sorting Formats" for more details.
c25228e5 208 format_sort_force: Force the given format_sort. see "Sorting Formats"
209 for more details.
08d30158 210 prefer_free_formats: Whether to prefer video formats with free containers
211 over non-free ones of same quality.
c25228e5 212 allow_multiple_video_streams: Allow multiple video streams to be merged
213 into a single file
214 allow_multiple_audio_streams: Allow multiple audio streams to be merged
215 into a single file
0ba692ac 216 check_formats: Whether to test if the formats are downloadable.
9f1a1c36 217 Can be True (check all), False (check none),
218 'selected' (check selected formats),
0ba692ac 219 or None (check only if requested by extractor)
4524baf0 220 paths: Dictionary of output paths. The allowed keys are 'home',
221 'temp' and the keys of OUTTMPL_TYPES (in utils.py)
de6000d9 222 outtmpl: Dictionary of templates for output names. Allowed keys
4524baf0 223 are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
34488702 224 For compatibility with youtube-dl, a single string can also be used
a820dc72
RA
225 outtmpl_na_placeholder: Placeholder for unavailable meta fields.
226 restrictfilenames: Do not allow "&" and spaces in file names
227 trim_file_name: Limit length of filename (extension excluded)
4524baf0 228 windowsfilenames: Force the filenames to be windows compatible
b1940459 229 ignoreerrors: Do not stop on download/postprocessing errors.
230 Can be 'only_download' to ignore only download errors.
231 Default is 'only_download' for CLI, but False for API
26e2805c 232 skip_playlist_after_errors: Number of allowed failures until the rest of
233 the playlist is skipped
d22dec74 234 force_generic_extractor: Force downloader to use the generic extractor
0c3d0f51 235 overwrites: Overwrite all video and metadata files if True,
236 overwrite only non-video files if None
237 and don't overwrite any file if False
34488702 238 For compatibility with youtube-dl,
239 "nooverwrites" may also be used instead
c14e88f0 240 playlist_items: Specific indices of playlist to download.
75822ca7 241 playlistrandom: Download playlist items in random order.
7e9a6125 242 lazy_playlist: Process playlist entries as they are received.
8222d8de
JMF
243 matchtitle: Download only matching titles.
244 rejecttitle: Reject downloads for matching titles.
8bf9319e 245 logger: Log messages to a logging.Logger instance.
8222d8de 246 logtostderr: Log messages to stderr instead of stdout.
819e0531 247 consoletitle: Display progress in console window's titlebar.
8222d8de
JMF
248 writedescription: Write the video description to a .description file
249 writeinfojson: Write the video description to a .info.json file
75d43ca0 250 clean_infojson: Remove private fields from the infojson
34488702 251 getcomments: Extract video comments. This will not be written to disk
06167fbb 252 unless writeinfojson is also given
1fb07d10 253 writeannotations: Write the video annotations to a .annotations.xml file
8222d8de 254 writethumbnail: Write the thumbnail image to a file
c25228e5 255 allow_playlist_files: Whether to write playlists' description, infojson etc
256 also to disk when using the 'write*' options
ec82d85a 257 write_all_thumbnails: Write all thumbnail formats to files
732044af 258 writelink: Write an internet shortcut file, depending on the
259 current platform (.url/.webloc/.desktop)
260 writeurllink: Write a Windows internet shortcut file (.url)
261 writewebloclink: Write a macOS internet shortcut file (.webloc)
262 writedesktoplink: Write a Linux internet shortcut file (.desktop)
8222d8de 263 writesubtitles: Write the video subtitles to a file
741dd8ea 264 writeautomaticsub: Write the automatically generated subtitles to a file
8222d8de 265 listsubtitles: Lists all available subtitles for the video
a504ced0 266 subtitlesformat: The format code for subtitles
c32b0aab 267 subtitleslangs: List of languages of the subtitles to download (can be regex).
268 The list may contain "all" to refer to all the available
269 subtitles. The language can be prefixed with a "-" to
270 exclude it from the requested languages. Eg: ['all', '-live_chat']
8222d8de
JMF
271 keepvideo: Keep the video file after post-processing
272 daterange: A DateRange object, download only if the upload_date is in the range.
273 skip_download: Skip the actual download of the video file
c35f9e72 274 cachedir: Location of the cache files in the filesystem.
a0e07d31 275 False to disable filesystem cache.
47192f92 276 noplaylist: Download single video instead of a playlist if in doubt.
8dbe9899
PH
277 age_limit: An integer representing the user's age in years.
278 Unsuitable videos for the given age are skipped.
5fe18bdb
PH
279 min_views: An integer representing the minimum view count the video
280 must have in order to not be skipped.
281 Videos without view count information are always
282 downloaded. None for no limit.
283 max_views: An integer representing the maximum view count.
284 Videos that are more popular than that are not
285 downloaded.
286 Videos without view count information are always
287 downloaded. None for no limit.
288 download_archive: File name of a file where all downloads are recorded.
c1c9a79c
PH
289 Videos already present in the file are not downloaded
290 again.
8a51f564 291 break_on_existing: Stop the download process after attempting to download a
292 file that is in the archive.
293 break_on_reject: Stop the download process when encountering a video that
294 has been filtered out.
b222c271 295 break_per_url: Whether break_on_reject and break_on_existing
296 should act on each input URL as opposed to for the entire queue
d76fa1f3 297 cookiefile: File name or text stream from where cookies should be read and dumped to
f59f5ef8
MB
298 cookiesfrombrowser: A tuple containing the name of the browser, the profile
299 name/path from where cookies are loaded, and the name of the
300 keyring. Eg: ('chrome', ) or ('vivaldi', 'default', 'BASICTEXT')
f81c62a6 301 legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
302 support RFC 5746 secure renegotiation
f59f5ef8 303 nocheckcertificate: Do not verify SSL certificates
bb58c9ed 304 client_certificate: Path to client certificate file in PEM format. May include the private key
305 client_certificate_key: Path to private key file for client certificate
306 client_certificate_password: Password for client certificate private key, if encrypted.
307 If not provided and the key is encrypted, yt-dlp will ask interactively
7e8c0af0
PH
308 prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
309 At the moment, this is only supported by YouTube.
8b7539d2 310 http_headers: A dictionary of custom headers to be used for all requests
a1ee09e8 311 proxy: URL of the proxy server to use
38cce791 312 geo_verification_proxy: URL of the proxy to use for IP address verification
504f20dd 313 on geo-restricted sites.
e344693b 314 socket_timeout: Time to wait for unresponsive hosts, in seconds
0783b09b
PH
315 bidi_workaround: Work around buggy terminals without bidirectional text
316 support, using fribidi
a0ddb8a2 317 debug_printtraffic:Print out sent and received HTTP traffic
04b4d394
PH
318 default_search: Prepend this string if an input url is not valid.
319 'auto' for elaborate guessing
62fec3b2 320 encoding: Use this encoding instead of the system-specified.
e8ee972c 321 extract_flat: Do not resolve URLs, return the immediate result.
057a5206
PH
322 Pass in 'in_playlist' to only show this behavior for
323 playlist items.
f2ebc5c7 324 wait_for_video: If given, wait for scheduled streams to become available.
325 The value should be a tuple containing the range
326 (min_secs, max_secs) to wait between retries
4f026faf 327 postprocessors: A list of dictionaries, each with an entry
71b640cc 328 * key: The name of the postprocessor. See
7a5c1cfe 329 yt_dlp/postprocessor/__init__.py for a list.
bb66c247 330 * when: When to run the postprocessor. Allowed values are
331 the entries of utils.POSTPROCESS_WHEN
56d868db 332 Assumed to be 'post_process' if not given
71b640cc
PH
333 progress_hooks: A list of functions that get called on download
334 progress, with a dictionary with the entries
5cda4eda 335 * status: One of "downloading", "error", or "finished".
ee69b99a 336 Check this first and ignore unknown values.
3ba7740d 337 * info_dict: The extracted info_dict
71b640cc 338
5cda4eda 339 If status is one of "downloading", or "finished", the
ee69b99a
PH
340 following properties may also be present:
341 * filename: The final filename (always present)
5cda4eda 342 * tmpfilename: The filename we're currently writing to
71b640cc
PH
343 * downloaded_bytes: Bytes on disk
344 * total_bytes: Size of the whole file, None if unknown
5cda4eda
PH
345 * total_bytes_estimate: Guess of the eventual file size,
346 None if unavailable.
347 * elapsed: The number of seconds since download started.
71b640cc
PH
348 * eta: The estimated time in seconds, None if unknown
349 * speed: The download speed in bytes/second, None if
350 unknown
5cda4eda
PH
351 * fragment_index: The counter of the currently
352 downloaded video fragment.
353 * fragment_count: The number of fragments (= individual
354 files that will be merged)
71b640cc
PH
355
356 Progress hooks are guaranteed to be called at least once
357 (with status "finished") if the download is successful.
819e0531 358 postprocessor_hooks: A list of functions that get called on postprocessing
359 progress, with a dictionary with the entries
360 * status: One of "started", "processing", or "finished".
361 Check this first and ignore unknown values.
362 * postprocessor: Name of the postprocessor
363 * info_dict: The extracted info_dict
364
365 Progress hooks are guaranteed to be called at least twice
366 (with status "started" and "finished") if the processing is successful.
45598f15 367 merge_output_format: Extension to use when merging formats.
6b591b29 368 final_ext: Expected final extension; used to detect when the file was
59a7a13e 369 already downloaded and converted
6271f1ca
PH
370 fixup: Automatically correct known faults of the file.
371 One of:
372 - "never": do nothing
373 - "warn": only emit a warning
374 - "detect_or_warn": check whether we can do anything
62cd676c 375 about it, warn otherwise (default)
504f20dd 376 source_address: Client-side IP address to bind to.
1cf376f5 377 sleep_interval_requests: Number of seconds to sleep between requests
378 during extraction
7aa589a5
S
379 sleep_interval: Number of seconds to sleep before each download when
380 used alone or a lower bound of a range for randomized
381 sleep before each download (minimum possible number
382 of seconds to sleep) when used along with
383 max_sleep_interval.
384 max_sleep_interval:Upper bound of a range for randomized sleep before each
385 download (maximum possible number of seconds to sleep).
386 Must only be used along with sleep_interval.
387 Actual sleep time will be a random float from range
388 [sleep_interval; max_sleep_interval].
1cf376f5 389 sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
cfb56d1a
PH
390 listformats: Print an overview of available video formats and exit.
391 list_thumbnails: Print a table of all thumbnails and exit.
0a41f331 392 match_filter: A function that gets called for every video with the signature
393 (info_dict, *, incomplete: bool) -> Optional[str]
394 For backward compatibility with youtube-dl, the signature
395 (info_dict) -> Optional[str] is also allowed.
396 - If it returns a message, the video is ignored.
397 - If it returns None, the video is downloaded.
398 - If it returns utils.NO_DEFAULT, the user is interactively
399 asked whether to download the video.
347de493 400 match_filter_func in utils.py is one example of this.
7e5db8c9 401 no_color: Do not emit color codes in output.
0a840f58 402 geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
504f20dd 403 HTTP header
0a840f58 404 geo_bypass_country:
773f291d
S
405 Two-letter ISO 3166-2 country code that will be used for
406 explicit geographic restriction bypassing via faking
504f20dd 407 X-Forwarded-For HTTP header
5f95927a
S
408 geo_bypass_ip_block:
409 IP range in CIDR notation that will be used similarly to
504f20dd 410 geo_bypass_country
52a8a1e1 411 external_downloader: A dictionary of protocol keys and the executable of the
412 external downloader to use for it. The allowed protocols
413 are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
414 Set the value to 'native' to use the native downloader
53ed7066 415 compat_opts: Compatibility options. See "Differences in default behavior".
3acf6d38 416 The following options do not work when used through the API:
b5ae35ee 417 filename, abort-on-error, multistreams, no-live-chat, format-sort
dac5df5a 418 no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
e4f02757 419 Refer to __init__.py for their implementation
819e0531 420 progress_template: Dictionary of templates for progress outputs.
421 Allowed keys are 'download', 'postprocess',
422 'download-title' (console title) and 'postprocess-title'.
423 The template is mapped on a dictionary with keys 'progress' and 'info'
23326151 424 retry_sleep_functions: Dictionary of functions that take the number of attempts
425 as argument and return the time to sleep in seconds.
426 Allowed keys are 'http', 'fragment', 'file_access'
5ec1b6b7 427 download_ranges: A function that gets called for every video with the signature
428 (info_dict, *, ydl) -> Iterable[Section].
429 Only the returned sections will be downloaded. Each Section contains:
430 * start_time: Start time of the section in seconds
431 * end_time: End time of the section in seconds
432 * title: Section title (Optional)
433 * index: Section number (Optional)
fe7e0c98 434
8222d8de 435 The following parameters are not used by YoutubeDL itself, they are used by
7a5c1cfe 436 the downloader (see yt_dlp/downloader/common.py):
51d9739f 437 nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
205a0654
EH
438 max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
439 continuedl, noprogress, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
59a7a13e 440 external_downloader_args, concurrent_fragment_downloads.
76b1bd67
JMF
441
442 The following options are used by the post processors:
c0b7d117
S
443 ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
444 to the binary or its containing directory.
43820c03 445 postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
34488702 446 and a list of additional command-line arguments for the
447 postprocessor/executable. The dict can also have "PP+EXE" keys
448 which are used when the given exe is used by the given PP.
449 Use 'default' as the name for arguments to be passed to all PP
450 For compatibility with youtube-dl, a single list of args
451 can also be used
e409895f 452
453 The following options are used by the extractors:
62bff2c1 454 extractor_retries: Number of times to retry for known errors
455 dynamic_mpd: Whether to process dynamic DASH manifests (default: True)
e409895f 456 hls_split_discontinuity: Split HLS playlists to different formats at
62bff2c1 457 discontinuities such as ad breaks (default: False)
5d3a0e79 458 extractor_args: A dictionary of arguments to be passed to the extractors.
459 See "EXTRACTOR ARGUMENTS" for details.
460 Eg: {'youtube': {'skip': ['dash', 'hls']}}
88f23a18 461 mark_watched: Mark videos watched (even with --simulate). Only for YouTube
1890fc63 462
463 The following options are deprecated and may be removed in the future:
464
7e9a6125 465 playliststart: - Use playlist_items
466 Playlist item to start at.
467 playlistend: - Use playlist_items
468 Playlist item to end at.
469 playlistreverse: - Use playlist_items
470 Download playlist items in reverse order.
1890fc63 471 forceurl: - Use forceprint
472 Force printing final URL.
473 forcetitle: - Use forceprint
474 Force printing title.
475 forceid: - Use forceprint
476 Force printing ID.
477 forcethumbnail: - Use forceprint
478 Force printing thumbnail URL.
479 forcedescription: - Use forceprint
480 Force printing description.
481 forcefilename: - Use forceprint
482 Force printing final filename.
483 forceduration: - Use forceprint
484 Force printing duration.
485 allsubtitles: - Use subtitleslangs = ['all']
486 Downloads all the subtitles of the video
487 (requires writesubtitles or writeautomaticsub)
488 include_ads: - Doesn't work
489 Download ads as well
490 call_home: - Not implemented
491 Boolean, true iff we are allowed to contact the
492 yt-dlp servers for debugging.
493 post_hooks: - Register a custom postprocessor
494 A list of functions that get called as the final step
495 for each video file, after all postprocessors have been
496 called. The filename will be passed as the only argument.
497 hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
498 Use the native HLS downloader instead of ffmpeg/avconv
499 if True, otherwise use ffmpeg/avconv if False, otherwise
500 use downloader suggested by extractor if None.
501 prefer_ffmpeg: - avconv support is deprecated
502 If False, use avconv instead of ffmpeg if both are available,
503 otherwise prefer ffmpeg.
504 youtube_include_dash_manifest: - Use extractor_args
5d3a0e79 505 If True (default), DASH manifests and related
62bff2c1 506 data will be downloaded and processed by extractor.
507 You can reduce network I/O by disabling it if you don't
508 care about DASH. (only for youtube)
1890fc63 509 youtube_include_hls_manifest: - Use extractor_args
5d3a0e79 510 If True (default), HLS manifests and related
62bff2c1 511 data will be downloaded and processed by extractor.
512 You can reduce network I/O by disabling it if you don't
513 care about HLS. (only for youtube)
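
    A minimal usage sketch tying several of the options above together
    (the option values are illustrative, not defaults):

        import yt_dlp

        def longer_than_an_hour(info_dict, *, incomplete):
            if (info_dict.get('duration') or 0) > 3600:
                return 'Skipping video longer than an hour'
            return None  # None means "download it"

        def on_progress(d):
            if d['status'] == 'finished':
                print('Done downloading', d['filename'])

        ydl_opts = {
            'format': 'bestvideo*+bestaudio/best',
            'paths': {'home': '~/Videos'},
            'outtmpl': {'default': '%(title)s [%(id)s].%(ext)s'},
            'match_filter': longer_than_an_hour,
            'progress_hooks': [on_progress],
        }
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])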
8222d8de
JMF
514 """
515
86e5f3ed 516 _NUMERIC_FIELDS = {
c9969434 517 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
e6f21b3d 518 'timestamp', 'release_timestamp',
c9969434
S
519 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
520 'average_rating', 'comment_count', 'age_limit',
521 'start_time', 'end_time',
522 'chapter_number', 'season_number', 'episode_number',
523 'track_number', 'disc_number', 'release_year',
86e5f3ed 524 }
c9969434 525
6db9c4d5 526 _format_fields = {
527 # NB: Keep in sync with the docstring of extractor/common.py
a44ca5a4 528 'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
6db9c4d5 529 'width', 'height', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr',
530 'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx',
531 'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
532 'preference', 'language', 'language_preference', 'quality', 'source_preference',
533 'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'downloader_options',
534 'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
535 }
48ee10ee 536 _format_selection_exts = {
537 'audio': {'m4a', 'mp3', 'ogg', 'aac'},
538 'video': {'mp4', 'flv', 'webm', '3gp'},
539 'storyboards': {'mhtml'},
540 }
541
3511266b 542 def __init__(self, params=None, auto_init=True):
883d4b1e 543 """Create a FileDownloader object with the given options.
544 @param auto_init Whether to load the default extractors and print header (if verbose).
49a57e70 545 Set to 'no_verbose_header' to not print the header
883d4b1e 546 """
e9f9a10f
JMF
547 if params is None:
548 params = {}
592b7485 549 self.params = params
8b7491c8 550 self._ies = {}
56c73665 551 self._ies_instances = {}
1e43a6f7 552 self._pps = {k: [] for k in POSTPROCESS_WHEN}
b35496d8 553 self._printed_messages = set()
1cf376f5 554 self._first_webpage_request = True
ab8e5e51 555 self._post_hooks = []
933605d7 556 self._progress_hooks = []
819e0531 557 self._postprocessor_hooks = []
8222d8de
JMF
558 self._download_retcode = 0
559 self._num_downloads = 0
9c906919 560 self._num_videos = 0
592b7485 561 self._playlist_level = 0
562 self._playlist_urls = set()
a0e07d31 563 self.cache = Cache(self)
34308b30 564
819e0531 565 windows_enable_vt_mode()
591bb9d3 566 stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
567 self._out_files = Namespace(
568 out=stdout,
569 error=sys.stderr,
570 screen=sys.stderr if self.params.get('quiet') else stdout,
571 console=None if compat_os_name == 'nt' else next(
cf4f42cb 572 filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
591bb9d3 573 )
574 self._allow_colors = Namespace(**{
575 type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
64fa820c 576 for type_, stream in self._out_files.items_ if type_ != 'console'
591bb9d3 577 })
819e0531 578
eff42759 579 MIN_SUPPORTED, MIN_RECOMMENDED = (3, 6), (3, 7)
580 current_version = sys.version_info[:2]
581 if current_version < MIN_RECOMMENDED:
9d339c41 582 msg = ('Support for Python version %d.%d has been deprecated. '
583 'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details. '
584 'You will receive only one more update on this version')
eff42759 585 if current_version < MIN_SUPPORTED:
586 msg = 'Python version %d.%d is no longer supported'
587 self.deprecation_warning(
588 f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))
a61f4b28 589
88acdbc2 590 if self.params.get('allow_unplayable_formats'):
591 self.report_warning(
ec11a9f4 592 f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
819e0531 593 'This is a developer option intended for debugging. \n'
594 ' If you experience any issues while using this option, '
ec11a9f4 595 f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')
88acdbc2 596
be5df5ee
S
597 def check_deprecated(param, option, suggestion):
598 if self.params.get(param) is not None:
86e5f3ed 599 self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
be5df5ee
S
600 return True
601 return False
602
603 if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
38cce791
YCH
604 if self.params.get('geo_verification_proxy') is None:
605 self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']
606
0d1bb027 607 check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
608 check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
53ed7066 609 check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')
0d1bb027 610
49a57e70 611 for msg in self.params.get('_warnings', []):
0d1bb027 612 self.report_warning(msg)
ee8dd27a 613 for msg in self.params.get('_deprecation_warnings', []):
614 self.deprecation_warning(msg)
0d1bb027 615
8a82af35 616 self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
617 if not compat_has_legacy:
618 self.params['compat_opts'].add('no-compat-legacy')
619 if 'list-formats' in self.params['compat_opts']:
ec11a9f4 620 self.params['listformats_table'] = False
621
b5ae35ee 622 if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
b868936c 623 # nooverwrites was unnecessarily changed to overwrites
624 # in 0c3d0f51778b153f65c21906031c2e091fcfb641
625 # This ensures compatibility with both keys
626 self.params['overwrites'] = not self.params['nooverwrites']
b5ae35ee 627 elif self.params.get('overwrites') is None:
628 self.params.pop('overwrites', None)
b868936c 629 else:
630 self.params['nooverwrites'] = not self.params['overwrites']
b9d973be 631
455a15e2 632 self.params.setdefault('forceprint', {})
633 self.params.setdefault('print_to_file', {})
bb66c247 634
635 # Compatibility with older syntax
ca30f449 636 if not isinstance(params['forceprint'], dict):
455a15e2 637 self.params['forceprint'] = {'video': params['forceprint']}
ca30f449 638
455a15e2 639 if self.params.get('bidi_workaround', False):
1c088fa8
PH
640 try:
641 import pty
642 master, slave = pty.openpty()
ac668111 643 width = shutil.get_terminal_size().columns
591bb9d3 644 width_args = [] if width is None else ['-w', str(width)]
645 sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
5d681e96 646 try:
d3c93ec2 647 self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
5d681e96 648 except OSError:
d3c93ec2 649 self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
5d681e96 650 self._output_channel = os.fdopen(master, 'rb')
1c088fa8 651 except OSError as ose:
66e7ace1 652 if ose.errno == errno.ENOENT:
49a57e70 653 self.report_warning(
654 'Could not find fribidi executable, ignoring --bidi-workaround. '
655 'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
1c088fa8
PH
656 else:
657 raise
0783b09b 658
97ec5bc5 659 if auto_init:
660 if auto_init != 'no_verbose_header':
661 self.print_debug_header()
662 self.add_default_info_extractors()
663
3089bc74
S
664 if (sys.platform != 'win32'
665 and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
455a15e2 666 and not self.params.get('restrictfilenames', False)):
e9137224 667 # Unicode filesystem API will throw errors (#1474, #13027)
34308b30 668 self.report_warning(
6febd1c1 669 'Assuming --restrict-filenames since file system encoding '
1b725173 670 'cannot encode all characters. '
6febd1c1 671 'Set the LC_ALL environment variable to fix this.')
4a98cdbf 672 self.params['restrictfilenames'] = True
34308b30 673
bf1824b3 674 self._parse_outtmpl()
486dd09e 675
187986a8 676 # Creating format selector here allows us to catch syntax errors before the extraction
677 self.format_selector = (
fa9f30b8 678 self.params.get('format') if self.params.get('format') in (None, '-')
093a1710 679 else self.params['format'] if callable(self.params['format'])
187986a8 680 else self.build_format_selector(self.params['format']))
681
8b7539d2 682 # Set http_headers defaults according to std_headers
683 self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))
684
013b50b7 685 hooks = {
686 'post_hooks': self.add_post_hook,
687 'progress_hooks': self.add_progress_hook,
688 'postprocessor_hooks': self.add_postprocessor_hook,
689 }
690 for opt, fn in hooks.items():
691 for ph in self.params.get(opt, []):
692 fn(ph)
71b640cc 693
5bfc8bee 694 for pp_def_raw in self.params.get('postprocessors', []):
695 pp_def = dict(pp_def_raw)
696 when = pp_def.pop('when', 'post_process')
697 self.add_post_processor(
f9934b96 698 get_postprocessor(pp_def.pop('key'))(self, **pp_def),
5bfc8bee 699 when=when)
700
97ec5bc5 701 self._setup_opener()
51fb4995
YCH
702 register_socks_protocols()
703
ed39cac5 704 def preload_download_archive(fn):
705 """Preload the archive, if any is specified"""
706 if fn is None:
707 return False
49a57e70 708 self.write_debug(f'Loading archive file {fn!r}')
ed39cac5 709 try:
710 with locked_file(fn, 'r', encoding='utf-8') as archive_file:
711 for line in archive_file:
712 self.archive.add(line.strip())
86e5f3ed 713 except OSError as ioe:
ed39cac5 714 if ioe.errno != errno.ENOENT:
715 raise
716 return False
717 return True
718
719 self.archive = set()
720 preload_download_archive(self.params.get('download_archive'))
721
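# Illustrative note (not part of the original code): the archive is a plain
# text file with one entry per line; yt-dlp typically records entries as
# "<extractor key in lowercase> <video id>", e.g. "youtube dQw4w9WgXcQ".
# Membership in self.archive is what the download-archive and
# break_on_existing checks rely on. A hypothetical round trip:
#
#   with open('downloaded.txt', 'w', encoding='utf-8') as f:
#       f.write('youtube dQw4w9WgXcQ\n')
#   ydl = YoutubeDL({'download_archive': 'downloaded.txt'})
#   assert 'youtube dQw4w9WgXcQ' in ydl.archive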
7d4111ed
PH
722 def warn_if_short_id(self, argv):
723 # short YouTube ID starting with dash?
724 idxs = [
725 i for i, a in enumerate(argv)
726 if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
727 if idxs:
728 correct_argv = (
7a5c1cfe 729 ['yt-dlp']
3089bc74
S
730 + [a for i, a in enumerate(argv) if i not in idxs]
731 + ['--'] + [argv[i] for i in idxs]
7d4111ed
PH
732 )
733 self.report_warning(
734 'Long argument string detected. '
49a57e70 735 'Use -- to separate parameters and URLs, like this:\n%s' %
7d4111ed
PH
736 args_to_str(correct_argv))
737
8222d8de
JMF
738 def add_info_extractor(self, ie):
739 """Add an InfoExtractor object to the end of the list."""
8b7491c8 740 ie_key = ie.ie_key()
741 self._ies[ie_key] = ie
e52d7f85 742 if not isinstance(ie, type):
8b7491c8 743 self._ies_instances[ie_key] = ie
e52d7f85 744 ie.set_downloader(self)
8222d8de 745
8b7491c8 746 def _get_info_extractor_class(self, ie_key):
747 ie = self._ies.get(ie_key)
748 if ie is None:
749 ie = get_info_extractor(ie_key)
750 self.add_info_extractor(ie)
751 return ie
752
56c73665
JMF
753 def get_info_extractor(self, ie_key):
754 """
755 Get an instance of an IE with name ie_key. It will try to get one from
756 the _ies list; if there's no instance, it will create a new one and add
757 it to the extractor list.
758 """
759 ie = self._ies_instances.get(ie_key)
760 if ie is None:
761 ie = get_info_extractor(ie_key)()
762 self.add_info_extractor(ie)
763 return ie
764
023fa8c4
JMF
765 def add_default_info_extractors(self):
766 """
767 Add the InfoExtractors returned by gen_extractors to the end of the list
768 """
e52d7f85 769 for ie in gen_extractor_classes():
023fa8c4
JMF
770 self.add_info_extractor(ie)
771
56d868db 772 def add_post_processor(self, pp, when='post_process'):
8222d8de 773 """Add a PostProcessor object to the end of the chain."""
8aa0e7cd 774 assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
5bfa4862 775 self._pps[when].append(pp)
8222d8de
JMF
776 pp.set_downloader(self)
777
ab8e5e51
AM
778 def add_post_hook(self, ph):
779 """Add the post hook"""
780 self._post_hooks.append(ph)
781
933605d7 782 def add_progress_hook(self, ph):
819e0531 783 """Add the download progress hook"""
933605d7 784 self._progress_hooks.append(ph)
8ab470f1 785
819e0531 786 def add_postprocessor_hook(self, ph):
787 """Add the postprocessing progress hook"""
788 self._postprocessor_hooks.append(ph)
5bfc8bee 789 for pps in self._pps.values():
790 for pp in pps:
791 pp.add_progress_hook(ph)
819e0531 792
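# Hedged sketch of the hook API described in the class docstring: a hook is
# just a callable that receives the progress dictionary; the names below are
# illustrative, not part of the codebase.
#
#   def on_progress(d):
#       if d['status'] == 'downloading':
#           print(d.get('downloaded_bytes'), '/', d.get('total_bytes'))
#
#   ydl.add_progress_hook(on_progress)   # download progress
#   ydl.add_postprocessor_hook(print)    # also attached to already-added PPs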
1c088fa8 793 def _bidi_workaround(self, message):
5d681e96 794 if not hasattr(self, '_output_channel'):
1c088fa8
PH
795 return message
796
5d681e96 797 assert hasattr(self, '_output_process')
14f25df2 798 assert isinstance(message, str)
6febd1c1 799 line_count = message.count('\n') + 1
0f06bcd7 800 self._output_process.stdin.write((message + '\n').encode())
5d681e96 801 self._output_process.stdin.flush()
0f06bcd7 802 res = ''.join(self._output_channel.readline().decode()
9e1a5b84 803 for _ in range(line_count))
6febd1c1 804 return res[:-len('\n')]
1c088fa8 805
b35496d8 806 def _write_string(self, message, out=None, only_once=False):
807 if only_once:
808 if message in self._printed_messages:
809 return
810 self._printed_messages.add(message)
811 write_string(message, out=out, encoding=self.params.get('encoding'))
734f90bb 812
cf4f42cb 813 def to_stdout(self, message, skip_eol=False, quiet=None):
0760b0a7 814 """Print message to stdout"""
cf4f42cb 815 if quiet is not None:
ae6a1b95 816 self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. Use "YoutubeDL.to_screen" instead')
8a82af35 817 if skip_eol is not False:
818 self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. Use "YoutubeDL.to_screen" instead')
0bf9dc1e 819 self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)
cf4f42cb 820
821 def to_screen(self, message, skip_eol=False, quiet=None):
822 """Print message to screen if not in quiet mode"""
8bf9319e 823 if self.params.get('logger'):
43afe285 824 self.params['logger'].debug(message)
cf4f42cb 825 return
826 if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
827 return
828 self._write_string(
829 '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
591bb9d3 830 self._out_files.screen)
8222d8de 831
b35496d8 832 def to_stderr(self, message, only_once=False):
0760b0a7 833 """Print message to stderr"""
14f25df2 834 assert isinstance(message, str)
8bf9319e 835 if self.params.get('logger'):
43afe285
IB
836 self.params['logger'].error(message)
837 else:
5792c950 838 self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)
cf4f42cb 839
840 def _send_console_code(self, code):
591bb9d3 841 if compat_os_name == 'nt' or not self._out_files.console:
cf4f42cb 842 return
591bb9d3 843 self._write_string(code, self._out_files.console)
8222d8de 844
1e5b9a95
PH
845 def to_console_title(self, message):
846 if not self.params.get('consoletitle', False):
847 return
3efb96a6 848 message = remove_terminal_sequences(message)
4bede0d8
C
849 if compat_os_name == 'nt':
850 if ctypes.windll.kernel32.GetConsoleWindow():
851 # c_wchar_p() might not be necessary if `message` is
852 # already of type unicode()
853 ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
cf4f42cb 854 else:
855 self._send_console_code(f'\033]0;{message}\007')
1e5b9a95 856
bdde425c 857 def save_console_title(self):
cf4f42cb 858 if not self.params.get('consoletitle') or self.params.get('simulate'):
bdde425c 859 return
592b7485 860 self._send_console_code('\033[22;0t') # Save the title on stack
bdde425c
PH
861
862 def restore_console_title(self):
cf4f42cb 863 if not self.params.get('consoletitle') or self.params.get('simulate'):
bdde425c 864 return
592b7485 865 self._send_console_code('\033[23;0t') # Restore the title from stack
bdde425c
PH
866
867 def __enter__(self):
868 self.save_console_title()
869 return self
870
871 def __exit__(self, *args):
872 self.restore_console_title()
f89197d7 873
dca08720 874 if self.params.get('cookiefile') is not None:
1bab3437 875 self.cookiejar.save(ignore_discard=True, ignore_expires=True)
bdde425c 876
fa9f30b8 877 def trouble(self, message=None, tb=None, is_error=True):
8222d8de
JMF
878 """Determine action to take when a download problem appears.
879
880 Depending on whether the downloader has been configured to ignore
881 download errors or not, this method may throw an exception or
882 not when errors are found, after printing the message.
883
fa9f30b8 884 @param tb If given, is additional traceback information
885 @param is_error Whether to raise error according to ignoreerrors
8222d8de
JMF
886 """
887 if message is not None:
888 self.to_stderr(message)
889 if self.params.get('verbose'):
890 if tb is None:
891 if sys.exc_info()[0]: # if .trouble has been called from an except block
6febd1c1 892 tb = ''
8222d8de 893 if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
6febd1c1 894 tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
c0384f22 895 tb += encode_compat_str(traceback.format_exc())
8222d8de
JMF
896 else:
897 tb_data = traceback.format_list(traceback.extract_stack())
6febd1c1 898 tb = ''.join(tb_data)
c19bc311 899 if tb:
900 self.to_stderr(tb)
fa9f30b8 901 if not is_error:
902 return
b1940459 903 if not self.params.get('ignoreerrors'):
8222d8de
JMF
904 if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
905 exc_info = sys.exc_info()[1].exc_info
906 else:
907 exc_info = sys.exc_info()
908 raise DownloadError(message, exc_info)
909 self._download_retcode = 1
910
19a03940 911 Styles = Namespace(
912 HEADERS='yellow',
913 EMPHASIS='light blue',
492272fe 914 FILENAME='green',
19a03940 915 ID='green',
916 DELIM='blue',
917 ERROR='red',
918 WARNING='yellow',
919 SUPPRESS='light black',
920 )
ec11a9f4 921
7578d77d 922 def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
e5a998f3 923 text = str(text)
ec11a9f4 924 if test_encoding:
925 original_text = text
5c104538 926 # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
927 encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
ec11a9f4 928 text = text.encode(encoding, 'ignore').decode(encoding)
929 if fallback is not None and text != original_text:
930 text = fallback
7578d77d 931 return format_text(text, f) if allow_colors else text if fallback is None else fallback
ec11a9f4 932
591bb9d3 933 def _format_out(self, *args, **kwargs):
934 return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)
935
ec11a9f4 936 def _format_screen(self, *args, **kwargs):
591bb9d3 937 return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)
ec11a9f4 938
939 def _format_err(self, *args, **kwargs):
591bb9d3 940 return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)
819e0531 941
c84aeac6 942 def report_warning(self, message, only_once=False):
8222d8de
JMF
943 '''
944 Print the message to stderr; it will be prefixed with 'WARNING:'
945 If stderr is a tty file the 'WARNING:' will be colored
946 '''
6d07ce01
JMF
947 if self.params.get('logger') is not None:
948 self.params['logger'].warning(message)
8222d8de 949 else:
ad8915b7
PH
950 if self.params.get('no_warnings'):
951 return
ec11a9f4 952 self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)
8222d8de 953
ee8dd27a 954 def deprecation_warning(self, message):
955 if self.params.get('logger') is not None:
a44ca5a4 956 self.params['logger'].warning(f'DeprecationWarning: {message}')
ee8dd27a 957 else:
958 self.to_stderr(f'{self._format_err("DeprecationWarning:", self.Styles.ERROR)} {message}', True)
959
fa9f30b8 960 def report_error(self, message, *args, **kwargs):
8222d8de
JMF
961 '''
962 Does the same as trouble, but prefixes the message with 'ERROR:', colored
963 in red if stderr is a tty file.
964 '''
fa9f30b8 965 self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)
8222d8de 966
b35496d8 967 def write_debug(self, message, only_once=False):
0760b0a7 968 '''Log debug message or print message to stderr'''
969 if not self.params.get('verbose', False):
970 return
8a82af35 971 message = f'[debug] {message}'
0760b0a7 972 if self.params.get('logger'):
973 self.params['logger'].debug(message)
974 else:
b35496d8 975 self.to_stderr(message, only_once)
0760b0a7 976
8222d8de
JMF
977 def report_file_already_downloaded(self, file_name):
978 """Report file has already been fully downloaded."""
979 try:
6febd1c1 980 self.to_screen('[download] %s has already been downloaded' % file_name)
ce02ed60 981 except UnicodeEncodeError:
6febd1c1 982 self.to_screen('[download] The file has already been downloaded')
8222d8de 983
0c3d0f51 984 def report_file_delete(self, file_name):
985 """Report that existing file will be deleted."""
986 try:
c25228e5 987 self.to_screen('Deleting existing file %s' % file_name)
0c3d0f51 988 except UnicodeEncodeError:
c25228e5 989 self.to_screen('Deleting existing file')
0c3d0f51 990
319b6059 991 def raise_no_formats(self, info, forced=False, *, msg=None):
0a5a191a 992 has_drm = info.get('_has_drm')
319b6059 993 ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
994 msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
995 if forced or not ignored:
1151c407 996 raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
319b6059 997 expected=has_drm or ignored or expected)
88acdbc2 998 else:
999 self.report_warning(msg)
1000
de6000d9 1001 def parse_outtmpl(self):
bf1824b3 1002 self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
1003 self._parse_outtmpl()
1004 return self.params['outtmpl']
1005
1006 def _parse_outtmpl(self):
7b2c3f47 1007 sanitize = IDENTITY
bf1824b3 1008 if self.params.get('restrictfilenames'): # Remove spaces in the default template
71ce444a 1009 sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')
bf1824b3 1010
1011 outtmpl = self.params.setdefault('outtmpl', {})
1012 if not isinstance(outtmpl, dict):
1013 self.params['outtmpl'] = outtmpl = {'default': outtmpl}
1014 outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})
de6000d9 1015
21cd8fae 1016 def get_output_path(self, dir_type='', filename=None):
1017 paths = self.params.get('paths', {})
1018 assert isinstance(paths, dict)
1019 path = os.path.join(
1020 expand_path(paths.get('home', '').strip()),
1021 expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
1022 filename or '')
21cd8fae 1023 return sanitize_path(path, force=self.params.get('windowsfilenames'))
1024
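# Example of how the 'paths' dict is combined (values are hypothetical);
# relative type-specific paths are joined under the 'home' path:
#
#   ydl = YoutubeDL({'paths': {'home': '/media/videos', 'thumbnail': 'thumbs'}})
#   ydl.get_output_path('thumbnail', 'clip.webp')  # -> '/media/videos/thumbs/clip.webp'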
76a264ac 1025 @staticmethod
901130bb 1026 def _outtmpl_expandpath(outtmpl):
1027 # expand_path translates '%%' into '%' and '$$' into '$',
1028 # which is not what we want, since we need to keep
1029 # '%%' intact for the template dict substitution step. Work around
1030 # this with a boundary-alike separator hack.
1031 sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
86e5f3ed 1032 outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')
901130bb 1033
1034 # outtmpl should be expand_path'ed before template dict substitution
1035 # because meta fields may contain env variables we don't want to
1036 # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
1037 # title "Hello $PATH", we don't want `$PATH` to be expanded.
1038 return expand_path(outtmpl).replace(sep, '')
1039
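# For instance (hypothetical template): '~/vids/%%(title)s $$ %(id)s.%(ext)s'
# comes back with '~' expanded but '%%' and '$$' still intact, because the
# random separator shields them from expand_path and is stripped afterwards.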
1040 @staticmethod
1041 def escape_outtmpl(outtmpl):
1042 ''' Escape any remaining strings like %s, %abc% etc. '''
1043 return re.sub(
1044 STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
1045 lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
1046 outtmpl)
1047
1048 @classmethod
1049 def validate_outtmpl(cls, outtmpl):
76a264ac 1050 ''' @return None or Exception object '''
7d1eb38a 1051 outtmpl = re.sub(
47cdc68e 1052 STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
7d1eb38a 1053 lambda mobj: f'{mobj.group(0)[:-1]}s',
1054 cls._outtmpl_expandpath(outtmpl))
76a264ac 1055 try:
7d1eb38a 1056 cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
76a264ac 1057 return None
1058 except ValueError as err:
1059 return err
1060
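# Usage sketch (assumed, not from the original source): validate_outtmpl()
# returns None for a well-formed template and the ValueError for a broken one,
# letting callers surface template mistakes before extraction starts.
#
#   assert YoutubeDL.validate_outtmpl('%(title)s.%(ext)s') is None
#   assert isinstance(YoutubeDL.validate_outtmpl('%(title'), ValueError)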
03b4de72 1061 @staticmethod
1062 def _copy_infodict(info_dict):
1063 info_dict = dict(info_dict)
09b49e1f 1064 info_dict.pop('__postprocessors', None)
415f8d51 1065 info_dict.pop('__pending_error', None)
03b4de72 1066 return info_dict
1067
e0fd9573 1068 def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
1069 """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
1070 @param sanitize Whether to sanitize the output as a filename.
1071 For backward compatibility, a function can also be passed
1072 """
1073
6e84b215 1074 info_dict.setdefault('epoch', int(time.time())) # keep epoch consistent once set
143db31d 1075
03b4de72 1076 info_dict = self._copy_infodict(info_dict)
752cda38 1077 info_dict['duration_string'] = ( # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
53c18592 1078 formatSeconds(info_dict['duration'], '-' if sanitize else ':')
143db31d 1079 if info_dict.get('duration', None) is not None
1080 else None)
1d485a1a 1081 info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
9c906919 1082 info_dict['video_autonumber'] = self._num_videos
752cda38 1083 if info_dict.get('resolution') is None:
1084 info_dict['resolution'] = self.format_resolution(info_dict, default=None)
143db31d 1085
e6f21b3d 1086 # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
143db31d 1087 # of %(field)s to %(field)0Nd for backward compatibility
1088 field_size_compat_map = {
0a5a191a 1089 'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
ec11a9f4 1090 'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
752cda38 1091 'autonumber': self.params.get('autonumber_size') or 5,
143db31d 1092 }
752cda38 1093
385a27fa 1094 TMPL_DICT = {}
47cdc68e 1095 EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
385a27fa 1096 MATH_FUNCTIONS = {
1097 '+': float.__add__,
1098 '-': float.__sub__,
1099 }
e625be0d 1100 # Field is of the form key1.key2...
1101 # where keys (except first) can be string, int or slice
2b8a2973 1102 FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
1d485a1a 1103 MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
385a27fa 1104 MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
1d485a1a 1105 INTERNAL_FORMAT_RE = re.compile(rf'''(?x)
e625be0d 1106 (?P<negate>-)?
1d485a1a 1107 (?P<fields>{FIELD_RE})
1108 (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
e625be0d 1109 (?:>(?P<strf_format>.+?))?
34baa9fd 1110 (?P<remaining>
1111 (?P<alternate>(?<!\\),[^|&)]+)?
1112 (?:&(?P<replacement>.*?))?
1113 (?:\|(?P<default>.*?))?
1d485a1a 1114 )$''')
752cda38 1115
2b8a2973 1116 def _traverse_infodict(k):
1117 k = k.split('.')
1118 if k[0] == '':
1119 k.pop(0)
1120 return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)
76a264ac 1121
752cda38 1122 def get_value(mdict):
1123 # Object traversal
2b8a2973 1124 value = _traverse_infodict(mdict['fields'])
752cda38 1125 # Negative
1126 if mdict['negate']:
1127 value = float_or_none(value)
1128 if value is not None:
1129 value *= -1
1130 # Do maths
385a27fa 1131 offset_key = mdict['maths']
1132 if offset_key:
752cda38 1133 value = float_or_none(value)
1134 operator = None
385a27fa 1135 while offset_key:
1136 item = re.match(
1137 MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
1138 offset_key).group(0)
1139 offset_key = offset_key[len(item):]
1140 if operator is None:
752cda38 1141 operator = MATH_FUNCTIONS[item]
385a27fa 1142 continue
1143 item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
1144 offset = float_or_none(item)
1145 if offset is None:
2b8a2973 1146 offset = float_or_none(_traverse_infodict(item))
385a27fa 1147 try:
1148 value = operator(value, multiplier * offset)
1149 except (TypeError, ZeroDivisionError):
1150 return None
1151 operator = None
752cda38 1152 # Datetime formatting
1153 if mdict['strf_format']:
7c37ff97 1154 value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))
752cda38 1155
1156 return value
1157
b868936c 1158 na = self.params.get('outtmpl_na_placeholder', 'NA')
1159
e0fd9573 1160 def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
5c3895ff 1161 return sanitize_filename(str(value), restricted=restricted, is_id=(
1162 bool(re.search(r'(^|[_.])id(\.|$)', key))
8a82af35 1163 if 'filename-sanitization' in self.params['compat_opts']
5c3895ff 1164 else NO_DEFAULT))
e0fd9573 1165
1166 sanitizer = sanitize if callable(sanitize) else filename_sanitizer
1167 sanitize = bool(sanitize)
1168
6e84b215 1169 def _dumpjson_default(obj):
1170 if isinstance(obj, (set, LazyList)):
1171 return list(obj)
adbc4ec4 1172 return repr(obj)
6e84b215 1173
752cda38 1174 def create_key(outer_mobj):
1175 if not outer_mobj.group('has_key'):
b836dc94 1176 return outer_mobj.group(0)
752cda38 1177 key = outer_mobj.group('key')
752cda38 1178 mobj = re.match(INTERNAL_FORMAT_RE, key)
e0fd9573 1179 initial_field = mobj.group('fields') if mobj else ''
e978789f 1180 value, replacement, default = None, None, na
7c37ff97 1181 while mobj:
e625be0d 1182 mobj = mobj.groupdict()
7c37ff97 1183 default = mobj['default'] if mobj['default'] is not None else default
752cda38 1184 value = get_value(mobj)
e978789f 1185 replacement = mobj['replacement']
7c37ff97 1186 if value is None and mobj['alternate']:
34baa9fd 1187 mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
7c37ff97 1188 else:
1189 break
752cda38 1190
b868936c 1191 fmt = outer_mobj.group('format')
752cda38 1192 if fmt == 's' and value is not None and key in field_size_compat_map.keys():
86e5f3ed 1193 fmt = f'0{field_size_compat_map[key]:d}d'
752cda38 1194
e978789f 1195 value = default if value is None else value if replacement is None else replacement
752cda38 1196
4476d2c7 1197 flags = outer_mobj.group('conversion') or ''
7d1eb38a 1198 str_fmt = f'{fmt[:-1]}s'
524e2e4f 1199 if fmt[-1] == 'l': # list
4476d2c7 1200 delim = '\n' if '#' in flags else ', '
9e907ebd 1201 value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
524e2e4f 1202 elif fmt[-1] == 'j': # json
4476d2c7 1203 value, fmt = json.dumps(value, default=_dumpjson_default, indent=4 if '#' in flags else None), str_fmt
47cdc68e 1204 elif fmt[-1] == 'h': # html
1205 value, fmt = escapeHTML(value), str_fmt
524e2e4f 1206 elif fmt[-1] == 'q': # quoted
4476d2c7 1207 value = map(str, variadic(value) if '#' in flags else [value])
1208 value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
524e2e4f 1209 elif fmt[-1] == 'B': # bytes
0f06bcd7 1210 value = f'%{str_fmt}'.encode() % str(value).encode()
f5aa5cfb 1211 value, fmt = value.decode('utf-8', 'ignore'), 's'
524e2e4f 1212 elif fmt[-1] == 'U': # unicode normalized
524e2e4f 1213 value, fmt = unicodedata.normalize(
1214 # "+" = compatibility equivalence, "#" = NFD
4476d2c7 1215 'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
524e2e4f 1216 value), str_fmt
e0fd9573 1217 elif fmt[-1] == 'D': # decimal suffix
abbeeebc 1218 num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
1219 value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
1220 factor=1024 if '#' in flags else 1000)
37893bb0 1221 elif fmt[-1] == 'S': # filename sanitization
e0fd9573 1222 value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
7d1eb38a 1223 elif fmt[-1] == 'c':
524e2e4f 1224 if value:
1225 value = str(value)[0]
76a264ac 1226 else:
524e2e4f 1227 fmt = str_fmt
76a264ac 1228 elif fmt[-1] not in 'rs': # numeric
a439a3a4 1229 value = float_or_none(value)
752cda38 1230 if value is None:
1231 value, fmt = default, 's'
901130bb 1232
752cda38 1233 if sanitize:
1234 if fmt[-1] == 'r':
1235 # If value is an object, sanitize might convert it to a string
1236 # So we convert it to repr first
7d1eb38a 1237 value, fmt = repr(value), str_fmt
639f1cea 1238 if fmt[-1] in 'csr':
e0fd9573 1239 value = sanitizer(initial_field, value)
901130bb 1240
b868936c 1241 key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
385a27fa 1242 TMPL_DICT[key] = value
b868936c 1243 return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))
752cda38 1244
385a27fa 1245 return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT
143db31d 1246
819e0531 1247 def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
1248 outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
1249 return self.escape_outtmpl(outtmpl) % info_dict
1250
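# Editor's note - illustrative only, not part of the upstream source. A rough
# sketch of how the extended %-formatting implemented above behaves when
# driven through evaluate_outtmpl(); `ydl` is assumed to be a YoutubeDL
# instance and `info` a hypothetical extracted info dict (with a 'tags' list):
#
#   ydl.evaluate_outtmpl('%(title)s [%(id)s].%(ext)s', info)  # plain fields
#   ydl.evaluate_outtmpl('%(uploader|Unknown)s', info)        # '|' supplies a default value
#   ydl.evaluate_outtmpl('%(formats)#j', info)                # 'j' dumps JSON; '#' pretty-prints
#   ydl.evaluate_outtmpl('%(tags)l', info)                    # 'l' joins list values with ', '
#   ydl.evaluate_outtmpl('%(title)+.100U', info)              # 'U' Unicode-normalizes (NFKC here)
#   ydl.evaluate_outtmpl('%(filesize)D', info)                # 'D' appends a decimal suffix (K, M, ...)
#   ydl.evaluate_outtmpl('%(title)S', info)                   # 'S' sanitizes the value for filenames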
5127e92a 1251 def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
1252 assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
1253 if outtmpl is None:
bf1824b3 1254 outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
8222d8de 1255 try:
5127e92a 1256 outtmpl = self._outtmpl_expandpath(outtmpl)
e0fd9573 1257 filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
6a0546e3 1258 if not filename:
1259 return None
15da37c7 1260
5127e92a 1261 if tmpl_type in ('', 'temp'):
6a0546e3 1262 final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
1263 if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
1264 filename = replace_extension(filename, ext, final_ext)
5127e92a 1265 elif tmpl_type:
6a0546e3 1266 force_ext = OUTTMPL_TYPES[tmpl_type]
1267 if force_ext:
1268 filename = replace_extension(filename, force_ext, info_dict.get('ext'))
de6000d9 1269
bdc3fd2f
U
1270 # https://github.com/blackjack4494/youtube-dlc/issues/85
1271 trim_file_name = self.params.get('trim_file_name', False)
1272 if trim_file_name:
5c22c63d 1273 no_ext, *ext = filename.rsplit('.', 2)
1274 filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')
bdc3fd2f 1275
0202b52a 1276 return filename
8222d8de 1277 except ValueError as err:
6febd1c1 1278 self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
8222d8de
JMF
1279 return None
1280
5127e92a 1281 def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
1282 """Generate the output filename"""
1283 if outtmpl:
1284 assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
1285 dir_type = None
1286 filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
80c03fa9 1287 if not filename and dir_type not in ('', 'temp'):
1288 return ''
de6000d9 1289
c84aeac6 1290 if warn:
21cd8fae 1291 if not self.params.get('paths'):
de6000d9 1292 pass
1293 elif filename == '-':
c84aeac6 1294 self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
de6000d9 1295 elif os.path.isabs(filename):
c84aeac6 1296 self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
de6000d9 1297 if filename == '-' or not filename:
1298 return filename
1299
21cd8fae 1300 return self.get_output_path(dir_type, filename)
0202b52a 1301
120fe513 1302 def _match_entry(self, info_dict, incomplete=False, silent=False):
ecdec191 1303 """ Returns None if the file should be downloaded """
8222d8de 1304
c77495e3 1305 video_title = info_dict.get('title', info_dict.get('id', 'video'))
1306
8b0d7497 1307 def check_filter():
8b0d7497 1308 if 'title' in info_dict:
1309 # This can happen when we're just evaluating the playlist
1310 title = info_dict['title']
1311 matchtitle = self.params.get('matchtitle', False)
1312 if matchtitle:
1313 if not re.search(matchtitle, title, re.IGNORECASE):
1314 return '"' + title + '" title did not match pattern "' + matchtitle + '"'
1315 rejecttitle = self.params.get('rejecttitle', False)
1316 if rejecttitle:
1317 if re.search(rejecttitle, title, re.IGNORECASE):
1318 return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
1319 date = info_dict.get('upload_date')
1320 if date is not None:
1321 dateRange = self.params.get('daterange', DateRange())
1322 if date not in dateRange:
86e5f3ed 1323 return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
8b0d7497 1324 view_count = info_dict.get('view_count')
1325 if view_count is not None:
1326 min_views = self.params.get('min_views')
1327 if min_views is not None and view_count < min_views:
1328 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
1329 max_views = self.params.get('max_views')
1330 if max_views is not None and view_count > max_views:
1331 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
1332 if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
1333 return 'Skipping "%s" because it is age restricted' % video_title
8b0d7497 1334
8f18aca8 1335 match_filter = self.params.get('match_filter')
1336 if match_filter is not None:
1337 try:
1338 ret = match_filter(info_dict, incomplete=incomplete)
1339 except TypeError:
1340 # For backward compatibility
1341 ret = None if incomplete else match_filter(info_dict)
492272fe 1342 if ret is NO_DEFAULT:
1343 while True:
1344 filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
1345 reply = input(self._format_screen(
1346 f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
1347 if reply in {'y', ''}:
1348 return None
1349 elif reply == 'n':
1350 return f'Skipping {video_title}'
492272fe 1351 elif ret is not None:
8f18aca8 1352 return ret
8b0d7497 1353 return None
1354
c77495e3 1355 if self.in_download_archive(info_dict):
1356 reason = '%s has already been recorded in the archive' % video_title
1357 break_opt, break_err = 'break_on_existing', ExistingVideoReached
1358 else:
1359 reason = check_filter()
1360 break_opt, break_err = 'break_on_reject', RejectedVideoReached
8b0d7497 1361 if reason is not None:
120fe513 1362 if not silent:
1363 self.to_screen('[download] ' + reason)
c77495e3 1364 if self.params.get(break_opt, False):
1365 raise break_err()
8b0d7497 1366 return reason
fe7e0c98 1367
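# Editor's note - illustrative example, not part of the upstream source. The
# 'match_filter' option consumed by _match_entry() above can be any callable
# of the following shape (the function name is hypothetical). Returning None
# accepts the video, a string skips it with that message, and NO_DEFAULT
# triggers the interactive "Download?" prompt handled above:
#
#   def longer_than_an_hour(info_dict, *, incomplete=False):
#       if incomplete:  # partial metadata, e.g. while evaluating a playlist
#           return None
#       if (info_dict.get('duration') or 0) > 3600:
#           return f'Skipping {info_dict.get("title")}: longer than an hour'
#       return None
#
#   ydl = YoutubeDL({'match_filter': longer_than_an_hour})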
b6c45014
JMF
1368 @staticmethod
1369 def add_extra_info(info_dict, extra_info):
1370 '''Set the keys from extra_info in info dict if they are missing'''
1371 for key, value in extra_info.items():
1372 info_dict.setdefault(key, value)
1373
409e1828 1374 def extract_info(self, url, download=True, ie_key=None, extra_info=None,
61aa5ba3 1375 process=True, force_generic_extractor=False):
41d1cca3 1376 """
1377 Return a list with a dictionary for each video extracted.
1378
1379 Arguments:
1380 url -- URL to extract
1381
1382 Keyword arguments:
1383 download -- whether to download videos during extraction
1384 ie_key -- extractor key hint
1385 extra_info -- dictionary containing the extra values to add to each result
1386 process -- whether to resolve all unresolved references (URLs, playlist items),
1387 must be True for download to work.
1388 force_generic_extractor -- force using the generic extractor
1389 """
fe7e0c98 1390
409e1828 1391 if extra_info is None:
1392 extra_info = {}
1393
61aa5ba3 1394 if not ie_key and force_generic_extractor:
d22dec74
S
1395 ie_key = 'Generic'
1396
8222d8de 1397 if ie_key:
8b7491c8 1398 ies = {ie_key: self._get_info_extractor_class(ie_key)}
8222d8de
JMF
1399 else:
1400 ies = self._ies
1401
8b7491c8 1402 for ie_key, ie in ies.items():
8222d8de
JMF
1403 if not ie.suitable(url):
1404 continue
1405
1406 if not ie.working():
6febd1c1
PH
1407 self.report_warning('The program functionality for this site has been marked as broken, '
1408 'and will probably not work.')
8222d8de 1409
1151c407 1410 temp_id = ie.get_temp_id(url)
a0566bbf 1411 if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
5e5be0c0 1412 self.to_screen(f'[{ie_key}] {temp_id}: has already been recorded in the archive')
1413 if self.params.get('break_on_existing', False):
1414 raise ExistingVideoReached()
a0566bbf 1415 break
8b7491c8 1416 return self.__extract_info(url, self.get_info_extractor(ie_key), download, extra_info, process)
a0566bbf 1417 else:
1418 self.report_error('no suitable InfoExtractor for URL %s' % url)
1419
7e88d7d7 1420 def _handle_extraction_exceptions(func):
b5ae35ee 1421 @functools.wraps(func)
a0566bbf 1422 def wrapper(self, *args, **kwargs):
6da22e7d 1423 while True:
1424 try:
1425 return func(self, *args, **kwargs)
1426 except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
8222d8de 1427 raise
6da22e7d 1428 except ReExtractInfo as e:
1429 if e.expected:
1430 self.to_screen(f'{e}; Re-extracting data')
1431 else:
1432 self.to_stderr('\r')
1433 self.report_warning(f'{e}; Re-extracting data')
1434 continue
1435 except GeoRestrictedError as e:
1436 msg = e.msg
1437 if e.countries:
1438 msg += '\nThis video is available in %s.' % ', '.join(
1439 map(ISO3166Utils.short2full, e.countries))
1440 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around this.'
1441 self.report_error(msg)
1442 except ExtractorError as e: # An error we somewhat expected
1443 self.report_error(str(e), e.format_traceback())
1444 except Exception as e:
1445 if self.params.get('ignoreerrors'):
1446 self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
1447 else:
1448 raise
1449 break
a0566bbf 1450 return wrapper
1451
f2ebc5c7 1452 def _wait_for_video(self, ie_result):
1453 if (not self.params.get('wait_for_video')
1454 or ie_result.get('_type', 'video') != 'video'
1455 or ie_result.get('formats') or ie_result.get('url')):
1456 return
1457
1458 format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
1459 last_msg = ''
1460
1461 def progress(msg):
1462 nonlocal last_msg
1463 self.to_screen(msg + ' ' * (len(last_msg) - len(msg)) + '\r', skip_eol=True)
1464 last_msg = msg
1465
1466 min_wait, max_wait = self.params.get('wait_for_video')
1467 diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
1468 if diff is None and ie_result.get('live_status') == 'is_upcoming':
16c620bc 1469 diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
f2ebc5c7 1470 self.report_warning('Release time of video is not known')
1471 elif (diff or 0) <= 0:
1472 self.report_warning('Video should already be available according to extracted info')
38d79fd1 1473 diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
f2ebc5c7 1474 self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')
1475
1476 wait_till = time.time() + diff
1477 try:
1478 while True:
1479 diff = wait_till - time.time()
1480 if diff <= 0:
1481 progress('')
1482 raise ReExtractInfo('[wait] Wait period ended', expected=True)
1483 progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
1484 time.sleep(1)
1485 except KeyboardInterrupt:
1486 progress('')
1487 raise ReExtractInfo('[wait] Interrupted by user', expected=True)
1488 except BaseException as e:
1489 if not isinstance(e, ReExtractInfo):
1490 self.to_screen('')
1491 raise
1492
7e88d7d7 1493 @_handle_extraction_exceptions
58f197b7 1494 def __extract_info(self, url, ie, download, extra_info, process):
a0566bbf 1495 ie_result = ie.extract(url)
1496 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
1497 return
1498 if isinstance(ie_result, list):
1499 # Backwards compatibility: old IE result format
1500 ie_result = {
1501 '_type': 'compat_list',
1502 'entries': ie_result,
1503 }
e37d0efb 1504 if extra_info.get('original_url'):
1505 ie_result.setdefault('original_url', extra_info['original_url'])
a0566bbf 1506 self.add_default_extra_info(ie_result, ie, url)
1507 if process:
f2ebc5c7 1508 self._wait_for_video(ie_result)
a0566bbf 1509 return self.process_ie_result(ie_result, download, extra_info)
8222d8de 1510 else:
a0566bbf 1511 return ie_result
fe7e0c98 1512
ea38e55f 1513 def add_default_extra_info(self, ie_result, ie, url):
6033d980 1514 if url is not None:
1515 self.add_extra_info(ie_result, {
1516 'webpage_url': url,
1517 'original_url': url,
57ebfca3 1518 })
1519 webpage_url = ie_result.get('webpage_url')
1520 if webpage_url:
1521 self.add_extra_info(ie_result, {
1522 'webpage_url_basename': url_basename(webpage_url),
1523 'webpage_url_domain': get_domain(webpage_url),
6033d980 1524 })
1525 if ie is not None:
1526 self.add_extra_info(ie_result, {
1527 'extractor': ie.IE_NAME,
1528 'extractor_key': ie.ie_key(),
1529 })
ea38e55f 1530
58adec46 1531 def process_ie_result(self, ie_result, download=True, extra_info=None):
8222d8de
JMF
1532 """
1533 Take the result of the ie (may be modified) and resolve all unresolved
1534 references (URLs, playlist items).
1535
1536 It will also download the videos if 'download'.
1537 Returns the resolved ie_result.
1538 """
58adec46 1539 if extra_info is None:
1540 extra_info = {}
e8ee972c
PH
1541 result_type = ie_result.get('_type', 'video')
1542
057a5206 1543 if result_type in ('url', 'url_transparent'):
134c6ea8 1544 ie_result['url'] = sanitize_url(ie_result['url'])
e37d0efb 1545 if ie_result.get('original_url'):
1546 extra_info.setdefault('original_url', ie_result['original_url'])
1547
057a5206 1548 extract_flat = self.params.get('extract_flat', False)
3089bc74
S
1549 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1550 or extract_flat is True):
ecb54191 1551 info_copy = ie_result.copy()
6033d980 1552 ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
360167b9 1553 if ie and not ie_result.get('id'):
4614bc22 1554 info_copy['id'] = ie.get_temp_id(ie_result['url'])
6033d980 1555 self.add_default_extra_info(info_copy, ie, ie_result['url'])
4614bc22 1556 self.add_extra_info(info_copy, extra_info)
b5475f11 1557 info_copy, _ = self.pre_process(info_copy)
ecb54191 1558 self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
415f8d51 1559 self._raise_pending_errors(info_copy)
4614bc22 1560 if self.params.get('force_write_download_archive', False):
1561 self.record_download_archive(info_copy)
e8ee972c
PH
1562 return ie_result
1563
8222d8de 1564 if result_type == 'video':
b6c45014 1565 self.add_extra_info(ie_result, extra_info)
9c2b75b5 1566 ie_result = self.process_video_result(ie_result, download=download)
415f8d51 1567 self._raise_pending_errors(ie_result)
28b0eb0f 1568 additional_urls = (ie_result or {}).get('additional_urls')
9c2b75b5 1569 if additional_urls:
e9f4ccd1 1570 # TODO: Improve MetadataParserPP to allow setting a list
14f25df2 1571 if isinstance(additional_urls, str):
9c2b75b5 1572 additional_urls = [additional_urls]
1573 self.to_screen(
1574 '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
1575 self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
1576 ie_result['additional_entries'] = [
1577 self.extract_info(
b69fd25c 1578 url, download, extra_info=extra_info,
9c2b75b5 1579 force_generic_extractor=self.params.get('force_generic_extractor'))
1580 for url in additional_urls
1581 ]
1582 return ie_result
8222d8de
JMF
1583 elif result_type == 'url':
1584 # We have to add extra_info to the results because it may be
1585 # contained in a playlist
07cce701 1586 return self.extract_info(
1587 ie_result['url'], download,
1588 ie_key=ie_result.get('ie_key'),
1589 extra_info=extra_info)
7fc3fa05
PH
1590 elif result_type == 'url_transparent':
1591 # Use the information from the embedding page
1592 info = self.extract_info(
1593 ie_result['url'], ie_key=ie_result.get('ie_key'),
1594 extra_info=extra_info, download=False, process=False)
1595
1640eb09
S
1596 # extract_info may return None when ignoreerrors is enabled and
1597 # extraction failed with an error, don't crash and return early
1598 # in this case
1599 if not info:
1600 return info
1601
3975b4d2 1602 exempted_fields = {'_type', 'url', 'ie_key'}
1603 if not ie_result.get('section_end') and ie_result.get('section_start') is None:
1604 # For video clips, the id etc of the clip extractor should be used
1605 exempted_fields |= {'id', 'extractor', 'extractor_key'}
1606
412c617d 1607 new_result = info.copy()
3975b4d2 1608 new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))
7fc3fa05 1609
0563f7ac
S
1610 # Extracted info may not be a video result (i.e.
1611 # info.get('_type', 'video') != 'video') but rather a URL or
1612 # url_transparent. In such cases outer metadata (from ie_result)
1613 # should be propagated to the inner one (info). For this to happen,
1614 # _type of info should be overridden with url_transparent. This
067aa17e 1615 # fixes the issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
0563f7ac
S
1616 if new_result.get('_type') == 'url':
1617 new_result['_type'] = 'url_transparent'
7fc3fa05
PH
1618
1619 return self.process_ie_result(
1620 new_result, download=download, extra_info=extra_info)
40fcba5e 1621 elif result_type in ('playlist', 'multi_video'):
30a074c2 1622 # Protect from infinite recursion due to recursively nested playlists
1623 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
1624 webpage_url = ie_result['webpage_url']
1625 if webpage_url in self._playlist_urls:
7e85e872 1626 self.to_screen(
30a074c2 1627 '[download] Skipping already downloaded playlist: %s'
1628 % (ie_result.get('title') or ie_result.get('id')))
1629 return
7e85e872 1630
30a074c2 1631 self._playlist_level += 1
1632 self._playlist_urls.add(webpage_url)
03f83004 1633 self._fill_common_fields(ie_result, False)
bc516a3f 1634 self._sanitize_thumbnails(ie_result)
30a074c2 1635 try:
1636 return self.__process_playlist(ie_result, download)
1637 finally:
1638 self._playlist_level -= 1
1639 if not self._playlist_level:
1640 self._playlist_urls.clear()
8222d8de 1641 elif result_type == 'compat_list':
c9bf4114
PH
1642 self.report_warning(
1643 'Extractor %s returned a compat_list result. '
1644 'It needs to be updated.' % ie_result.get('extractor'))
5f6a1245 1645
8222d8de 1646 def _fixup(r):
b868936c 1647 self.add_extra_info(r, {
1648 'extractor': ie_result['extractor'],
1649 'webpage_url': ie_result['webpage_url'],
1650 'webpage_url_basename': url_basename(ie_result['webpage_url']),
0bb322b9 1651 'webpage_url_domain': get_domain(ie_result['webpage_url']),
b868936c 1652 'extractor_key': ie_result['extractor_key'],
1653 })
8222d8de
JMF
1654 return r
1655 ie_result['entries'] = [
b6c45014 1656 self.process_ie_result(_fixup(r), download, extra_info)
8222d8de
JMF
1657 for r in ie_result['entries']
1658 ]
1659 return ie_result
1660 else:
1661 raise Exception('Invalid result type: %s' % result_type)
1662
e92caff5 1663 def _ensure_dir_exists(self, path):
1664 return make_dir(path, self.report_error)
1665
3b603dbd 1666 @staticmethod
1667 def _playlist_infodict(ie_result, **kwargs):
1668 return {
1669 **ie_result,
1670 'playlist': ie_result.get('title') or ie_result.get('id'),
1671 'playlist_id': ie_result.get('id'),
1672 'playlist_title': ie_result.get('title'),
1673 'playlist_uploader': ie_result.get('uploader'),
1674 'playlist_uploader_id': ie_result.get('uploader_id'),
1675 'playlist_index': 0,
1676 **kwargs,
1677 }
1678
30a074c2 1679 def __process_playlist(self, ie_result, download):
7e88d7d7 1680 """Process each entry in the playlist"""
1681 title = ie_result.get('title') or ie_result.get('id') or '<Untitled>'
1682 self.to_screen(f'[download] Downloading playlist: {title}')
f0d785d3 1683
7e88d7d7 1684 all_entries = PlaylistEntries(self, ie_result)
7e9a6125 1685 entries = orderedSet(all_entries.get_requested_items(), lazy=True)
1686
1687 lazy = self.params.get('lazy_playlist')
1688 if lazy:
1689 resolved_entries, n_entries = [], 'N/A'
1690 ie_result['requested_entries'], ie_result['entries'] = None, None
1691 else:
1692 entries = resolved_entries = list(entries)
1693 n_entries = len(resolved_entries)
1694 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
1695 if not ie_result.get('playlist_count'):
1696 # Better to do this after potentially exhausting entries
1697 ie_result['playlist_count'] = all_entries.get_full_count()
498f5606 1698
e08a85d8 1699 _infojson_written = False
0bfc53d0 1700 write_playlist_files = self.params.get('allow_playlist_files', True)
1701 if write_playlist_files and self.params.get('list_thumbnails'):
1702 self.list_thumbnails(ie_result)
1703 if write_playlist_files and not self.params.get('simulate'):
7e9a6125 1704 ie_copy = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
e08a85d8 1705 _infojson_written = self._write_info_json(
1706 'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
1707 if _infojson_written is None:
80c03fa9 1708 return
1709 if self._write_description('playlist', ie_result,
1710 self.prepare_filename(ie_copy, 'pl_description')) is None:
1711 return
681de68e 1712 # TODO: This should be passed to ThumbnailsConvertor if necessary
80c03fa9 1713 self._write_thumbnails('playlist', ie_copy, self.prepare_filename(ie_copy, 'pl_thumbnail'))
30a074c2 1714
7e9a6125 1715 if lazy:
1716 if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
1717 self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
1718 elif self.params.get('playlistreverse'):
1719 entries.reverse()
1720 elif self.params.get('playlistrandom'):
30a074c2 1721 random.shuffle(entries)
1722
7e88d7d7 1723 self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} videos'
1724 f'{format_field(ie_result, "playlist_count", " of %s")}')
30a074c2 1725
26e2805c 1726 failures = 0
1727 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
7e9a6125 1728 for i, (playlist_index, entry) in enumerate(entries):
1729 if lazy:
1730 resolved_entries.append((playlist_index, entry))
1731
7e88d7d7 1732 # TODO: Add auto-generated fields
1ac4fd80 1733 if not entry or self._match_entry(entry, incomplete=True) is not None:
7e88d7d7 1734 continue
1735
19a03940 1736 self.to_screen('[download] Downloading video %s of %s' % (
7e9a6125 1737 self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
7e88d7d7 1738
1739 entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
7e9a6125 1740 if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
1741 playlist_index = ie_result['requested_entries'][i]
1742
7e88d7d7 1743 entry_result = self.__process_iterable_entry(entry, download, {
7e9a6125 1744 'n_entries': int_or_none(n_entries),
1745 '__last_playlist_index': max(ie_result['requested_entries'] or (0, 0)),
f0d785d3 1746 'playlist_count': ie_result.get('playlist_count'),
71729754 1747 'playlist_index': playlist_index,
7e9a6125 1748 'playlist_autonumber': i + 1,
7e88d7d7 1749 'playlist': title,
30a074c2 1750 'playlist_id': ie_result.get('id'),
1751 'playlist_title': ie_result.get('title'),
1752 'playlist_uploader': ie_result.get('uploader'),
1753 'playlist_uploader_id': ie_result.get('uploader_id'),
30a074c2 1754 'extractor': ie_result['extractor'],
1755 'webpage_url': ie_result['webpage_url'],
1756 'webpage_url_basename': url_basename(ie_result['webpage_url']),
0bb322b9 1757 'webpage_url_domain': get_domain(ie_result['webpage_url']),
30a074c2 1758 'extractor_key': ie_result['extractor_key'],
7e88d7d7 1759 })
26e2805c 1760 if not entry_result:
1761 failures += 1
1762 if failures >= max_failures:
1763 self.report_error(
7e88d7d7 1764 f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
26e2805c 1765 break
7e9a6125 1766 resolved_entries[i] = (playlist_index, entry_result)
7e88d7d7 1767
1768 # Update with processed data
7e9a6125 1769 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
e08a85d8 1770
1771 # Write the updated info to json
cb96c5be 1772 if _infojson_written is True and self._write_info_json(
e08a85d8 1773 'updated playlist', ie_result,
1774 self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
1775 return
ca30f449 1776
ed5835b4 1777 ie_result = self.run_all_pps('playlist', ie_result)
7e88d7d7 1778 self.to_screen(f'[download] Finished downloading playlist: {title}')
30a074c2 1779 return ie_result
1780
7e88d7d7 1781 @_handle_extraction_exceptions
a0566bbf 1782 def __process_iterable_entry(self, entry, download, extra_info):
1783 return self.process_ie_result(
1784 entry, download=download, extra_info=extra_info)
1785
67134eab
JMF
1786 def _build_format_filter(self, filter_spec):
1787 " Returns a function to filter the formats according to the filter_spec "
083c9df9
PH
1788
1789 OPERATORS = {
1790 '<': operator.lt,
1791 '<=': operator.le,
1792 '>': operator.gt,
1793 '>=': operator.ge,
1794 '=': operator.eq,
1795 '!=': operator.ne,
1796 }
67134eab 1797 operator_rex = re.compile(r'''(?x)\s*
187986a8 1798 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
1799 (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1800 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
083c9df9 1801 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
187986a8 1802 m = operator_rex.fullmatch(filter_spec)
9ddb6925
S
1803 if m:
1804 try:
1805 comparison_value = int(m.group('value'))
1806 except ValueError:
1807 comparison_value = parse_filesize(m.group('value'))
1808 if comparison_value is None:
1809 comparison_value = parse_filesize(m.group('value') + 'B')
1810 if comparison_value is None:
1811 raise ValueError(
1812 'Invalid value %r in format specification %r' % (
67134eab 1813 m.group('value'), filter_spec))
9ddb6925
S
1814 op = OPERATORS[m.group('op')]
1815
083c9df9 1816 if not m:
9ddb6925
S
1817 STR_OPERATORS = {
1818 '=': operator.eq,
10d33b34
YCH
1819 '^=': lambda attr, value: attr.startswith(value),
1820 '$=': lambda attr, value: attr.endswith(value),
1821 '*=': lambda attr, value: value in attr,
1ce9a3cb 1822 '~=': lambda attr, value: value.search(attr) is not None
9ddb6925 1823 }
187986a8 1824 str_operator_rex = re.compile(r'''(?x)\s*
1825 (?P<key>[a-zA-Z0-9._-]+)\s*
1ce9a3cb
LF
1826 (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
1827 (?P<quote>["'])?
1828 (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
1829 (?(quote)(?P=quote))\s*
9ddb6925 1830 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
187986a8 1831 m = str_operator_rex.fullmatch(filter_spec)
9ddb6925 1832 if m:
1ce9a3cb
LF
1833 if m.group('op') == '~=':
1834 comparison_value = re.compile(m.group('value'))
1835 else:
1836 comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
2cc779f4
S
1837 str_op = STR_OPERATORS[m.group('op')]
1838 if m.group('negation'):
e118a879 1839 op = lambda attr, value: not str_op(attr, value)
2cc779f4
S
1840 else:
1841 op = str_op
083c9df9 1842
9ddb6925 1843 if not m:
187986a8 1844 raise SyntaxError('Invalid filter specification %r' % filter_spec)
083c9df9
PH
1845
1846 def _filter(f):
1847 actual_value = f.get(m.group('key'))
1848 if actual_value is None:
1849 return m.group('none_inclusive')
1850 return op(actual_value, comparison_value)
67134eab
JMF
1851 return _filter
1852
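# Editor's note - illustrative examples, not part of the upstream source.
# Filter specs accepted by _build_format_filter() above, normally written
# inside the brackets of a format selector such as "best[height<=720]":
#
#   self._build_format_filter('height<=720')       # numeric comparison
#   self._build_format_filter('filesize<100M')     # value parsed via parse_filesize
#   self._build_format_filter('vcodec^=avc1')      # string prefix match
#   self._build_format_filter('format_id!*=dash')  # negated substring match
#
# Each call returns a predicate over a format dict, e.g.
#   self._build_format_filter('height<=720')({'height': 480})  -> True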
9f1a1c36 1853 def _check_formats(self, formats):
1854 for f in formats:
1855 self.to_screen('[info] Testing format %s' % f['format_id'])
75689fe5 1856 path = self.get_output_path('temp')
1857 if not self._ensure_dir_exists(f'{path}/'):
1858 continue
1859 temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
9f1a1c36 1860 temp_file.close()
1861 try:
1862 success, _ = self.dl(temp_file.name, f, test=True)
8a82af35 1863 except (DownloadError, OSError, ValueError) + network_exceptions:
9f1a1c36 1864 success = False
1865 finally:
1866 if os.path.exists(temp_file.name):
1867 try:
1868 os.remove(temp_file.name)
1869 except OSError:
1870 self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
1871 if success:
1872 yield f
1873 else:
1874 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
1875
0017d9ad 1876 def _default_format_spec(self, info_dict, download=True):
0017d9ad 1877
af0f7428
S
1878 def can_merge():
1879 merger = FFmpegMergerPP(self)
1880 return merger.available and merger.can_merge()
1881
91ebc640 1882 prefer_best = (
b7b04c78 1883 not self.params.get('simulate')
91ebc640 1884 and download
1885 and (
1886 not can_merge()
21633673 1887 or info_dict.get('is_live') and not self.params.get('live_from_start')
bf1824b3 1888 or self.params['outtmpl']['default'] == '-'))
53ed7066 1889 compat = (
1890 prefer_best
1891 or self.params.get('allow_multiple_audio_streams', False)
8a82af35 1892 or 'format-spec' in self.params['compat_opts'])
91ebc640 1893
1894 return (
53ed7066 1895 'best/bestvideo+bestaudio' if prefer_best
1896 else 'bestvideo*+bestaudio/best' if not compat
91ebc640 1897 else 'bestvideo+bestaudio/best')
0017d9ad 1898
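# Editor's note - for reference only, not part of the upstream source. The
# spec returned by _default_format_spec() above resolves roughly as follows:
#   'bestvideo*+bestaudio/best' - merging possible, no compat options (the usual case)
#   'best/bestvideo+bestaudio'  - merging not possible (no working ffmpeg, output to
#                                 stdout, or a live stream without --live-from-start)
#   'bestvideo+bestaudio/best'  - 'format-spec' compat option, or multiple audio
#                                 streams allowed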
67134eab
JMF
1899 def build_format_selector(self, format_spec):
1900 def syntax_error(note, start):
1901 message = (
1902 'Invalid format specification: '
86e5f3ed 1903 '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
67134eab
JMF
1904 return SyntaxError(message)
1905
1906 PICKFIRST = 'PICKFIRST'
1907 MERGE = 'MERGE'
1908 SINGLE = 'SINGLE'
0130afb7 1909 GROUP = 'GROUP'
67134eab
JMF
1910 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
1911
91ebc640 1912 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
1913 'video': self.params.get('allow_multiple_video_streams', False)}
909d24dd 1914
9f1a1c36 1915 check_formats = self.params.get('check_formats') == 'selected'
e8e73840 1916
67134eab
JMF
1917 def _parse_filter(tokens):
1918 filter_parts = []
1919 for type, string, start, _, _ in tokens:
1920 if type == tokenize.OP and string == ']':
1921 return ''.join(filter_parts)
1922 else:
1923 filter_parts.append(string)
1924
232541df 1925 def _remove_unused_ops(tokens):
17cc1534 1926 # Remove operators that we don't use and join them with the surrounding strings
232541df
JMF
1927 # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
1928 ALLOWED_OPS = ('/', '+', ',', '(', ')')
1929 last_string, last_start, last_end, last_line = None, None, None, None
1930 for type, string, start, end, line in tokens:
1931 if type == tokenize.OP and string == '[':
1932 if last_string:
1933 yield tokenize.NAME, last_string, last_start, last_end, last_line
1934 last_string = None
1935 yield type, string, start, end, line
1936 # everything inside brackets will be handled by _parse_filter
1937 for type, string, start, end, line in tokens:
1938 yield type, string, start, end, line
1939 if type == tokenize.OP and string == ']':
1940 break
1941 elif type == tokenize.OP and string in ALLOWED_OPS:
1942 if last_string:
1943 yield tokenize.NAME, last_string, last_start, last_end, last_line
1944 last_string = None
1945 yield type, string, start, end, line
1946 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
1947 if not last_string:
1948 last_string = string
1949 last_start = start
1950 last_end = end
1951 else:
1952 last_string += string
1953 if last_string:
1954 yield tokenize.NAME, last_string, last_start, last_end, last_line
1955
cf2ac6df 1956 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
67134eab
JMF
1957 selectors = []
1958 current_selector = None
1959 for type, string, start, _, _ in tokens:
1960 # ENCODING is only defined in python 3.x
1961 if type == getattr(tokenize, 'ENCODING', None):
1962 continue
1963 elif type in [tokenize.NAME, tokenize.NUMBER]:
1964 current_selector = FormatSelector(SINGLE, string, [])
1965 elif type == tokenize.OP:
cf2ac6df
JMF
1966 if string == ')':
1967 if not inside_group:
1968 # ')' will be handled by the parentheses group
1969 tokens.restore_last_token()
67134eab 1970 break
cf2ac6df 1971 elif inside_merge and string in ['/', ',']:
0130afb7
JMF
1972 tokens.restore_last_token()
1973 break
cf2ac6df
JMF
1974 elif inside_choice and string == ',':
1975 tokens.restore_last_token()
1976 break
1977 elif string == ',':
0a31a350
JMF
1978 if not current_selector:
1979 raise syntax_error('"," must follow a format selector', start)
67134eab
JMF
1980 selectors.append(current_selector)
1981 current_selector = None
1982 elif string == '/':
d96d604e
JMF
1983 if not current_selector:
1984 raise syntax_error('"/" must follow a format selector', start)
67134eab 1985 first_choice = current_selector
cf2ac6df 1986 second_choice = _parse_format_selection(tokens, inside_choice=True)
f5f4a27a 1987 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
67134eab
JMF
1988 elif string == '[':
1989 if not current_selector:
1990 current_selector = FormatSelector(SINGLE, 'best', [])
1991 format_filter = _parse_filter(tokens)
1992 current_selector.filters.append(format_filter)
0130afb7
JMF
1993 elif string == '(':
1994 if current_selector:
1995 raise syntax_error('Unexpected "("', start)
cf2ac6df
JMF
1996 group = _parse_format_selection(tokens, inside_group=True)
1997 current_selector = FormatSelector(GROUP, group, [])
67134eab 1998 elif string == '+':
d03cfdce 1999 if not current_selector:
2000 raise syntax_error('Unexpected "+"', start)
2001 selector_1 = current_selector
2002 selector_2 = _parse_format_selection(tokens, inside_merge=True)
2003 if not selector_2:
2004 raise syntax_error('Expected a selector', start)
2005 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
67134eab 2006 else:
86e5f3ed 2007 raise syntax_error(f'Operator not recognized: "{string}"', start)
67134eab
JMF
2008 elif type == tokenize.ENDMARKER:
2009 break
2010 if current_selector:
2011 selectors.append(current_selector)
2012 return selectors
2013
f8d4ad9a 2014 def _merge(formats_pair):
2015 format_1, format_2 = formats_pair
2016
2017 formats_info = []
2018 formats_info.extend(format_1.get('requested_formats', (format_1,)))
2019 formats_info.extend(format_2.get('requested_formats', (format_2,)))
2020
2021 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
551f9388 2022 get_no_more = {'video': False, 'audio': False}
f8d4ad9a 2023 for (i, fmt_info) in enumerate(formats_info):
551f9388 2024 if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
2025 formats_info.pop(i)
2026 continue
2027 for aud_vid in ['audio', 'video']:
f8d4ad9a 2028 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
2029 if get_no_more[aud_vid]:
2030 formats_info.pop(i)
f5510afe 2031 break
f8d4ad9a 2032 get_no_more[aud_vid] = True
2033
2034 if len(formats_info) == 1:
2035 return formats_info[0]
2036
2037 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
2038 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
2039
2040 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
2041 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
2042
2043 output_ext = self.params.get('merge_output_format')
2044 if not output_ext:
2045 if the_only_video:
2046 output_ext = the_only_video['ext']
2047 elif the_only_audio and not video_fmts:
2048 output_ext = the_only_audio['ext']
2049 else:
2050 output_ext = 'mkv'
2051
975a0d0d 2052 filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
2053
f8d4ad9a 2054 new_dict = {
2055 'requested_formats': formats_info,
975a0d0d 2056 'format': '+'.join(filtered('format')),
2057 'format_id': '+'.join(filtered('format_id')),
f8d4ad9a 2058 'ext': output_ext,
975a0d0d 2059 'protocol': '+'.join(map(determine_protocol, formats_info)),
093a1710 2060 'language': '+'.join(orderedSet(filtered('language'))) or None,
2061 'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
2062 'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
975a0d0d 2063 'tbr': sum(filtered('tbr', 'vbr', 'abr')),
f8d4ad9a 2064 }
2065
2066 if the_only_video:
2067 new_dict.update({
2068 'width': the_only_video.get('width'),
2069 'height': the_only_video.get('height'),
2070 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
2071 'fps': the_only_video.get('fps'),
49a57e70 2072 'dynamic_range': the_only_video.get('dynamic_range'),
f8d4ad9a 2073 'vcodec': the_only_video.get('vcodec'),
2074 'vbr': the_only_video.get('vbr'),
2075 'stretched_ratio': the_only_video.get('stretched_ratio'),
2076 })
2077
2078 if the_only_audio:
2079 new_dict.update({
2080 'acodec': the_only_audio.get('acodec'),
2081 'abr': the_only_audio.get('abr'),
975a0d0d 2082 'asr': the_only_audio.get('asr'),
f8d4ad9a 2083 })
2084
2085 return new_dict
2086
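# Editor's note - illustrative sketch, not part of the upstream source. What
# _merge() above roughly produces for a "bestvideo+bestaudio" pair, using two
# hypothetical format dicts:
#
#   video = {'format_id': '137', 'ext': 'mp4', 'vcodec': 'avc1', 'acodec': 'none'}
#   audio = {'format_id': '140', 'ext': 'm4a', 'vcodec': 'none', 'acodec': 'mp4a.40.2'}
#   merged = _merge((video, audio))
#   merged['format_id']          -> '137+140'
#   merged['requested_formats']  -> [video, audio]
#   merged['ext']                -> 'mp4' (the only video's ext, unless merge_output_format is set)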
e8e73840 2087 def _check_formats(formats):
981052c9 2088 if not check_formats:
2089 yield from formats
b5ac45b1 2090 return
9f1a1c36 2091 yield from self._check_formats(formats)
e8e73840 2092
67134eab 2093 def _build_selector_function(selector):
909d24dd 2094 if isinstance(selector, list): # ,
67134eab
JMF
2095 fs = [_build_selector_function(s) for s in selector]
2096
317f7ab6 2097 def selector_function(ctx):
67134eab 2098 for f in fs:
981052c9 2099 yield from f(ctx)
67134eab 2100 return selector_function
909d24dd 2101
2102 elif selector.type == GROUP: # ()
0130afb7 2103 selector_function = _build_selector_function(selector.selector)
909d24dd 2104
2105 elif selector.type == PICKFIRST: # /
67134eab
JMF
2106 fs = [_build_selector_function(s) for s in selector.selector]
2107
317f7ab6 2108 def selector_function(ctx):
67134eab 2109 for f in fs:
317f7ab6 2110 picked_formats = list(f(ctx))
67134eab
JMF
2111 if picked_formats:
2112 return picked_formats
2113 return []
67134eab 2114
981052c9 2115 elif selector.type == MERGE: # +
2116 selector_1, selector_2 = map(_build_selector_function, selector.selector)
2117
2118 def selector_function(ctx):
adbc4ec4 2119 for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
981052c9 2120 yield _merge(pair)
2121
909d24dd 2122 elif selector.type == SINGLE: # atom
598d185d 2123 format_spec = selector.selector or 'best'
909d24dd 2124
f8d4ad9a 2125 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
909d24dd 2126 if format_spec == 'all':
2127 def selector_function(ctx):
9222c381 2128 yield from _check_formats(ctx['formats'][::-1])
f8d4ad9a 2129 elif format_spec == 'mergeall':
2130 def selector_function(ctx):
316f2650 2131 formats = list(_check_formats(
2132 f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
e01d6aa4 2133 if not formats:
2134 return
921b76ca 2135 merged_format = formats[-1]
2136 for f in formats[-2::-1]:
f8d4ad9a 2137 merged_format = _merge((merged_format, f))
2138 yield merged_format
909d24dd 2139
2140 else:
85e801a9 2141 format_fallback, separate_fallback, format_reverse, format_idx = False, None, True, 1
eff63539 2142 mobj = re.match(
2143 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
2144 format_spec)
2145 if mobj is not None:
2146 format_idx = int_or_none(mobj.group('n'), default=1)
e8e73840 2147 format_reverse = mobj.group('bw')[0] == 'b'
eff63539 2148 format_type = (mobj.group('type') or [None])[0]
2149 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
2150 format_modified = mobj.group('mod') is not None
909d24dd 2151
2152 format_fallback = not format_type and not format_modified # for b, w
8326b00a 2153 _filter_f = (
eff63539 2154 (lambda f: f.get('%scodec' % format_type) != 'none')
2155 if format_type and format_modified # bv*, ba*, wv*, wa*
2156 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
2157 if format_type # bv, ba, wv, wa
2158 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
2159 if not format_modified # b, w
8326b00a 2160 else lambda f: True) # b*, w*
2161 filter_f = lambda f: _filter_f(f) and (
2162 f.get('vcodec') != 'none' or f.get('acodec') != 'none')
67134eab 2163 else:
48ee10ee 2164 if format_spec in self._format_selection_exts['audio']:
b11c04a8 2165 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
48ee10ee 2166 elif format_spec in self._format_selection_exts['video']:
b11c04a8 2167 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
85e801a9 2168 separate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
48ee10ee 2169 elif format_spec in self._format_selection_exts['storyboards']:
b11c04a8 2170 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
2171 else:
b5ae35ee 2172 filter_f = lambda f: f.get('format_id') == format_spec # id
909d24dd 2173
2174 def selector_function(ctx):
2175 formats = list(ctx['formats'])
909d24dd 2176 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
85e801a9 2177 if not matches:
2178 if format_fallback and ctx['incomplete_formats']:
2179 # for extractors with incomplete formats (audio only (soundcloud)
2180 # or video only (imgur)) best/worst will fall back to
2181 # best/worst {video,audio}-only format
2182 matches = formats
2183 elif separate_fallback and not ctx['has_merged_format']:
2184 # for compatibility with youtube-dl when there is no pre-merged format
2185 matches = list(filter(separate_fallback, formats))
981052c9 2186 matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
2187 try:
e8e73840 2188 yield matches[format_idx - 1]
4abea8ca 2189 except LazyList.IndexError:
981052c9 2190 return
083c9df9 2191
67134eab 2192 filters = [self._build_format_filter(f) for f in selector.filters]
083c9df9 2193
317f7ab6 2194 def final_selector(ctx):
adbc4ec4 2195 ctx_copy = dict(ctx)
67134eab 2196 for _filter in filters:
317f7ab6
S
2197 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
2198 return selector_function(ctx_copy)
67134eab 2199 return final_selector
083c9df9 2200
0f06bcd7 2201 stream = io.BytesIO(format_spec.encode())
0130afb7 2202 try:
f9934b96 2203 tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
0130afb7
JMF
2204 except tokenize.TokenError:
2205 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
2206
86e5f3ed 2207 class TokenIterator:
0130afb7
JMF
2208 def __init__(self, tokens):
2209 self.tokens = tokens
2210 self.counter = 0
2211
2212 def __iter__(self):
2213 return self
2214
2215 def __next__(self):
2216 if self.counter >= len(self.tokens):
2217 raise StopIteration()
2218 value = self.tokens[self.counter]
2219 self.counter += 1
2220 return value
2221
2222 next = __next__
2223
2224 def restore_last_token(self):
2225 self.counter -= 1
2226
2227 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
67134eab 2228 return _build_selector_function(parsed_selector)
a9c58ad9 2229
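# Editor's note - illustrative example, not part of the upstream source.
# Turning a selector string into a callable and applying it; the ctx dict
# mirrors the one built in process_video_result() further below:
#
#   selector = self.build_format_selector('bestvideo[height<=1080]+bestaudio/best')
#   chosen = list(selector({
#       'formats': formats,            # hypothetical list of format dicts
#       'has_merged_format': False,
#       'incomplete_formats': False,
#   }))  # zero or more formats; merged dicts for the '+' branch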
e5660ee6 2230 def _calc_headers(self, info_dict):
8b7539d2 2231 res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})
e5660ee6 2232
c487cf00 2233 cookies = self._calc_cookies(info_dict['url'])
e5660ee6
JMF
2234 if cookies:
2235 res['Cookie'] = cookies
2236
0016b84e
S
2237 if 'X-Forwarded-For' not in res:
2238 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
2239 if x_forwarded_for_ip:
2240 res['X-Forwarded-For'] = x_forwarded_for_ip
2241
e5660ee6
JMF
2242 return res
2243
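# Editor's note - rough illustration, not part of the upstream source.
# _calc_headers() above layers the per-format headers over the global ones and
# then attaches cookie / forwarded-for data:
#
#   self._calc_headers({'url': 'https://example.com/v.mp4',
#                       'http_headers': {'Referer': 'https://example.com/'}})
#   # -> the global http_headers (User-Agent etc.) updated with the Referer,
#   #    plus 'Cookie' and 'X-Forwarded-For' when available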
c487cf00 2244 def _calc_cookies(self, url):
2245 pr = sanitized_Request(url)
e5660ee6 2246 self.cookiejar.add_cookie_header(pr)
662435f7 2247 return pr.get_header('Cookie')
e5660ee6 2248
9f1a1c36 2249 def _sort_thumbnails(self, thumbnails):
2250 thumbnails.sort(key=lambda t: (
2251 t.get('preference') if t.get('preference') is not None else -1,
2252 t.get('width') if t.get('width') is not None else -1,
2253 t.get('height') if t.get('height') is not None else -1,
2254 t.get('id') if t.get('id') is not None else '',
2255 t.get('url')))
2256
b0249bca 2257 def _sanitize_thumbnails(self, info_dict):
bc516a3f 2258 thumbnails = info_dict.get('thumbnails')
2259 if thumbnails is None:
2260 thumbnail = info_dict.get('thumbnail')
2261 if thumbnail:
2262 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
9f1a1c36 2263 if not thumbnails:
2264 return
2265
2266 def check_thumbnails(thumbnails):
2267 for t in thumbnails:
2268 self.to_screen(f'[info] Testing thumbnail {t["id"]}')
2269 try:
2270 self.urlopen(HEADRequest(t['url']))
2271 except network_exceptions as err:
2272 self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
2273 continue
2274 yield t
2275
2276 self._sort_thumbnails(thumbnails)
2277 for i, t in enumerate(thumbnails):
2278 if t.get('id') is None:
2279 t['id'] = '%d' % i
2280 if t.get('width') and t.get('height'):
2281 t['resolution'] = '%dx%d' % (t['width'], t['height'])
2282 t['url'] = sanitize_url(t['url'])
2283
2284 if self.params.get('check_formats') is True:
282f5709 2285 info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
9f1a1c36 2286 else:
2287 info_dict['thumbnails'] = thumbnails
bc516a3f 2288
03f83004
LNO
2289 def _fill_common_fields(self, info_dict, is_video=True):
2290 # TODO: move sanitization here
2291 if is_video:
2292 # playlists are allowed to lack "title"
d4736fdb 2293 title = info_dict.get('title', NO_DEFAULT)
2294 if title is NO_DEFAULT:
03f83004
LNO
2295 raise ExtractorError('Missing "title" field in extractor result',
2296 video_id=info_dict['id'], ie=info_dict['extractor'])
d4736fdb 2297 info_dict['fulltitle'] = title
2298 if not title:
2299 if title == '':
2300 self.write_debug('Extractor gave empty title. Creating a generic title')
2301 else:
2302 self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
1d485a1a 2303 info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'
03f83004
LNO
2304
2305 if info_dict.get('duration') is not None:
2306 info_dict['duration_string'] = formatSeconds(info_dict['duration'])
2307
2308 for ts_key, date_key in (
2309 ('timestamp', 'upload_date'),
2310 ('release_timestamp', 'release_date'),
2311 ('modified_timestamp', 'modified_date'),
2312 ):
2313 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
2314 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2315 # see http://bugs.python.org/issue1646728)
19a03940 2316 with contextlib.suppress(ValueError, OverflowError, OSError):
03f83004
LNO
2317 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
2318 info_dict[date_key] = upload_date.strftime('%Y%m%d')
03f83004
LNO
2319
2320 live_keys = ('is_live', 'was_live')
2321 live_status = info_dict.get('live_status')
2322 if live_status is None:
2323 for key in live_keys:
2324 if info_dict.get(key) is False:
2325 continue
2326 if info_dict.get(key):
2327 live_status = key
2328 break
2329 if all(info_dict.get(key) is False for key in live_keys):
2330 live_status = 'not_live'
2331 if live_status:
2332 info_dict['live_status'] = live_status
2333 for key in live_keys:
2334 if info_dict.get(key) is None:
2335 info_dict[key] = (live_status == key)
2336
2337 # Auto generate title fields corresponding to the *_number fields when missing
2338 # in order to always have clean titles. This is very common for TV series.
2339 for field in ('chapter', 'season', 'episode'):
2340 if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
2341 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
2342
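# Editor's note - illustrative only, not part of the upstream source. The loop
# above fills in missing chapter/season/episode titles from their *_number
# counterparts, e.g.:
#   {'episode_number': 3}                 -> info_dict['episode'] = 'Episode 3'
#   {'season_number': 2, 'season': 'S2'}  -> left as-is, since 'season' is already set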
415f8d51 2343 def _raise_pending_errors(self, info):
2344 err = info.pop('__pending_error', None)
2345 if err:
2346 self.report_error(err, tb=False)
2347
dd82ffea
JMF
2348 def process_video_result(self, info_dict, download=True):
2349 assert info_dict.get('_type', 'video') == 'video'
9c906919 2350 self._num_videos += 1
dd82ffea 2351
bec1fad2 2352 if 'id' not in info_dict:
fc08bdd6 2353 raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
2354 elif not info_dict.get('id'):
2355 raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])
455a15e2 2356
c9969434
S
2357 def report_force_conversion(field, field_not, conversion):
2358 self.report_warning(
2359 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2360 % (field, field_not, conversion))
2361
2362 def sanitize_string_field(info, string_field):
2363 field = info.get(string_field)
14f25df2 2364 if field is None or isinstance(field, str):
c9969434
S
2365 return
2366 report_force_conversion(string_field, 'a string', 'string')
14f25df2 2367 info[string_field] = str(field)
c9969434
S
2368
2369 def sanitize_numeric_fields(info):
2370 for numeric_field in self._NUMERIC_FIELDS:
2371 field = info.get(numeric_field)
f9934b96 2372 if field is None or isinstance(field, (int, float)):
c9969434
S
2373 continue
2374 report_force_conversion(numeric_field, 'numeric', 'int')
2375 info[numeric_field] = int_or_none(field)
2376
2377 sanitize_string_field(info_dict, 'id')
2378 sanitize_numeric_fields(info_dict)
3975b4d2 2379 if info_dict.get('section_end') and info_dict.get('section_start') is not None:
2380 info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
4c3f8c3f 2381 if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
50e93e03 2382 self.report_warning('"duration" field is negative; there is an error in the extractor')
be6217b2 2383
9eef7c4e 2384 chapters = info_dict.get('chapters') or []
a3976e07 2385 if chapters and chapters[0].get('start_time'):
2386 chapters.insert(0, {'start_time': 0})
2387
9eef7c4e 2388 dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
a3976e07 2389 for idx, (prev, current, next_) in enumerate(zip(
2390 (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
9eef7c4e 2391 if current.get('start_time') is None:
2392 current['start_time'] = prev.get('end_time')
2393 if not current.get('end_time'):
2394 current['end_time'] = next_.get('start_time')
a3976e07 2395 if not current.get('title'):
2396 current['title'] = f'<Untitled Chapter {idx}>'
9eef7c4e 2397
dd82ffea
JMF
2398 if 'playlist' not in info_dict:
2399 # It isn't part of a playlist
2400 info_dict['playlist'] = None
2401 info_dict['playlist_index'] = None
2402
bc516a3f 2403 self._sanitize_thumbnails(info_dict)
d5519808 2404
536a55da 2405 thumbnail = info_dict.get('thumbnail')
bc516a3f 2406 thumbnails = info_dict.get('thumbnails')
536a55da
S
2407 if thumbnail:
2408 info_dict['thumbnail'] = sanitize_url(thumbnail)
2409 elif thumbnails:
d5519808
PH
2410 info_dict['thumbnail'] = thumbnails[-1]['url']
2411
ae30b840 2412 if info_dict.get('display_id') is None and 'id' in info_dict:
0afef30b
PH
2413 info_dict['display_id'] = info_dict['id']
2414
03f83004 2415 self._fill_common_fields(info_dict)
33d2fc2f 2416
05108a49
S
2417 for cc_kind in ('subtitles', 'automatic_captions'):
2418 cc = info_dict.get(cc_kind)
2419 if cc:
2420 for _, subtitle in cc.items():
2421 for subtitle_format in subtitle:
2422 if subtitle_format.get('url'):
2423 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
2424 if subtitle_format.get('ext') is None:
2425 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
2426
2427 automatic_captions = info_dict.get('automatic_captions')
4bba3716 2428 subtitles = info_dict.get('subtitles')
4bba3716 2429
360e1ca5 2430 info_dict['requested_subtitles'] = self.process_subtitles(
05108a49 2431 info_dict['id'], subtitles, automatic_captions)
a504ced0 2432
dd82ffea
JMF
2433 if info_dict.get('formats') is None:
2434 # There's only one format available
2435 formats = [info_dict]
2436 else:
2437 formats = info_dict['formats']
2438
0a5a191a 2439 # or None ensures --clean-infojson removes it
2440 info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
88acdbc2 2441 if not self.params.get('allow_unplayable_formats'):
2442 formats = [f for f in formats if not f.get('has_drm')]
0a5a191a 2443 if info_dict['_has_drm'] and all(
c0b6e5c7 2444 f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
2445 self.report_warning(
2446 'This video is DRM protected and only images are available for download. '
2447 'Use --list-formats to see them')
88acdbc2 2448
319b6059 2449 get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
2450 if not get_from_start:
2451 info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
2452 if info_dict.get('is_live') and formats:
adbc4ec4 2453 formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
319b6059 2454 if get_from_start and not formats:
a44ca5a4 2455 self.raise_no_formats(info_dict, msg=(
2456 '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
2457 'If you want to download from the current time, use --no-live-from-start'))
adbc4ec4 2458
db95dc13 2459 if not formats:
1151c407 2460 self.raise_no_formats(info_dict)
db95dc13 2461
73af5cc8
S
2462 def is_wellformed(f):
2463 url = f.get('url')
a5ac0c47 2464 if not url:
73af5cc8
S
2465 self.report_warning(
2466 '"url" field is missing or empty - skipping format, '
2467 'there is an error in extractor')
a5ac0c47
S
2468 return False
2469 if isinstance(url, bytes):
2470 sanitize_string_field(f, 'url')
2471 return True
73af5cc8
S
2472
2473 # Filter out malformed formats for better extraction robustness
2474 formats = list(filter(is_wellformed, formats))
2475
181c7053
S
2476 formats_dict = {}
2477
dd82ffea 2478 # We check that all the formats have the format and format_id fields
db95dc13 2479 for i, format in enumerate(formats):
c9969434
S
2480 sanitize_string_field(format, 'format_id')
2481 sanitize_numeric_fields(format)
dcf77cf1 2482 format['url'] = sanitize_url(format['url'])
e74e3b63 2483 if not format.get('format_id'):
14f25df2 2484 format['format_id'] = str(i)
e2effb08
S
2485 else:
2486 # Sanitize format_id from characters used in format selector expression
ec85ded8 2487 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
181c7053
S
2488 format_id = format['format_id']
2489 if format_id not in formats_dict:
2490 formats_dict[format_id] = []
2491 formats_dict[format_id].append(format)
2492
2493 # Make sure all formats have unique format_id
03b4de72 2494 common_exts = set(itertools.chain(*self._format_selection_exts.values()))
181c7053 2495 for format_id, ambiguous_formats in formats_dict.items():
48ee10ee 2496 ambiguous_id = len(ambiguous_formats) > 1
2497 for i, format in enumerate(ambiguous_formats):
2498 if ambiguous_id:
181c7053 2499 format['format_id'] = '%s-%d' % (format_id, i)
48ee10ee 2500 if format.get('ext') is None:
2501 format['ext'] = determine_ext(format['url']).lower()
2502 # Ensure there is no conflict between id and ext in format selection
2503 # See https://github.com/yt-dlp/yt-dlp/issues/1282
2504 if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
2505 format['format_id'] = 'f%s' % format['format_id']
181c7053
S
2506
2507 for i, format in enumerate(formats):
8c51aa65 2508 if format.get('format') is None:
6febd1c1 2509 format['format'] = '{id} - {res}{note}'.format(
8c51aa65
JMF
2510 id=format['format_id'],
2511 res=self.format_resolution(format),
b868936c 2512 note=format_field(format, 'format_note', ' (%s)'),
8c51aa65 2513 )
6f0be937 2514 if format.get('protocol') is None:
b5559424 2515 format['protocol'] = determine_protocol(format)
239df021 2516 if format.get('resolution') is None:
2517 format['resolution'] = self.format_resolution(format, default=None)
176f1866 2518 if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
2519 format['dynamic_range'] = 'SDR'
f2fe69c7 2520 if (info_dict.get('duration') and format.get('tbr')
2521 and not format.get('filesize') and not format.get('filesize_approx')):
56ba69e4 2522 format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
f2fe69c7 2523
e5660ee6 2524 # Add HTTP headers, so that external programs can use them from the
2525 # json output
2526 full_format_info = info_dict.copy()
2527 full_format_info.update(format)
2528 format['http_headers'] = self._calc_headers(full_format_info)
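# Rough sketch (assumption, not from the original source): since every format now
# carries 'http_headers', an external tool can replay them from the JSON output, e.g.
#   import json, subprocess
#   out = subprocess.run(['yt-dlp', '-j', 'https://example.com/video'],  # hypothetical URL
#                        capture_output=True, text=True, check=True).stdout
#   headers = json.loads(out)['formats'][0].get('http_headers', {})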
0016b84e 2529 # Remove private housekeeping stuff
2530 if '__x_forwarded_for_ip' in info_dict:
2531 del info_dict['__x_forwarded_for_ip']
dd82ffea 2532
9f1a1c36 2533 if self.params.get('check_formats') is True:
282f5709 2534 formats = LazyList(self._check_formats(formats[::-1]), reverse=True)
9f1a1c36 2535
88acdbc2 2536 if not formats or formats[0] is not info_dict:
b3d9ef88 2537 # only set the 'formats' field if the original info_dict lists them
2538 # otherwise we end up with a circular reference, the first (and unique)
f89197d7 2539 # element in the 'formats' field in info_dict is info_dict itself,
dfb1b146 2540 # which can't be exported to json
b3d9ef88 2541 info_dict['formats'] = formats
4ec82a72 2542
2543 info_dict, _ = self.pre_process(info_dict)
2544
6db9c4d5 2545 if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
09b49e1f 2546 return info_dict
2547
2548 self.post_extract(info_dict)
2549 info_dict, _ = self.pre_process(info_dict, 'after_filter')
2550
093a1710 2551 # The pre-processors may have modified the formats
2552 formats = info_dict.get('formats', [info_dict])
2553
fa9f30b8 2554 list_only = self.params.get('simulate') is None and (
2555 self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
2556 interactive_format_selection = not list_only and self.format_selector == '-'
b7b04c78 2557 if self.params.get('list_thumbnails'):
2558 self.list_thumbnails(info_dict)
b7b04c78 2559 if self.params.get('listsubtitles'):
2560 if 'automatic_captions' in info_dict:
2561 self.list_subtitles(
2562 info_dict['id'], automatic_captions, 'automatic captions')
2563 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
fa9f30b8 2564 if self.params.get('listformats') or interactive_format_selection:
b69fd25c 2565 self.list_formats(info_dict)
169dbde9 2566 if list_only:
b7b04c78 2567 # Without this printing, -F --print-json will not work
169dbde9 2568 self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
c487cf00 2569 return info_dict
bfaae0a7 2570
187986a8 2571 format_selector = self.format_selector
2572 if format_selector is None:
0017d9ad 2573 req_format = self._default_format_spec(info_dict, download=download)
0760b0a7 2574 self.write_debug('Default format spec: %s' % req_format)
187986a8 2575 format_selector = self.build_format_selector(req_format)
317f7ab6 2576
fa9f30b8 2577 while True:
2578 if interactive_format_selection:
2579 req_format = input(
2580 self._format_screen('\nEnter format selector: ', self.Styles.EMPHASIS))
2581 try:
2582 format_selector = self.build_format_selector(req_format)
2583 except SyntaxError as err:
2584 self.report_error(err, tb=False, is_error=False)
2585 continue
2586
85e801a9 2587 formats_to_download = list(format_selector({
fa9f30b8 2588 'formats': formats,
85e801a9 2589 'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
2590 'incomplete_formats': (
2591 # All formats are video-only or
2592 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
2593 # all formats are audio-only
2594 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
2595 }))
fa9f30b8 2596 if interactive_format_selection and not formats_to_download:
2597 self.report_error('Requested format is not available', tb=False, is_error=False)
2598 continue
2599 break
317f7ab6 2600
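# Illustrative note (sketch): the selector built above understands the usual yt-dlp
# format-selection syntax, e.g. 'bestvideo[height<=720]+bestaudio/best' requests the
# best video no taller than 720p merged with the best audio, falling back to the best
# single pre-merged format if no such pair exists.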
dd82ffea 2601 if not formats_to_download:
b7da73eb 2602 if not self.params.get('ignore_no_formats_error'):
c0b6e5c7 2603 raise ExtractorError(
2604 'Requested format is not available. Use --list-formats for a list of available formats',
2605 expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
b62fa6d7 2606 self.report_warning('Requested format is not available')
2607 # Process what we can, even without any available formats.
2608 formats_to_download = [{}]
a13e6848 2609
5ec1b6b7 2610 requested_ranges = self.params.get('download_ranges')
2611 if requested_ranges:
2612 requested_ranges = tuple(requested_ranges(info_dict, self))
2613
2614 best_format, downloaded_formats = formats_to_download[-1], []
b62fa6d7 2615 if download:
2616 if best_format:
5ec1b6b7 2617 def to_screen(*msg):
2618 self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')
2619
2620 to_screen(f'Downloading {len(formats_to_download)} format(s):',
2621 (f['format_id'] for f in formats_to_download))
2622 if requested_ranges:
2623 to_screen(f'Downloading {len(requested_ranges)} time ranges:',
2624 (f'{int(c["start_time"])}-{int(c["end_time"])}' for c in requested_ranges))
a13e6848 2625 max_downloads_reached = False
5ec1b6b7 2626
2627 for fmt, chapter in itertools.product(formats_to_download, requested_ranges or [{}]):
2628 new_info = self._copy_infodict(info_dict)
b7da73eb 2629 new_info.update(fmt)
3975b4d2 2630 offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
2631 if chapter or offset:
5ec1b6b7 2632 new_info.update({
3975b4d2 2633 'section_start': offset + chapter.get('start_time', 0),
bc401608 2634 'section_end': offset + min(chapter.get('end_time', duration), duration),
5ec1b6b7 2635 'section_title': chapter.get('title'),
2636 'section_number': chapter.get('index'),
2637 })
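# Illustrative example (sketch; the exact CLI syntax is an assumption): a
# download_ranges callback yielding {'start_time': 60, 'end_time': 120}, roughly what
# --download-sections '*60-120' produces, results in section_start=60 and
# section_end=120 for a plain video; if the info_dict already carries a section_start,
# it is used as an offset, and end times are clamped to the known duration.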
2638 downloaded_formats.append(new_info)
a13e6848 2639 try:
2640 self.process_info(new_info)
2641 except MaxDownloadsReached:
2642 max_downloads_reached = True
415f8d51 2643 self._raise_pending_errors(new_info)
f46e2f9d 2644 # Remove copied info
2645 for key, val in tuple(new_info.items()):
2646 if info_dict.get(key) == val:
2647 new_info.pop(key)
a13e6848 2648 if max_downloads_reached:
2649 break
ebed8b37 2650
5ec1b6b7 2651 write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
a13e6848 2652 assert write_archive.issubset({True, False, 'ignore'})
2653 if True in write_archive and False not in write_archive:
2654 self.record_download_archive(info_dict)
be72c624 2655
5ec1b6b7 2656 info_dict['requested_downloads'] = downloaded_formats
ed5835b4 2657 info_dict = self.run_all_pps('after_video', info_dict)
a13e6848 2658 if max_downloads_reached:
2659 raise MaxDownloadsReached()
ebed8b37 2660
49a57e70 2661 # We update the info dict with the selected best quality format (backwards compatibility)
be72c624 2662 info_dict.update(best_format)
dd82ffea 2663 return info_dict
2664
98c70d6f 2665 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
a504ced0 2666 """Select the requested subtitles and their format"""
d8a58ddc 2667 available_subs, normal_sub_langs = {}, []
98c70d6f 2668 if normal_subtitles and self.params.get('writesubtitles'):
2669 available_subs.update(normal_subtitles)
d8a58ddc 2670 normal_sub_langs = tuple(normal_subtitles.keys())
98c70d6f 2671 if automatic_captions and self.params.get('writeautomaticsub'):
2672 for lang, cap_info in automatic_captions.items():
360e1ca5 2673 if lang not in available_subs:
2674 available_subs[lang] = cap_info
2675
4d171848 2676 if (not self.params.get('writesubtitles') and not
2677 self.params.get('writeautomaticsub') or not
2678 available_subs):
2679 return None
a504ced0 2680
d8a58ddc 2681 all_sub_langs = tuple(available_subs.keys())
a504ced0 2682 if self.params.get('allsubtitles', False):
c32b0aab 2683 requested_langs = all_sub_langs
2684 elif self.params.get('subtitleslangs', False):
77c4a9ef 2685 # A list is used so that the order of languages will be the same as
2686 # given in subtitleslangs. See https://github.com/yt-dlp/yt-dlp/issues/1041
2687 requested_langs = []
2688 for lang_re in self.params.get('subtitleslangs'):
77c4a9ef 2689 discard = lang_re[0] == '-'
c32b0aab 2690 if discard:
77c4a9ef 2691 lang_re = lang_re[1:]
3aa91540 2692 if lang_re == 'all':
2693 if discard:
2694 requested_langs = []
2695 else:
2696 requested_langs.extend(all_sub_langs)
2697 continue
77c4a9ef 2698 current_langs = filter(re.compile(lang_re + '$').match, all_sub_langs)
c32b0aab 2699 if discard:
2700 for lang in current_langs:
77c4a9ef 2701 while lang in requested_langs:
2702 requested_langs.remove(lang)
c32b0aab 2703 else:
77c4a9ef 2704 requested_langs.extend(current_langs)
2705 requested_langs = orderedSet(requested_langs)
d8a58ddc 2706 elif normal_sub_langs:
2707 requested_langs = ['en'] if 'en' in normal_sub_langs else normal_sub_langs[:1]
a504ced0 2708 else:
d8a58ddc 2709 requested_langs = ['en'] if 'en' in all_sub_langs else all_sub_langs[:1]
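# Illustrative example (sketch): with subtitleslangs == ['en.*', '-en-GB'], the loop
# above first collects every language matching the regex 'en.*' and then removes
# 'en-GB', since entries prefixed with '-' are exclusions; the special value 'all'
# (or '-all') resets the selection to everything (or to nothing) before later
# patterns are applied.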
ad3dc496 2710 if requested_langs:
2711 self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))
a504ced0 2712
2713 formats_query = self.params.get('subtitlesformat', 'best')
2714 formats_preference = formats_query.split('/') if formats_query else []
2715 subs = {}
2716 for lang in requested_langs:
2717 formats = available_subs.get(lang)
2718 if formats is None:
86e5f3ed 2719 self.report_warning(f'{lang} subtitles not available for {video_id}')
a504ced0 2720 continue
a504ced0 2721 for ext in formats_preference:
2722 if ext == 'best':
2723 f = formats[-1]
2724 break
2725 matches = list(filter(lambda f: f['ext'] == ext, formats))
2726 if matches:
2727 f = matches[-1]
2728 break
2729 else:
2730 f = formats[-1]
2731 self.report_warning(
2732 'No subtitle format found matching "%s" for language %s, '
2733 'using %s' % (formats_query, lang, f['ext']))
2734 subs[lang] = f
2735 return subs
2736
bb66c247 2737 def _forceprint(self, key, info_dict):
2738 if info_dict is None:
2739 return
2740 info_copy = info_dict.copy()
2741 info_copy['formats_table'] = self.render_formats_table(info_dict)
2742 info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
2743 info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
2744 info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))
2745
2746 def format_tmpl(tmpl):
2747 mobj = re.match(r'\w+(=?)$', tmpl)
2748 if mobj and mobj.group(1):
2749 return f'{tmpl[:-1]} = %({tmpl[:-1]})r'
2750 elif mobj:
2751 return f'%({tmpl})s'
2752 return tmpl
8130779d 2753
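# Illustrative example (sketch): format_tmpl('title') expands to '%(title)s', the
# trailing '=' form format_tmpl('id=') expands to 'id = %(id)r', and anything that is
# not a bare field name is passed through unchanged as an ordinary output template.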
bb66c247 2754 for tmpl in self.params['forceprint'].get(key, []):
2755 self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))
2756
2757 for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
5127e92a 2758 filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
bb66c247 2759 tmpl = format_tmpl(tmpl)
2760 self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
8d93e69d 2761 if self._ensure_dir_exists(filename):
86e5f3ed 2762 with open(filename, 'a', encoding='utf-8') as f:
8d93e69d 2763 f.write(self.evaluate_outtmpl(tmpl, info_copy) + '\n')
ca30f449 2764
d06daf23 2765 def __forced_printings(self, info_dict, filename, incomplete):
53c18592 2766 def print_mandatory(field, actual_field=None):
2767 if actual_field is None:
2768 actual_field = field
d06daf23 2769 if (self.params.get('force%s' % field, False)
53c18592 2770 and (not incomplete or info_dict.get(actual_field) is not None)):
2771 self.to_stdout(info_dict[actual_field])
d06daf23 2772
2773 def print_optional(field):
2774 if (self.params.get('force%s' % field, False)
2775 and info_dict.get(field) is not None):
2776 self.to_stdout(info_dict[field])
2777
53c18592 2778 info_dict = info_dict.copy()
2779 if filename is not None:
2780 info_dict['filename'] = filename
2781 if info_dict.get('requested_formats') is not None:
2782 # For RTMP URLs, also include the playpath
2783 info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
10331a26 2784 elif info_dict.get('url'):
53c18592 2785 info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')
2786
bb66c247 2787 if (self.params.get('forcejson')
2788 or self.params['forceprint'].get('video')
2789 or self.params['print_to_file'].get('video')):
2b8a2973 2790 self.post_extract(info_dict)
bb66c247 2791 self._forceprint('video', info_dict)
53c18592 2792
d06daf23 2793 print_mandatory('title')
2794 print_mandatory('id')
53c18592 2795 print_mandatory('url', 'urls')
d06daf23 2796 print_optional('thumbnail')
2797 print_optional('description')
53c18592 2798 print_optional('filename')
b868936c 2799 if self.params.get('forceduration') and info_dict.get('duration') is not None:
d06daf23 2800 self.to_stdout(formatSeconds(info_dict['duration']))
2801 print_mandatory('format')
53c18592 2802
2b8a2973 2803 if self.params.get('forcejson'):
6e84b215 2804 self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
d06daf23 2805
e8e73840 2806 def dl(self, name, info, subtitle=False, test=False):
88acdbc2 2807 if not info.get('url'):
1151c407 2808 self.raise_no_formats(info, True)
e8e73840 2809
2810 if test:
2811 verbose = self.params.get('verbose')
2812 params = {
2813 'test': True,
a169858f 2814 'quiet': self.params.get('quiet') or not verbose,
e8e73840 2815 'verbose': verbose,
2816 'noprogress': not verbose,
2817 'nopart': True,
2818 'skip_unavailable_fragments': False,
2819 'keep_fragments': False,
2820 'overwrites': True,
2821 '_no_ytdl_file': True,
2822 }
2823 else:
2824 params = self.params
96fccc10 2825 fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
e8e73840 2826 if not test:
2827 for ph in self._progress_hooks:
2828 fd.add_progress_hook(ph)
42676437 2829 urls = '", "'.join(
2830 (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
2831 for f in info.get('requested_formats', []) or [info])
3a408f9d 2832 self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')
03b4de72 2833
adbc4ec4 2834 # Note: Ideally info should be deep-copied so that hooks cannot modify it.
2835 # But it may contain objects that are not deep-copyable
2836 new_info = self._copy_infodict(info)
e8e73840 2837 if new_info.get('http_headers') is None:
2838 new_info['http_headers'] = self._calc_headers(new_info)
2839 return fd.download(name, new_info, subtitle)
2840
e04938ab 2841 def existing_file(self, filepaths, *, default_overwrite=True):
2842 existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
2843 if existing_files and not self.params.get('overwrites', default_overwrite):
2844 return existing_files[0]
2845
2846 for file in existing_files:
2847 self.report_file_delete(file)
2848 os.remove(file)
2849 return None
2850
8222d8de 2851 def process_info(self, info_dict):
09b49e1f 2852 """Process a single resolved IE result. (Modifies it in-place)"""
8222d8de 2853
2854 assert info_dict.get('_type', 'video') == 'video'
f46e2f9d 2855 original_infodict = info_dict
fd288278 2856
4513a41a 2857 if 'format' not in info_dict and 'ext' in info_dict:
8222d8de 2858 info_dict['format'] = info_dict['ext']
2859
09b49e1f 2860 # This is mostly just for backward compatibility of process_info
2861 # As a side-effect, this allows for format-specific filters
c77495e3 2862 if self._match_entry(info_dict) is not None:
9e907ebd 2863 info_dict['__write_download_archive'] = 'ignore'
8222d8de 2864 return
2865
09b49e1f 2866 # Does nothing under normal operation - for backward compatibility of process_info
277d6ff5 2867 self.post_extract(info_dict)
0c14d66a 2868 self._num_downloads += 1
8222d8de 2869
dcf64d43 2870 # info_dict['_filename'] needs to be set for backward compatibility
de6000d9 2871 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2872 temp_filename = self.prepare_filename(info_dict, 'temp')
0202b52a 2873 files_to_move = {}
8222d8de 2874
2875 # Forced printings
4513a41a 2876 self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
8222d8de 2877
ca6d59d2 2878 def check_max_downloads():
2879 if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
2880 raise MaxDownloadsReached()
2881
b7b04c78 2882 if self.params.get('simulate'):
9e907ebd 2883 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
ca6d59d2 2884 check_max_downloads()
8222d8de 2885 return
2886
de6000d9 2887 if full_filename is None:
8222d8de 2888 return
e92caff5 2889 if not self._ensure_dir_exists(encodeFilename(full_filename)):
0202b52a 2890 return
e92caff5 2891 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
8222d8de 2892 return
2893
80c03fa9 2894 if self._write_description('video', info_dict,
2895 self.prepare_filename(info_dict, 'description')) is None:
2896 return
2897
2898 sub_files = self._write_subtitles(info_dict, temp_filename)
2899 if sub_files is None:
2900 return
2901 files_to_move.update(dict(sub_files))
2902
2903 thumb_files = self._write_thumbnails(
2904 'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
2905 if thumb_files is None:
2906 return
2907 files_to_move.update(dict(thumb_files))
8222d8de 2908
80c03fa9 2909 infofn = self.prepare_filename(info_dict, 'infojson')
2910 _infojson_written = self._write_info_json('video', info_dict, infofn)
2911 if _infojson_written:
dac5df5a 2912 info_dict['infojson_filename'] = infofn
e75bb0d6 2913 # For backward compatibility, even though it was a private field
80c03fa9 2914 info_dict['__infojson_filename'] = infofn
2915 elif _infojson_written is None:
2916 return
2917
2918 # Note: Annotations are deprecated
2919 annofn = None
1fb07d10 2920 if self.params.get('writeannotations', False):
de6000d9 2921 annofn = self.prepare_filename(info_dict, 'annotation')
80c03fa9 2922 if annofn:
e92caff5 2923 if not self._ensure_dir_exists(encodeFilename(annofn)):
0202b52a 2924 return
0c3d0f51 2925 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
6febd1c1 2926 self.to_screen('[info] Video annotations are already present')
ffddb112 2927 elif not info_dict.get('annotations'):
2928 self.report_warning('There are no annotations to write.')
7b6fefc9 2929 else:
2930 try:
6febd1c1 2931 self.to_screen('[info] Writing video annotations to: ' + annofn)
86e5f3ed 2932 with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
7b6fefc9 2933 annofile.write(info_dict['annotations'])
2934 except (KeyError, TypeError):
6febd1c1 2935 self.report_warning('There are no annotations to write.')
86e5f3ed 2936 except OSError:
6febd1c1 2937 self.report_error('Cannot write annotations file: ' + annofn)
7b6fefc9 2938 return
1fb07d10 2939
732044af 2940 # Write internet shortcut files
08438d2c 2941 def _write_link_file(link_type):
60f3e995 2942 url = try_get(info_dict['webpage_url'], iri_to_uri)
2943 if not url:
2944 self.report_warning(
2945 f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
2946 return True
08438d2c 2947 linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
0e6b018a 2948 if not self._ensure_dir_exists(encodeFilename(linkfn)):
2949 return False
10e3742e 2950 if self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
08438d2c 2951 self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
2952 return True
2953 try:
2954 self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
86e5f3ed 2955 with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
2956 newline='\r\n' if link_type == 'url' else '\n') as linkfile:
60f3e995 2957 template_vars = {'url': url}
08438d2c 2958 if link_type == 'desktop':
2959 template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
2960 linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
86e5f3ed 2961 except OSError:
08438d2c 2962 self.report_error(f'Cannot write internet shortcut {linkfn}')
2963 return False
732044af 2964 return True
2965
08438d2c 2966 write_links = {
2967 'url': self.params.get('writeurllink'),
2968 'webloc': self.params.get('writewebloclink'),
2969 'desktop': self.params.get('writedesktoplink'),
2970 }
2971 if self.params.get('writelink'):
2972 link_type = ('webloc' if sys.platform == 'darwin'
2973 else 'desktop' if sys.platform.startswith('linux')
2974 else 'url')
2975 write_links[link_type] = True
2976
2977 if any(should_write and not _write_link_file(link_type)
2978 for link_type, should_write in write_links.items()):
2979 return
732044af 2980
f46e2f9d 2981 def replace_info_dict(new_info):
2982 nonlocal info_dict
2983 if new_info == info_dict:
2984 return
2985 info_dict.clear()
2986 info_dict.update(new_info)
2987
415f8d51 2988 new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
2989 replace_info_dict(new_info)
56d868db 2990
a13e6848 2991 if self.params.get('skip_download'):
56d868db 2992 info_dict['filepath'] = temp_filename
2993 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
2994 info_dict['__files_to_move'] = files_to_move
f46e2f9d 2995 replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
9e907ebd 2996 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
56d868db 2997 else:
2998 # Download
b868936c 2999 info_dict.setdefault('__postprocessors', [])
4340deca 3000 try:
0202b52a 3001
e04938ab 3002 def existing_video_file(*filepaths):
6b591b29 3003 ext = info_dict.get('ext')
e04938ab 3004 converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
3005 file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
3006 default_overwrite=False)
3007 if file:
3008 info_dict['ext'] = os.path.splitext(file)[1][1:]
3009 return file
0202b52a 3010
7b2c3f47 3011 fd, success = None, True
fccf90e7 3012 if info_dict.get('protocol') or info_dict.get('url'):
56ba69e4 3013 fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
3014 if fd is not FFmpegFD and (
3015 info_dict.get('section_start') or info_dict.get('section_end')):
7b2c3f47 3016 msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
56ba69e4 3017 else 'You have requested downloading the video partially, but ffmpeg is not installed')
3018 self.report_error(f'{msg}. Aborting')
5ec1b6b7 3019 return
5ec1b6b7 3020
4340deca 3021 if info_dict.get('requested_formats') is not None:
81cd954a 3022
3023 def compatible_formats(formats):
d03cfdce 3024 # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
3025 video_formats = [format for format in formats if format.get('vcodec') != 'none']
3026 audio_formats = [format for format in formats if format.get('acodec') != 'none']
3027 if len(video_formats) > 2 or len(audio_formats) > 2:
3028 return False
3029
81cd954a 3030 # Check extension
86e5f3ed 3031 exts = {format.get('ext') for format in formats}
d03cfdce 3032 COMPATIBLE_EXTS = (
86e5f3ed 3033 {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'},
3034 {'webm'},
d03cfdce 3035 )
3036 for ext_sets in COMPATIBLE_EXTS:
3037 if ext_sets.issuperset(exts):
3038 return True
81cd954a 3039 # TODO: Check acodec/vcodec
3040 return False
3041
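# Illustrative example (sketch): an mp4 video track plus an m4a audio track fall into
# the same compatibility set above, so they can be merged without changing container;
# a webm video paired with an m4a audio track matches no common set, so the code
# below falls back to mkv and warns about it.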
3042 requested_formats = info_dict['requested_formats']
0202b52a 3043 old_ext = info_dict['ext']
4e3b637d 3044 if self.params.get('merge_output_format') is None:
3045 if not compatible_formats(requested_formats):
3046 info_dict['ext'] = 'mkv'
3047 self.report_warning(
3048 'Requested formats are incompatible for merge and will be merged into mkv')
3049 if (info_dict['ext'] == 'webm'
3050 and info_dict.get('thumbnails')
3051 # check with type instead of pp_key, __name__, or isinstance
3052 # since we don't want any custom PPs to trigger this
c487cf00 3053 and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])): # noqa: E721
4e3b637d 3054 info_dict['ext'] = 'mkv'
3055 self.report_warning(
3056 'webm doesn\'t support embedding a thumbnail, mkv will be used')
124bc071 3057 new_ext = info_dict['ext']
0202b52a 3058
124bc071 3059 def correct_ext(filename, ext=new_ext):
96fccc10 3060 if filename == '-':
3061 return filename
0202b52a 3062 filename_real_ext = os.path.splitext(filename)[1][1:]
3063 filename_wo_ext = (
3064 os.path.splitext(filename)[0]
124bc071 3065 if filename_real_ext in (old_ext, new_ext)
0202b52a 3066 else filename)
86e5f3ed 3067 return f'{filename_wo_ext}.{ext}'
0202b52a 3068
38c6902b 3069 # Ensure filename always has a correct extension for successful merge
0202b52a 3070 full_filename = correct_ext(full_filename)
3071 temp_filename = correct_ext(temp_filename)
e04938ab 3072 dl_filename = existing_video_file(full_filename, temp_filename)
1ea24129 3073 info_dict['__real_download'] = False
18e674b4 3074
7b2c3f47 3075 merger = FFmpegMergerPP(self)
adbc4ec4 3076 downloaded = []
dbf5416a 3077 if dl_filename is not None:
6c7274ec 3078 self.report_file_already_downloaded(dl_filename)
adbc4ec4 3079 elif fd:
3080 for f in requested_formats if fd != FFmpegFD else []:
3081 f['filepath'] = fname = prepend_extension(
3082 correct_ext(temp_filename, info_dict['ext']),
3083 'f%s' % f['format_id'], info_dict['ext'])
3084 downloaded.append(fname)
dbf5416a 3085 info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
3086 success, real_download = self.dl(temp_filename, info_dict)
3087 info_dict['__real_download'] = real_download
18e674b4 3088 else:
18e674b4 3089 if self.params.get('allow_unplayable_formats'):
3090 self.report_warning(
3091 'You have requested merging of multiple formats '
3092 'while also allowing unplayable formats to be downloaded. '
3093 'The formats won\'t be merged to prevent data corruption.')
3094 elif not merger.available:
e8969bda 3095 msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
3096 if not self.params.get('ignoreerrors'):
3097 self.report_error(f'{msg}. Aborting due to --abort-on-error')
3098 return
3099 self.report_warning(f'{msg}. The formats won\'t be merged')
18e674b4 3100
96fccc10 3101 if temp_filename == '-':
adbc4ec4 3102 reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
96fccc10 3103 else 'but the formats are incompatible for simultaneous download' if merger.available
3104 else 'but ffmpeg is not installed')
3105 self.report_warning(
3106 f'You have requested downloading multiple formats to stdout {reason}. '
3107 'The formats will be streamed one after the other')
3108 fname = temp_filename
dbf5416a 3109 for f in requested_formats:
3110 new_info = dict(info_dict)
3111 del new_info['requested_formats']
3112 new_info.update(f)
96fccc10 3113 if temp_filename != '-':
124bc071 3114 fname = prepend_extension(
3115 correct_ext(temp_filename, new_info['ext']),
3116 'f%s' % f['format_id'], new_info['ext'])
96fccc10 3117 if not self._ensure_dir_exists(fname):
3118 return
a21e0ab1 3119 f['filepath'] = fname
96fccc10 3120 downloaded.append(fname)
dbf5416a 3121 partial_success, real_download = self.dl(fname, new_info)
3122 info_dict['__real_download'] = info_dict['__real_download'] or real_download
3123 success = success and partial_success
adbc4ec4 3124
3125 if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
3126 info_dict['__postprocessors'].append(merger)
3127 info_dict['__files_to_merge'] = downloaded
3128 # Even if there were no downloads, it is being merged only now
3129 info_dict['__real_download'] = True
3130 else:
3131 for file in downloaded:
3132 files_to_move[file] = None
4340deca 3133 else:
3134 # Just a single file
e04938ab 3135 dl_filename = existing_video_file(full_filename, temp_filename)
6c7274ec 3136 if dl_filename is None or dl_filename == temp_filename:
3137 # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
3138 # So we should try to resume the download
e8e73840 3139 success, real_download = self.dl(temp_filename, info_dict)
0202b52a 3140 info_dict['__real_download'] = real_download
6c7274ec 3141 else:
3142 self.report_file_already_downloaded(dl_filename)
0202b52a 3143
0202b52a 3144 dl_filename = dl_filename or temp_filename
c571435f 3145 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
0202b52a 3146
3158150c 3147 except network_exceptions as err:
7960b056 3148 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
4340deca 3149 return
86e5f3ed 3150 except OSError as err:
4340deca 3151 raise UnavailableVideoError(err)
3152 except (ContentTooShortError, ) as err:
86e5f3ed 3153 self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
4340deca 3154 return
8222d8de 3155
415f8d51 3156 self._raise_pending_errors(info_dict)
de6000d9 3157 if success and full_filename != '-':
f17f8651 3158
fd7cfb64 3159 def fixup():
3160 do_fixup = True
3161 fixup_policy = self.params.get('fixup')
3162 vid = info_dict['id']
3163
3164 if fixup_policy in ('ignore', 'never'):
3165 return
3166 elif fixup_policy == 'warn':
3fe75fdc 3167 do_fixup = 'warn'
f89b3e2d 3168 elif fixup_policy != 'force':
3169 assert fixup_policy in ('detect_or_warn', None)
3170 if not info_dict.get('__real_download'):
3171 do_fixup = False
fd7cfb64 3172
3173 def ffmpeg_fixup(cndn, msg, cls):
3fe75fdc 3174 if not (do_fixup and cndn):
fd7cfb64 3175 return
3fe75fdc 3176 elif do_fixup == 'warn':
fd7cfb64 3177 self.report_warning(f'{vid}: {msg}')
3178 return
3179 pp = cls(self)
3180 if pp.available:
3181 info_dict['__postprocessors'].append(pp)
3182 else:
3183 self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
3184
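# Illustrative note (sketch): under the default 'detect_or_warn' policy, a fixup
# postprocessor is queued only when the file was actually (re)downloaded; with
# --fixup warn the problem is only reported, while 'force' applies the fixup
# regardless of __real_download.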
3185 stretched_ratio = info_dict.get('stretched_ratio')
ca9def71 3186 ffmpeg_fixup(stretched_ratio not in (1, None),
3187 f'Non-uniform pixel ratio {stretched_ratio}',
3188 FFmpegFixupStretchedPP)
fd7cfb64 3189
993191c0 3190 downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
24146491 3191 downloader = downloader.FD_NAME if downloader else None
adbc4ec4 3192
ca9def71 3193 ext = info_dict.get('ext')
3194 postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
3195 isinstance(pp, FFmpegVideoConvertorPP)
3196 and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
3197 ) for pp in self._pps['post_process'])
3198
3199 if not postprocessed_by_ffmpeg:
3200 ffmpeg_fixup(ext == 'm4a' and info_dict.get('container') == 'm4a_dash',
f2df4071 3201 'writing DASH m4a. Only some players support this container',
3202 FFmpegFixupM4aPP)
24146491 3203 ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
494f5230 3204 or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
adbc4ec4 3205 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
3206 FFmpegFixupM3u8PP)
3207 ffmpeg_fixup(info_dict.get('is_live') and downloader == 'DashSegmentsFD',
3208 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)
3209
24146491 3210 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
3211 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)
fd7cfb64 3212
3213 fixup()
8222d8de 3214 try:
f46e2f9d 3215 replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
af819c21 3216 except PostProcessingError as err:
3217 self.report_error('Postprocessing: %s' % str(err))
8222d8de 3218 return
ab8e5e51 3219 try:
3220 for ph in self._post_hooks:
23c1a667 3221 ph(info_dict['filepath'])
ab8e5e51 3222 except Exception as err:
3223 self.report_error('post hooks: %s' % str(err))
3224 return
9e907ebd 3225 info_dict['__write_download_archive'] = True
2d30509f 3226
c487cf00 3227 assert info_dict is original_infodict # Make sure the info_dict was modified in-place
a13e6848 3228 if self.params.get('force_write_download_archive'):
9e907ebd 3229 info_dict['__write_download_archive'] = True
ca6d59d2 3230 check_max_downloads()
8222d8de 3231
aa9369a2 3232 def __download_wrapper(self, func):
3233 @functools.wraps(func)
3234 def wrapper(*args, **kwargs):
3235 try:
3236 res = func(*args, **kwargs)
3237 except UnavailableVideoError as e:
3238 self.report_error(e)
b222c271 3239 except DownloadCancelled as e:
3240 self.to_screen(f'[info] {e}')
3241 if not self.params.get('break_per_url'):
3242 raise
aa9369a2 3243 else:
3244 if self.params.get('dump_single_json', False):
3245 self.post_extract(res)
3246 self.to_stdout(json.dumps(self.sanitize_info(res)))
3247 return wrapper
3248
8222d8de 3249 def download(self, url_list):
3250 """Download a given list of URLs."""
aa9369a2 3251 url_list = variadic(url_list) # Passing a single URL is a common mistake
bf1824b3 3252 outtmpl = self.params['outtmpl']['default']
3089bc74 3253 if (len(url_list) > 1
3254 and outtmpl != '-'
3255 and '%' not in outtmpl
3256 and self.params.get('max_downloads') != 1):
acd69589 3257 raise SameFileError(outtmpl)
8222d8de 3258
3259 for url in url_list:
aa9369a2 3260 self.__download_wrapper(self.extract_info)(
3261 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
8222d8de 3262
3263 return self._download_retcode
3264
1dcc4c0c 3265 def download_with_info_file(self, info_filename):
31bd3925 3266 with contextlib.closing(fileinput.FileInput(
3267 [info_filename], mode='r',
3268 openhook=fileinput.hook_encoded('utf-8'))) as f:
3269 # FileInput doesn't have a read method, we can't call json.load
8012d892 3270 info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
d4943898 3271 try:
aa9369a2 3272 self.__download_wrapper(self.process_ie_result)(info, download=True)
f2ebc5c7 3273 except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
bf5f605e 3274 if not isinstance(e, EntryNotInPlaylist):
3275 self.to_stderr('\r')
d4943898 3276 webpage_url = info.get('webpage_url')
3277 if webpage_url is not None:
aa9369a2 3278 self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
d4943898 3279 return self.download([webpage_url])
3280 else:
3281 raise
3282 return self._download_retcode
1dcc4c0c 3283
cb202fd2 3284 @staticmethod
8012d892 3285 def sanitize_info(info_dict, remove_private_keys=False):
3286 ''' Sanitize the infodict for converting to json '''
3ad56b42 3287 if info_dict is None:
3288 return info_dict
6e84b215 3289 info_dict.setdefault('epoch', int(time.time()))
6a5a30f9 3290 info_dict.setdefault('_type', 'video')
09b49e1f 3291
8012d892 3292 if remove_private_keys:
0a5a191a 3293 reject = lambda k, v: v is None or k.startswith('__') or k in {
f46e2f9d 3294 'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
0a5a191a 3295 'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber',
6e84b215 3296 }
ae8f99e6 3297 else:
09b49e1f 3298 reject = lambda k, v: False
adbc4ec4 3299
3300 def filter_fn(obj):
3301 if isinstance(obj, dict):
3302 return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
3303 elif isinstance(obj, (list, tuple, set, LazyList)):
3304 return list(map(filter_fn, obj))
3305 elif obj is None or isinstance(obj, (str, int, float, bool)):
3306 return obj
3307 else:
3308 return repr(obj)
3309
5226731e 3310 return filter_fn(info_dict)
cb202fd2 3311
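# Illustrative example (sketch): with remove_private_keys=True, fields such as
# 'requested_formats', 'filepath' and any key starting with '__' are dropped, and
# values that are not JSON-serializable are always replaced by their repr(); this is
# the sanitization applied when writing .info.json with clean_infojson enabled.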
8012d892 3312 @staticmethod
3313 def filter_requested_info(info_dict, actually_filter=True):
3314 ''' Alias of sanitize_info for backward compatibility '''
3315 return YoutubeDL.sanitize_info(info_dict, actually_filter)
3316
43d7f5a5 3317 def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
3318 for filename in set(filter(None, files_to_delete)):
3319 if msg:
3320 self.to_screen(msg % filename)
3321 try:
3322 os.remove(filename)
3323 except OSError:
3324 self.report_warning(f'Unable to delete file {filename}')
3325 if filename in info.get('__files_to_move', []): # NB: Delete even if None
3326 del info['__files_to_move'][filename]
3327
ed5835b4 3328 @staticmethod
3329 def post_extract(info_dict):
3330 def actual_post_extract(info_dict):
3331 if info_dict.get('_type') in ('playlist', 'multi_video'):
3332 for video_dict in info_dict.get('entries', {}):
3333 actual_post_extract(video_dict or {})
3334 return
3335
09b49e1f 3336 post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
3337 info_dict.update(post_extractor())
ed5835b4 3338
3339 actual_post_extract(info_dict or {})
3340
dcf64d43 3341 def run_pp(self, pp, infodict):
5bfa4862 3342 files_to_delete = []
dcf64d43 3343 if '__files_to_move' not in infodict:
3344 infodict['__files_to_move'] = {}
b1940459 3345 try:
3346 files_to_delete, infodict = pp.run(infodict)
3347 except PostProcessingError as e:
3348 # Must be True and not 'only_download'
3349 if self.params.get('ignoreerrors') is True:
3350 self.report_error(e)
3351 return infodict
3352 raise
3353
5bfa4862 3354 if not files_to_delete:
dcf64d43 3355 return infodict
5bfa4862 3356 if self.params.get('keepvideo', False):
3357 for f in files_to_delete:
dcf64d43 3358 infodict['__files_to_move'].setdefault(f, '')
5bfa4862 3359 else:
43d7f5a5 3360 self._delete_downloaded_files(
3361 *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
dcf64d43 3362 return infodict
5bfa4862 3363
ed5835b4 3364 def run_all_pps(self, key, info, *, additional_pps=None):
bb66c247 3365 self._forceprint(key, info)
ed5835b4 3366 for pp in (additional_pps or []) + self._pps[key]:
dc5f409c 3367 info = self.run_pp(pp, info)
ed5835b4 3368 return info
277d6ff5 3369
56d868db 3370 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
5bfa4862 3371 info = dict(ie_info)
56d868db 3372 info['__files_to_move'] = files_to_move or {}
415f8d51 3373 try:
3374 info = self.run_all_pps(key, info)
3375 except PostProcessingError as err:
3376 msg = f'Preprocessing: {err}'
3377 info.setdefault('__pending_error', msg)
3378 self.report_error(msg, is_error=False)
56d868db 3379 return info, info.pop('__files_to_move', None)
5bfa4862 3380
f46e2f9d 3381 def post_process(self, filename, info, files_to_move=None):
8222d8de 3382 """Run all the postprocessors on the given file."""
8222d8de 3383 info['filepath'] = filename
dcf64d43 3384 info['__files_to_move'] = files_to_move or {}
ed5835b4 3385 info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
dcf64d43 3386 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
3387 del info['__files_to_move']
ed5835b4 3388 return self.run_all_pps('after_move', info)
c1c9a79c 3389
5db07df6 3390 def _make_archive_id(self, info_dict):
e9fef7ee 3391 video_id = info_dict.get('id')
3392 if not video_id:
3393 return
5db07df6 3394 # Future-proof against any change in case
3395 # and backwards compatibility with prior versions
e9fef7ee 3396 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
7012b23c 3397 if extractor is None:
1211bb6d 3398 url = str_or_none(info_dict.get('url'))
3399 if not url:
3400 return
e9fef7ee 3401 # Try to find matching extractor for the URL and take its ie_key
8b7491c8 3402 for ie_key, ie in self._ies.items():
1211bb6d 3403 if ie.suitable(url):
8b7491c8 3404 extractor = ie_key
e9fef7ee 3405 break
3406 else:
3407 return
86e5f3ed 3408 return f'{extractor.lower()} {video_id}'
5db07df6 3409
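# Illustrative example (sketch; the video id is hypothetical): for a YouTube video
# with id 'dQw4w9WgXcQ' the archive id is 'youtube dQw4w9WgXcQ', i.e. the lower-cased
# extractor key followed by the video id, which is exactly the per-line format of the
# --download-archive file written below.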
3410 def in_download_archive(self, info_dict):
3411 fn = self.params.get('download_archive')
3412 if fn is None:
3413 return False
3414
3415 vid_id = self._make_archive_id(info_dict)
e9fef7ee 3416 if not vid_id:
7012b23c 3417 return False # Incomplete video information
5db07df6 3418
a45e8619 3419 return vid_id in self.archive
c1c9a79c 3420
3421 def record_download_archive(self, info_dict):
3422 fn = self.params.get('download_archive')
3423 if fn is None:
3424 return
5db07df6 3425 vid_id = self._make_archive_id(info_dict)
3426 assert vid_id
a13e6848 3427 self.write_debug(f'Adding to archive: {vid_id}')
c1c9a79c 3428 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
6febd1c1 3429 archive_file.write(vid_id + '\n')
a45e8619 3430 self.archive.add(vid_id)
dd82ffea 3431
8c51aa65 3432 @staticmethod
8abeeb94 3433 def format_resolution(format, default='unknown'):
9359f3d4 3434 if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
fb04e403 3435 return 'audio only'
f49d89ee 3436 if format.get('resolution') is not None:
3437 return format['resolution']
35615307 3438 if format.get('width') and format.get('height'):
ff51ed58 3439 return '%dx%d' % (format['width'], format['height'])
35615307 3440 elif format.get('height'):
ff51ed58 3441 return '%sp' % format['height']
35615307 3442 elif format.get('width'):
ff51ed58 3443 return '%dx?' % format['width']
3444 return default
8c51aa65 3445
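# Illustrative example (sketch): a format with width=1920 and height=1080 renders as
# '1920x1080', one with only height=720 as '720p', and a format with vcodec 'none'
# but a real acodec as 'audio only'.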
8130779d 3446 def _list_format_headers(self, *headers):
3447 if self.params.get('listformats_table', True) is not False:
591bb9d3 3448 return [self._format_out(header, self.Styles.HEADERS) for header in headers]
8130779d 3449 return headers
3450
c57f7757 3451 def _format_note(self, fdict):
3452 res = ''
3453 if fdict.get('ext') in ['f4f', 'f4m']:
f304da8a 3454 res += '(unsupported)'
32f90364 3455 if fdict.get('language'):
3456 if res:
3457 res += ' '
f304da8a 3458 res += '[%s]' % fdict['language']
c57f7757 3459 if fdict.get('format_note') is not None:
f304da8a 3460 if res:
3461 res += ' '
3462 res += fdict['format_note']
c57f7757 3463 if fdict.get('tbr') is not None:
f304da8a 3464 if res:
3465 res += ', '
3466 res += '%4dk' % fdict['tbr']
c57f7757 3467 if fdict.get('container') is not None:
3468 if res:
3469 res += ', '
3470 res += '%s container' % fdict['container']
3089bc74 3471 if (fdict.get('vcodec') is not None
3472 and fdict.get('vcodec') != 'none'):
c57f7757 3473 if res:
3474 res += ', '
3475 res += fdict['vcodec']
91c7271a 3476 if fdict.get('vbr') is not None:
c57f7757 3477 res += '@'
3478 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
3479 res += 'video@'
3480 if fdict.get('vbr') is not None:
3481 res += '%4dk' % fdict['vbr']
fbb21cf5 3482 if fdict.get('fps') is not None:
5d583bdf 3483 if res:
3484 res += ', '
3485 res += '%sfps' % fdict['fps']
c57f7757 3486 if fdict.get('acodec') is not None:
3487 if res:
3488 res += ', '
3489 if fdict['acodec'] == 'none':
3490 res += 'video only'
3491 else:
3492 res += '%-5s' % fdict['acodec']
3493 elif fdict.get('abr') is not None:
3494 if res:
3495 res += ', '
3496 res += 'audio'
3497 if fdict.get('abr') is not None:
3498 res += '@%3dk' % fdict['abr']
3499 if fdict.get('asr') is not None:
3500 res += ' (%5dHz)' % fdict['asr']
3501 if fdict.get('filesize') is not None:
3502 if res:
3503 res += ', '
3504 res += format_bytes(fdict['filesize'])
9732d77e 3505 elif fdict.get('filesize_approx') is not None:
3506 if res:
3507 res += ', '
3508 res += '~' + format_bytes(fdict['filesize_approx'])
c57f7757 3509 return res
91c7271a 3510
8130779d 3511 def render_formats_table(self, info_dict):
b69fd25c 3512 if not info_dict.get('formats') and not info_dict.get('url'):
8130779d 3513 return None
b69fd25c 3514
94badb25 3515 formats = info_dict.get('formats', [info_dict])
8130779d 3516 if self.params.get('listformats_table', True) is False:
76d321f6 3517 table = [
3518 [
3519 format_field(f, 'format_id'),
3520 format_field(f, 'ext'),
3521 self.format_resolution(f),
8130779d 3522 self._format_note(f)
3523 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3524 return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
3525
591bb9d3 3526 delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
8130779d 3527 table = [
3528 [
591bb9d3 3529 self._format_out(format_field(f, 'format_id'), self.Styles.ID),
8130779d 3530 format_field(f, 'ext'),
3531 format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
3532 format_field(f, 'fps', '\t%d'),
3533 format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
3534 delim,
3535 format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
3536 format_field(f, 'tbr', '\t%dk'),
3537 shorten_protocol_name(f.get('protocol', '')),
3538 delim,
3539 format_field(f, 'vcodec', default='unknown').replace(
3540 'none', 'images' if f.get('acodec') == 'none'
591bb9d3 3541 else self._format_out('audio only', self.Styles.SUPPRESS)),
8130779d 3542 format_field(f, 'vbr', '\t%dk'),
3543 format_field(f, 'acodec', default='unknown').replace(
3544 'none', '' if f.get('vcodec') == 'none'
591bb9d3 3545 else self._format_out('video only', self.Styles.SUPPRESS)),
8130779d 3546 format_field(f, 'abr', '\t%dk'),
ae61d108 3547 format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
8130779d 3548 join_nonempty(
591bb9d3 3549 self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
8130779d 3550 format_field(f, 'language', '[%s]'),
3551 join_nonempty(format_field(f, 'format_note'),
3552 format_field(f, 'container', ignore=(None, f.get('ext'))),
3553 delim=', '),
3554 delim=' '),
3555 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3556 header_line = self._list_format_headers(
3557 'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', delim, '\tFILESIZE', '\tTBR', 'PROTO',
3558 delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')
3559
3560 return render_table(
3561 header_line, table, hide_empty=True,
591bb9d3 3562 delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
8130779d 3563
3564 def render_thumbnails_table(self, info_dict):
88f23a18 3565 thumbnails = list(info_dict.get('thumbnails') or [])
cfb56d1a 3566 if not thumbnails:
8130779d 3567 return None
3568 return render_table(
ec11a9f4 3569 self._list_format_headers('ID', 'Width', 'Height', 'URL'),
6970b600 3570 [[t.get('id'), t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])
2412044c 3571
8130779d 3572 def render_subtitles_table(self, video_id, subtitles):
2412044c 3573 def _row(lang, formats):
49c258e1 3574 exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
2412044c 3575 if len(set(names)) == 1:
7aee40c1 3576 names = [] if names[0] == 'unknown' else names[:1]
2412044c 3577 return [lang, ', '.join(names), ', '.join(exts)]
3578
8130779d 3579 if not subtitles:
3580 return None
3581 return render_table(
ec11a9f4 3582 self._list_format_headers('Language', 'Name', 'Formats'),
2412044c 3583 [_row(lang, formats) for lang, formats in subtitles.items()],
8130779d 3584 hide_empty=True)
3585
3586 def __list_table(self, video_id, name, func, *args):
3587 table = func(*args)
3588 if not table:
3589 self.to_screen(f'{video_id} has no {name}')
3590 return
3591 self.to_screen(f'[info] Available {name} for {video_id}:')
3592 self.to_stdout(table)
3593
3594 def list_formats(self, info_dict):
3595 self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)
3596
3597 def list_thumbnails(self, info_dict):
3598 self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)
3599
3600 def list_subtitles(self, video_id, subtitles, name='subtitles'):
3601 self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
a504ced0 3602
dca08720 3603 def urlopen(self, req):
3604 """ Start an HTTP download """
f9934b96 3605 if isinstance(req, str):
67dda517 3606 req = sanitized_Request(req)
19a41fc6 3607 return self._opener.open(req, timeout=self._socket_timeout)
dca08720 3608
3609 def print_debug_header(self):
3610 if not self.params.get('verbose'):
3611 return
49a57e70 3612
560738f3 3613 # These imports can be slow. So import them only as needed
3614 from .extractor.extractors import _LAZY_LOADER
3615 from .extractor.extractors import _PLUGIN_CLASSES as plugin_extractors
3616
49a57e70 3617 def get_encoding(stream):
2a938746 3618 ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
49a57e70 3619 if not supports_terminal_sequences(stream):
53973b4d 3620 from .utils import WINDOWS_VT_MODE # Must be imported locally
e3c7d495 3621 ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
49a57e70 3622 return ret
3623
591bb9d3 3624 encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
49a57e70 3625 locale.getpreferredencoding(),
3626 sys.getfilesystemencoding(),
591bb9d3 3627 self.get_encoding(),
3628 ', '.join(
64fa820c 3629 f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
591bb9d3 3630 if stream is not None and key != 'console')
3631 )
883d4b1e 3632
3633 logger = self.params.get('logger')
3634 if logger:
3635 write_debug = lambda msg: logger.debug(f'[debug] {msg}')
3636 write_debug(encoding_str)
3637 else:
96565c7e 3638 write_string(f'[debug] {encoding_str}\n', encoding=None)
49a57e70 3639 write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')
734f90bb 3640
4c88ff87 3641 source = detect_variant()
36eaf303 3642 write_debug(join_nonempty(
3643 'yt-dlp version', __version__,
3644 f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
3645 '' if source == 'unknown' else f'({source})',
3646 delim=' '))
6e21fdd2 3647 if not _LAZY_LOADER:
3648 if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
49a57e70 3649 write_debug('Lazy loading extractors is forcibly disabled')
6e21fdd2 3650 else:
49a57e70 3651 write_debug('Lazy loading extractors is disabled')
3ae5e797 3652 if plugin_extractors or plugin_postprocessors:
49a57e70 3653 write_debug('Plugins: %s' % [
3ae5e797 3654 '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
3655 for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
8a82af35 3656 if self.params['compat_opts']:
3657 write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
36eaf303 3658
3659 if source == 'source':
dca08720 3660 try:
f0c9fb96 3661 stdout, _, _ = Popen.run(
36eaf303 3662 ['git', 'rev-parse', '--short', 'HEAD'],
f0c9fb96 3663 text=True, cwd=os.path.dirname(os.path.abspath(__file__)),
3664 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
3665 if re.fullmatch('[0-9a-f]+', stdout.strip()):
3666 write_debug(f'Git HEAD: {stdout.strip()}')
70a1165b 3667 except Exception:
19a03940 3668 with contextlib.suppress(Exception):
36eaf303 3669 sys.exc_clear()
b300cda4 3670
b1f94422 3671 write_debug(system_identifier())
d28b5171 3672
8913ef74 3673 exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
3674 ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
3675 if ffmpeg_features:
19a03940 3676 exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))
8913ef74 3677
4c83c967 3678 exe_versions['rtmpdump'] = rtmpdump_version()
feee8d32 3679 exe_versions['phantomjs'] = PhantomJSwrapper._version()
d28b5171 3680 exe_str = ', '.join(
2831b468 3681 f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
3682 ) or 'none'
49a57e70 3683 write_debug('exe versions: %s' % exe_str)
dca08720 3684
1d485a1a 3685 from .compat.compat_utils import get_package_info
9b8ee23b 3686 from .dependencies import available_dependencies
3687
3688 write_debug('Optional libraries: %s' % (', '.join(sorted({
1d485a1a 3689 join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
9b8ee23b 3690 })) or 'none'))
2831b468 3691
97ec5bc5 3692 self._setup_opener()
dca08720 3693 proxy_map = {}
3694 for handler in self._opener.handlers:
3695 if hasattr(handler, 'proxies'):
3696 proxy_map.update(handler.proxies)
49a57e70 3697 write_debug(f'Proxy map: {proxy_map}')
dca08720 3698
49a57e70 3699 # Not implemented
3700 if False and self.params.get('call_home'):
0f06bcd7 3701 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
49a57e70 3702 write_debug('Public IP address: %s' % ipaddr)
58b1f00d 3703 latest_version = self.urlopen(
0f06bcd7 3704 'https://yt-dl.org/latest/version').read().decode()
58b1f00d 3705 if version_tuple(latest_version) > version_tuple(__version__):
3706 self.report_warning(
3707 'You are using an outdated version (newest version: %s)! '
3708 'See https://yt-dl.org/update if you need help updating.' %
3709 latest_version)
3710
e344693b 3711 def _setup_opener(self):
97ec5bc5 3712 if hasattr(self, '_opener'):
3713 return
6ad14cab 3714 timeout_val = self.params.get('socket_timeout')
17bddf3e 3715 self._socket_timeout = 20 if timeout_val is None else float(timeout_val)
6ad14cab 3716
982ee69a 3717 opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
dca08720 3718 opts_cookiefile = self.params.get('cookiefile')
3719 opts_proxy = self.params.get('proxy')
3720
982ee69a 3721 self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
dca08720 3722
6a3f4c3f 3723 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
dca08720 3724 if opts_proxy is not None:
3725 if opts_proxy == '':
3726 proxies = {}
3727 else:
3728 proxies = {'http': opts_proxy, 'https': opts_proxy}
3729 else:
ac668111 3730 proxies = urllib.request.getproxies()
067aa17e 3731 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
dca08720 3732 if 'http' in proxies and 'https' not in proxies:
3733 proxies['https'] = proxies['http']
91410c9b 3734 proxy_handler = PerRequestProxyHandler(proxies)
a0ddb8a2 3735
3736 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
be4a824d 3737 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3738 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
fca6dba8 3739 redirect_handler = YoutubeDLRedirectHandler()
f9934b96 3740 data_handler = urllib.request.DataHandler()
6240b0a2 3741
3742 # When passing our own FileHandler instance, build_opener won't add the
3743 # default FileHandler and allows us to disable the file protocol, which
3744 # can be used for malicious purposes (see
067aa17e 3745 # https://github.com/ytdl-org/youtube-dl/issues/8227)
ac668111 3746 file_handler = urllib.request.FileHandler()
6240b0a2 3747
3748 def file_open(*args, **kwargs):
ac668111 3749 raise urllib.error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
6240b0a2 3750 file_handler.file_open = file_open
3751
ac668111 3752 opener = urllib.request.build_opener(
fca6dba8 3753 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
2461f79d 3754
dca08720 3755 # Delete the default user-agent header, which would otherwise apply in
3756 # cases where our custom HTTP handler doesn't come into play
067aa17e 3757 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
dca08720
PH
3758 opener.addheaders = []
3759 self._opener = opener
62fec3b2 3760
3761 def encode(self, s):
3762 if isinstance(s, bytes):
3763 return s # Already encoded
3764
3765 try:
3766 return s.encode(self.get_encoding())
3767 except UnicodeEncodeError as err:
3768 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3769 raise
3770
3771 def get_encoding(self):
3772 encoding = self.params.get('encoding')
3773 if encoding is None:
3774 encoding = preferredencoding()
3775 return encoding
ec82d85a 3776
e08a85d8 3777 def _write_info_json(self, label, ie_result, infofn, overwrite=None):
cb96c5be 3778 ''' Write infojson and returns True = written, 'exists' = Already exists, False = skip, None = error '''
e08a85d8 3779 if overwrite is None:
3780 overwrite = self.params.get('overwrites', True)
80c03fa9 3781 if not self.params.get('writeinfojson'):
3782 return False
3783 elif not infofn:
3784 self.write_debug(f'Skipping writing {label} infojson')
3785 return False
3786 elif not self._ensure_dir_exists(infofn):
3787 return None
e08a85d8 3788 elif not overwrite and os.path.exists(infofn):
80c03fa9 3789 self.to_screen(f'[info] {label.title()} metadata is already present')
cb96c5be 3790 return 'exists'
3791
3792 self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
3793 try:
3794 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
3795 return True
86e5f3ed 3796 except OSError:
cb96c5be 3797 self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
3798 return None
80c03fa9 3799
3800 def _write_description(self, label, ie_result, descfn):
3801 ''' Write description and returns True = written, False = skip, None = error '''
3802 if not self.params.get('writedescription'):
3803 return False
3804 elif not descfn:
3805 self.write_debug(f'Skipping writing {label} description')
3806 return False
3807 elif not self._ensure_dir_exists(descfn):
3808 return None
3809 elif not self.params.get('overwrites', True) and os.path.exists(descfn):
3810 self.to_screen(f'[info] {label.title()} description is already present')
3811 elif ie_result.get('description') is None:
3812 self.report_warning(f'There\'s no {label} description to write')
3813 return False
3814 else:
3815 try:
3816 self.to_screen(f'[info] Writing {label} description to: {descfn}')
86e5f3ed 3817 with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
80c03fa9 3818 descfile.write(ie_result['description'])
86e5f3ed 3819 except OSError:
80c03fa9 3820 self.report_error(f'Cannot write {label} description file {descfn}')
3821 return None
3822 return True
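# Minimal sketch (editor's assumption): a missing description is not an error;
# the method above warns and returns False, so only None signals a write failure.
def _demo_description_result(ydl, ie_result, descfn):
    if ydl._write_description('video', ie_result, descfn) is None:
        raise RuntimeError(f'could not write description file {descfn}')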
3823
3824 def _write_subtitles(self, info_dict, filename):
3825 ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None on error '''
3826 ret = []
3827 subtitles = info_dict.get('requested_subtitles')
3828 if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
3829 # subtitle download errors are already reported by the relevant IE,
3830 # so this silently continues when used with an IE that does not support subtitles
3831 return ret
3832
3833 sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
3834 if not sub_filename_base:
3835 self.to_screen('[info] Skipping writing video subtitles')
3836 return ret
3837 for sub_lang, sub_info in subtitles.items():
3838 sub_format = sub_info['ext']
3839 sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
3840 sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
e04938ab 3841 existing_sub = self.existing_file((sub_filename_final, sub_filename))
3842 if existing_sub:
80c03fa9 3843 self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
e04938ab 3844 sub_info['filepath'] = existing_sub
3845 ret.append((existing_sub, sub_filename_final))
80c03fa9 3846 continue
3847
3848 self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
3849 if sub_info.get('data') is not None:
3850 try:
3851 # Use newline='' to prevent conversion of newline characters
3852 # See https://github.com/ytdl-org/youtube-dl/issues/10268
86e5f3ed 3853 with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
80c03fa9 3854 subfile.write(sub_info['data'])
3855 sub_info['filepath'] = sub_filename
3856 ret.append((sub_filename, sub_filename_final))
3857 continue
86e5f3ed 3858 except OSError:
80c03fa9 3859 self.report_error(f'Cannot write video subtitles file {sub_filename}')
3860 return None
3861
3862 try:
3863 sub_copy = sub_info.copy()
3864 sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
3865 self.dl(sub_filename, sub_copy, subtitle=True)
3866 sub_info['filepath'] = sub_filename
3867 ret.append((sub_filename, sub_filename_final))
6020e05d 3868 except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
c70c418d 3869 msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
6020e05d 3870 if self.params.get('ignoreerrors') is not True: # False or 'only_download'
c70c418d 3871 if not self.params.get('ignoreerrors'):
3872 self.report_error(msg)
3873 raise DownloadError(msg)
3874 self.report_warning(msg)
519804a9 3875 return ret
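# Illustrative sketch (assumed caller, not in the source): each entry returned by
# _write_subtitles pairs the file that was just written (or reused) with the
# filename it should end up under after post-processing.
def _demo_subtitle_files(ydl, info_dict, filename):
    for sub_filename, sub_filename_final in ydl._write_subtitles(info_dict, filename) or []:
        print(f'{sub_filename} -> {sub_filename_final}')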
80c03fa9 3876
3877 def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
3878 ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
6c4fd172 3879 write_all = self.params.get('write_all_thumbnails', False)
80c03fa9 3880 thumbnails, ret = [], []
6c4fd172 3881 if write_all or self.params.get('writethumbnail', False):
0202b52a 3882 thumbnails = info_dict.get('thumbnails') or []
6c4fd172 3883 multiple = write_all and len(thumbnails) > 1
ec82d85a 3884
80c03fa9 3885 if thumb_filename_base is None:
3886 thumb_filename_base = filename
3887 if thumbnails and not thumb_filename_base:
3888 self.write_debug(f'Skipping writing {label} thumbnail')
3889 return ret
3890
dd0228ce 3891 for idx, t in list(enumerate(thumbnails))[::-1]:
80c03fa9 3892 thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
aa9369a2 3893 thumb_display_id = f'{label} thumbnail {t["id"]}'
80c03fa9 3894 thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
3895 thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
ec82d85a 3896
e04938ab 3897 existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
3898 if existing_thumb:
aa9369a2 3899 self.to_screen('[info] %s is already present' % (
3900 thumb_display_id if multiple else f'{label} thumbnail').capitalize())
e04938ab 3901 t['filepath'] = existing_thumb
3902 ret.append((existing_thumb, thumb_filename_final))
ec82d85a 3903 else:
80c03fa9 3904 self.to_screen(f'[info] Downloading {thumb_display_id} ...')
ec82d85a 3905 try:
297e9952 3906 uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
80c03fa9 3907 self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
d3d89c32 3908 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
ec82d85a 3909 shutil.copyfileobj(uf, thumbf)
80c03fa9 3910 ret.append((thumb_filename, thumb_filename_final))
885cc0b7 3911 t['filepath'] = thumb_filename
3158150c 3912 except network_exceptions as err:
dd0228ce 3913 thumbnails.pop(idx)
80c03fa9 3914 self.report_warning(f'Unable to download {thumb_display_id}: {err}')
6c4fd172 3915 if ret and not write_all:
3916 break
0202b52a 3917 return ret
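# Minimal sketch (editor's example): the loop above walks thumbnails from the
# end of the list (the preferred candidates) backwards and, unless
# 'write_all_thumbnails' is set, stops after the first one that could be saved.
def _demo_thumbnail_files(ydl, info_dict, filename):
    for thumb_filename, thumb_filename_final in ydl._write_thumbnails('video', info_dict, filename):
        print(f'{thumb_filename} -> {thumb_filename_final}')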