from .cache import Cache
from .compat import functools, urllib  # isort: split
from .compat import compat_os_name, urllib_req_to_req
from .cookies import LenientSimpleCookie, load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.common import UnsupportedURLIE
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .networking import HEADRequest, Request, RequestDirector
from .networking.common import _REQUEST_HANDLERS, _RH_PREFERENCES
from .networking.exceptions import (
from .networking.impersonate import ImpersonateRequestHandler
from .plugins import directories as plugin_directories
from .postprocessor import _PLUGIN_CLASSES as plugin_pps
from .postprocessor import (
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegVideoConvertorPP,
    MoveFilesAfterDownloadPP,
from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
    _get_system_deprecation,
    UnavailableVideoError,
    format_decimal_suffix,
    orderedSet_from_options,
    remove_terminal_sequences,
    supports_terminal_sequences,
    windows_enable_vt_mode,
from .utils._utils import _YDLLogger
from .utils.networking import (
from .version import CHANNEL, ORIGIN, RELEASE_GIT_HEAD, VARIANT, __version__

if compat_os_name == 'nt':
    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Since, given a video URL, the downloader doesn't know how to
    extract all the needed information (a task that InfoExtractors do), it
    has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge of the InfoExtractors
    that are added to it, so this is a "mutual registration".
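
    A minimal usage sketch (illustrative; the option names used here are
    documented in the list below):

        import yt_dlp

        params = {
            'format': 'bestvideo*+bestaudio/best',
            'outtmpl': {'default': '%(title)s [%(id)s].%(ext)s'},
            'paths': {'home': '~/Videos'},
        }
        with yt_dlp.YoutubeDL(params) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
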
    Available options:

    username:           Username for authentication purposes.
    password:           Password for authentication purposes.
    videopassword:      Password for accessing a video.
    ap_mso:             Adobe Pass multiple-system operator identifier.
    ap_username:        Multiple-system operator account username.
    ap_password:        Multiple-system operator account password.
    usenetrc:           Use netrc for authentication instead.
    netrc_location:     Location of the netrc file. Defaults to ~/.netrc.
    netrc_cmd:          Use a shell command to get credentials
    verbose:            Print additional info to stdout.
    quiet:              Do not print messages to stdout.
    no_warnings:        Do not print out anything for warnings.
    forceprint:         A dict with keys WHEN mapped to a list of templates to
                        print to stdout. The allowed keys are video or any of the
                        items in utils.POSTPROCESS_WHEN.
                        For compatibility, a single list is also accepted
    print_to_file:      A dict with keys WHEN (same as forceprint) mapped to
                        a list of tuples with (template, filename)
    forcejson:          Force printing info_dict as JSON.
    dump_single_json:   Force printing the info_dict of the whole playlist
                        (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                        of 'skip_download' or 'simulate'.
    simulate:           Do not download the video files. If unset (or None),
                        simulate only if listsubtitles, listformats or list_thumbnails is used
    format:             Video format code. See "FORMAT SELECTION" for more details.
                        You can also pass a function. The function takes 'ctx' as
                        argument and returns the formats to download.
                        See "build_format_selector" for an implementation
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                        extracting metadata even if the video is not actually
                        available for download (experimental)
    format_sort:        A list of fields by which to sort the video formats.
                        See "Sorting Formats" for more details.
    format_sort_force:  Force the given format_sort. See "Sorting Formats"
                        for more details.
    prefer_free_formats: Whether to prefer video formats with free containers
                        over non-free ones of the same quality.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                        into a single file
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                        into a single file
    check_formats:      Whether to test if the formats are downloadable.
                        Can be True (check all), False (check none),
                        'selected' (check selected formats),
                        or None (check only if requested by extractor)
    paths:              Dictionary of output paths. The allowed keys are 'home',
                        'temp' and the keys of OUTTMPL_TYPES (in utils/_utils.py)
    outtmpl:            Dictionary of templates for output names. Allowed keys
                        are 'default' and the keys of OUTTMPL_TYPES (in utils/_utils.py).
                        For compatibility with youtube-dl, a single string can also be used
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames:  Do not allow "&" and spaces in file names
    trim_file_name:     Limit length of filename (extension excluded)
    windowsfilenames:   Force the filenames to be Windows-compatible
    ignoreerrors:       Do not stop on download/postprocessing errors.
                        Can be 'only_download' to ignore only download errors.
                        Default is 'only_download' for CLI, but False for API
    skip_playlist_after_errors: Number of allowed failures until the rest of
                        the playlist is skipped
    allowed_extractors: List of regexes to match against extractor names that are allowed
    overwrites:         Overwrite all video and metadata files if True,
                        overwrite only non-video files if None
                        and don't overwrite any file if False
    playlist_items:     Specific indices of playlist to download.
    playlistrandom:     Download playlist items in random order.
    lazy_playlist:      Process playlist entries as they are received.
    matchtitle:         Download only matching titles.
    rejecttitle:        Reject downloads for matching titles.
    logger:             Log messages to a logging.Logger instance.
    logtostderr:        Print everything to stderr instead of stdout.
    consoletitle:       Display progress in the console window's titlebar.
    writedescription:   Write the video description to a .description file
    writeinfojson:      Write the video metadata to a .info.json file
    clean_infojson:     Remove internal metadata from the infojson
    getcomments:        Extract video comments. These will not be written to disk
                        unless writeinfojson is also given
    writeannotations:   Write the video annotations to a .annotations.xml file
    writethumbnail:     Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc.
                        also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:          Write an internet shortcut file, depending on the
                        current platform (.url/.webloc/.desktop)
    writeurllink:       Write a Windows internet shortcut file (.url)
    writewebloclink:    Write a macOS internet shortcut file (.webloc)
    writedesktoplink:   Write a Linux internet shortcut file (.desktop)
    writesubtitles:     Write the video subtitles to a file
    writeautomaticsub:  Write the automatically generated subtitles to a file
    listsubtitles:      Lists all available subtitles for the video
    subtitlesformat:    The format code for subtitles
    subtitleslangs:     List of languages of the subtitles to download (can be regex).
                        The list may contain "all" to refer to all the available
                        subtitles. The language can be prefixed with a "-" to
                        exclude it from the requested languages, e.g. ['all', '-live_chat']
    keepvideo:          Keep the video file after post-processing
    daterange:          A utils.DateRange object; download only if the upload_date is in the range.
    skip_download:      Skip the actual download of the video file
    cachedir:           Location of the cache files in the filesystem.
                        False to disable filesystem cache.
    noplaylist:         Download single video instead of a playlist if in doubt.
    age_limit:          An integer representing the user's age in years.
                        Videos unsuitable for the given age are skipped.
    min_views:          An integer representing the minimum view count the video
                        must have in order to not be skipped.
                        Videos without view count information are always
                        downloaded. None for no limit.
    max_views:          An integer representing the maximum view count.
                        Videos that are more popular than that are not
                        downloaded. Videos without view count information are
                        always downloaded. None for no limit.
    download_archive:   A set, or the name of a file where all downloads are recorded.
                        Videos already present in the file are not downloaded again.
    break_on_existing:  Stop the download process after attempting to download a
                        file that is in the archive.
    break_per_url:      Whether break_on_reject and break_on_existing
                        should act on each input URL as opposed to for the entire queue
    cookiefile:         File name or text stream from where cookies should be read and dumped to
    cookiesfrombrowser: A tuple containing the name of the browser, the profile
                        name/path from where cookies are loaded, the name of the keyring,
                        and the container name, e.g. ('chrome', ) or
                        ('vivaldi', 'default', 'BASICTEXT') or ('firefox', 'default', None, 'Meta')
    legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
                        support RFC 5746 secure renegotiation
    nocheckcertificate: Do not verify SSL certificates
    client_certificate: Path to client certificate file in PEM format. May include the private key
    client_certificate_key: Path to private key file for client certificate
    client_certificate_password: Password for client certificate private key, if encrypted.
                        If not provided and the key is encrypted, yt-dlp will ask interactively
    prefer_insecure:    Use HTTP instead of HTTPS to retrieve information.
                        (Only supported by some extractors)
    enable_file_urls:   Enable file:// URLs. This is disabled by default for security reasons.
    http_headers:       A dictionary of custom headers to be used for all requests
    proxy:              URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                        on geo-restricted sites.
    socket_timeout:     Time to wait for unresponsive hosts, in seconds
    bidi_workaround:    Work around buggy terminals without bidirectional text
                        support, using fribidi
    debug_printtraffic: Print out sent and received HTTP traffic
    default_search:     Prepend this string if an input URL is not valid.
                        'auto' for elaborate guessing
    encoding:           Use this encoding instead of the system-specified one.
    extract_flat:       Whether to resolve and process url_results further
                        * False: Always process. Default for API
                        * True: Never process
                        * 'in_playlist': Do not process inside playlist/multi_video
                        * 'discard': Always process, but don't return the result
                          from inside playlist/multi_video
                        * 'discard_in_playlist': Same as "discard", but only for
                          playlists (not multi_video). Default for CLI
    wait_for_video:     If given, wait for scheduled streams to become available.
                        The value should be a tuple containing the range
                        (min_secs, max_secs) to wait between retries
    postprocessors:     A list of dictionaries, each with an entry
                        * key: The name of the postprocessor. See
                          yt_dlp/postprocessor/__init__.py for a list.
                        * when: When to run the postprocessor. Allowed values are
                          the entries of utils.POSTPROCESS_WHEN
                          Assumed to be 'post_process' if not given
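
    A sketch of a typical 'postprocessors' value (illustrative;
    'FFmpegExtractAudio' and 'EmbedThumbnail' are existing postprocessor keys,
    but check yt_dlp/postprocessor/__init__.py for the full list and each
    postprocessor's accepted arguments):

        params = {
            'postprocessors': [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'mp3',
                'preferredquality': '192',
            }, {
                'key': 'EmbedThumbnail',
                'when': 'post_process',
            }],
        }
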
    progress_hooks:     A list of functions that get called on download
                        progress, with a dictionary with the entries
                        * status: One of "downloading", "error", or "finished".
                          Check this first and ignore unknown values.
                        * info_dict: The extracted info_dict

                        If status is one of "downloading" or "finished", the
                        following properties may also be present:
                        * filename: The final filename (always present)
                        * tmpfilename: The filename we're currently writing to
                        * downloaded_bytes: Bytes on disk
                        * total_bytes: Size of the whole file, None if unknown
                        * total_bytes_estimate: Guess of the eventual file size,
                          None if unavailable.
                        * elapsed: The number of seconds since download started.
                        * eta: The estimated time in seconds, None if unknown
                        * speed: The download speed in bytes/second, None if
                          unknown
                        * fragment_index: The counter of the currently
                          downloaded video fragment.
                        * fragment_count: The number of fragments (= individual
                          files that will be merged)

                        Progress hooks are guaranteed to be called at least once
                        (with status "finished") if the download is successful.
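
    A minimal progress hook sketch (illustrative; it relies only on the
    'status', 'downloaded_bytes', 'total_bytes' and 'total_bytes_estimate'
    entries described above):

        def my_hook(d):
            if d['status'] == 'finished':
                print('Done downloading, now post-processing...')
            elif d['status'] == 'downloading':
                done = d.get('downloaded_bytes')
                total = d.get('total_bytes') or d.get('total_bytes_estimate')
                if done is not None and total:
                    print(f'{done / total:.1%} of {total} bytes')

        params = {'progress_hooks': [my_hook]}
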
    postprocessor_hooks: A list of functions that get called on postprocessing
                        progress, with a dictionary with the entries
                        * status: One of "started", "processing", or "finished".
                          Check this first and ignore unknown values.
                        * postprocessor: Name of the postprocessor
                        * info_dict: The extracted info_dict

                        Progress hooks are guaranteed to be called at least twice
                        (with status "started" and "finished") if the processing is successful.
    merge_output_format: "/" separated list of extensions to use when merging formats.
    final_ext:          Expected final extension; used to detect when the file was
                        already downloaded and converted
    fixup:              Automatically correct known faults of the file.
                        One of:
                        - "never": do nothing
                        - "warn": only emit a warning
                        - "detect_or_warn": check whether we can do anything
                          about it, warn otherwise (default)
    source_address:     Client-side IP address to bind to.
    impersonate:        Client to impersonate for requests.
                        An ImpersonateTarget (from yt_dlp.networking.impersonate)
    sleep_interval_requests: Number of seconds to sleep between requests
    sleep_interval:     Number of seconds to sleep before each download when
                        used alone or a lower bound of a range for randomized
                        sleep before each download (minimum possible number
                        of seconds to sleep) when used along with
                        max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
                        download (maximum possible number of seconds to sleep).
                        Must only be used along with sleep_interval.
                        The actual sleep time will be a random float from the range
                        [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:        Print an overview of available video formats and exit.
    list_thumbnails:    Print a table of all thumbnails and exit.
    match_filter:       A function that gets called for every video with the signature
                        (info_dict, *, incomplete: bool) -> Optional[str]
                        For backward compatibility with youtube-dl, the signature
                        (info_dict) -> Optional[str] is also allowed.
                        - If it returns a message, the video is ignored.
                        - If it returns None, the video is downloaded.
                        - If it returns utils.NO_DEFAULT, the user is interactively
                          asked whether to download the video.
                        - Raise utils.DownloadCancelled(msg) to abort remaining
                          downloads when a video is rejected.
                        match_filter_func in utils/_utils.py is one example for this.
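
    A match_filter sketch (illustrative; it uses only the 'duration' key, which
    is commonly present in the info_dict, and follows the signature and return
    semantics described above):

        def longer_than_a_minute(info_dict, *, incomplete=False):
            duration = info_dict.get('duration')
            if duration is not None and duration < 60:
                return 'The video is too short'   # video is skipped, message printed
            return None                           # video is downloaded

        params = {'match_filter': longer_than_a_minute}
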
    color:              A Dictionary with output stream names as keys
                        and their respective color policy as values.
                        Can also just be a single color policy,
                        in which case it applies to all outputs.
                        Valid stream names are 'stdout' and 'stderr'.
                        Valid color policies are one of 'always', 'auto', 'no_color' or 'never'.
    geo_bypass:         Bypass geographic restriction via faking X-Forwarded-For
                        HTTP header
    geo_bypass_country: Two-letter ISO 3166-2 country code that will be used for
                        explicit geographic restriction bypassing via faking
                        X-Forwarded-For HTTP header
    geo_bypass_ip_block: IP range in CIDR notation that will be used similarly to
                        geo_bypass_country
    external_downloader: A dictionary of protocol keys and the executable of the
                        external downloader to use for it. The allowed protocols
                        are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                        Set the value to 'native' to use the native downloader
    compat_opts:        Compatibility options. See "Differences in default behavior".
                        The following options do not work when used through the API:
                        filename, abort-on-error, multistreams, no-live-chat, format-sort
                        no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
                        Refer to __init__.py for their implementation
    progress_template:  Dictionary of templates for progress outputs.
                        Allowed keys are 'download', 'postprocess',
                        'download-title' (console title) and 'postprocess-title'.
                        The template is mapped on a dictionary with keys 'progress' and 'info'
    retry_sleep_functions: Dictionary of functions that take the number of attempts
                        as argument and return the time to sleep in seconds.
                        Allowed keys are 'http', 'fragment', 'file_access'
    download_ranges:    A callback function that gets called for every video with
                        the signature (info_dict, ydl) -> Iterable[Section].
                        Only the returned sections will be downloaded.
                        Each Section is a dict with the following keys:
                        * start_time: Start time of the section in seconds
                        * end_time: End time of the section in seconds
                        * title: Section title (Optional)
                        * index: Section number (Optional)
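
    A download_ranges sketch (illustrative; the dict follows the Section keys
    listed above, and utils.download_range_func is the helper the CLI uses for
    --download-sections):

        def first_minute(info_dict, ydl):
            yield {'start_time': 0, 'end_time': 60, 'title': 'intro', 'index': 1}

        params = {
            'download_ranges': first_minute,
            'force_keyframes_at_cuts': True,
        }
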
    force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
    noprogress:         Do not print the progress bar
    live_from_start:    Whether to download livestream videos from the start

    The following parameters are not used by YoutubeDL itself, they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads, progress_delta.

    The following options are used by the post processors:
    ffmpeg_location:    Location of the ffmpeg/avconv binary; either the path
                        to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                        and a list of additional command-line arguments for the
                        postprocessor/executable. The dict can also have "PP+EXE" keys
                        which are used when the given exe is used by the given PP.
                        Use 'default' as the name for arguments to be passed to all PP
                        For compatibility with youtube-dl, a single list of args
                        can also be used

    The following options are used by the extractors:
    extractor_retries:  Number of times to retry for known errors (default: 3)
    dynamic_mpd:        Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists into different formats at
                        discontinuities such as ad breaks (default: False)
    extractor_args:     A dictionary of arguments to be passed to the extractors.
                        See "EXTRACTOR ARGUMENTS" for details.
                        E.g. {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched:       Mark videos watched (even with --simulate). Only for YouTube

    The following options are deprecated and may be removed in the future:

    break_on_reject:    Stop the download process when encountering a video that
                        has been filtered out.
                        - `raise DownloadCancelled(msg)` in match_filter instead
    force_generic_extractor: Force downloader to use the generic extractor
                        - Use allowed_extractors = ['generic', 'default']
    playliststart:      - Use playlist_items
                        Playlist item to start at.
    playlistend:        - Use playlist_items
                        Playlist item to end at.
    playlistreverse:    - Use playlist_items
                        Download playlist items in reverse order.
    forceurl:           - Use forceprint
                        Force printing final URL.
    forcetitle:         - Use forceprint
                        Force printing title.
    forceid:            - Use forceprint
                        Force printing ID.
    forcethumbnail:     - Use forceprint
                        Force printing thumbnail URL.
    forcedescription:   - Use forceprint
                        Force printing description.
    forcefilename:      - Use forceprint
                        Force printing final filename.
    forceduration:      - Use forceprint
                        Force printing duration.
    allsubtitles:       - Use subtitleslangs = ['all']
                        Downloads all the subtitles of the video
                        (requires writesubtitles or writeautomaticsub)
    include_ads:        - Doesn't work
    call_home:          - Not implemented
                        Boolean, true iff we are allowed to contact the
                        yt-dlp servers for debugging.
    post_hooks:         - Register a custom postprocessor
                        A list of functions that get called as the final step
                        for each video file, after all postprocessors have been
                        called. The filename will be passed as the only argument.
    hls_prefer_native:  - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
                        Use the native HLS downloader instead of ffmpeg/avconv
                        if True, otherwise use ffmpeg/avconv if False, otherwise
                        use the downloader suggested by the extractor if None.
    prefer_ffmpeg:      - avconv support is deprecated
                        If False, use avconv instead of ffmpeg if both are available,
                        otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
                        If True (default), DASH manifests and related
                        data will be downloaded and processed by the extractor.
                        You can reduce network I/O by disabling it if you don't
                        care about DASH. (only for youtube)
    youtube_include_hls_manifest: - Use extractor_args
                        If True (default), HLS manifests and related
                        data will be downloaded and processed by the extractor.
                        You can reduce network I/O by disabling it if you don't
                        care about HLS. (only for youtube)
    no_color:           Same as `color='no_color'`
    no_overwrites:      Same as `overwrites=False`
    _NUMERIC_FIELDS = {
        'width', 'height', 'asr', 'audio_channels', 'fps',
        'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
    }

    _format_fields = {
        # NB: Keep in sync with the docstring of extractor/common.py
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start', 'is_dash_periods', 'request_data',
        'preference', 'language', 'language_preference', 'quality', 'source_preference', 'cookies',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'hls_aes', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time',
    }

    _deprecated_multivalue_fields = {
        'album_artist': 'album_artists',
        'composer': 'composers',
        'creator': 'creators',
    }

    _format_selection_exts = {
        'audio': set(MEDIA_EXTENSIONS.common_audio),
        'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
        'storyboards': set(MEDIA_EXTENSIONS.storyboards),
    }
    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.

        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)
        self.__header_cookies = []

        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None),
        )

        try:
            windows_enable_vt_mode()
        except Exception as e:
            self.write_debug(f'Failed to enable VT mode: {e}')
        if self.params.get('no_color'):
            if self.params.get('color') is not None:
                self.params.setdefault('_warnings', []).append(
                    'Overwriting params from "color" with "no_color"')
            self.params['color'] = 'no_color'

        term_allow_color = os.getenv('TERM', '').lower() != 'dumb'
        no_color = bool(os.getenv('NO_COLOR'))

        def process_color_policy(stream):
            stream_name = {sys.stdout: 'stdout', sys.stderr: 'stderr'}[stream]
            policy = traverse_obj(self.params, ('color', (stream_name, None), {str}), get_all=False)
            if policy in ('auto', None):
                if term_allow_color and supports_terminal_sequences(stream):
                    return 'no_color' if no_color else True
            assert policy in ('always', 'never', 'no_color'), policy
            return {'always': True, 'never': False}.get(policy, policy)

        self._allow_colors = Namespace(**{
            name: process_color_policy(stream)
            for name, stream in self._out_files.items_ if name != 'console'
        })

        system_deprecation = _get_system_deprecation()
        if system_deprecation:
            self.deprecated_feature(system_deprecation.replace('\n', '\n '))

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                ' If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        if self.params.get('bidi_workaround', False):
            try:
                master, slave = pty.openpty()
                width = shutil.get_terminal_size().columns
                width_args = [] if width is None else ['-w', str(width)]
                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')

        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        self.params['http_headers'] = HTTPHeaderDict(std_headers, self.params.get('http_headers'))
        self._load_cookies(self.params['http_headers'].get('Cookie'))  # compat
        self.params['http_headers'].pop('Cookie', None)
        if auto_init and auto_init != 'no_verbose_header':
            self.print_debug_header()

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecated_feature(msg)

        if impersonate_target := self.params.get('impersonate'):
            if not self._impersonate_target_available(impersonate_target):
                raise YoutubeDLError(
                    f'Impersonate target "{impersonate_target}" is not available. '
                    f'Use --list-impersonate-targets to see available targets. '
                    f'You may be missing dependencies required to support this target.')

        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']
        if self.params.get('simulate') is None and any((
            self.params.get('list_thumbnails'),
            self.params.get('listformats'),
            self.params.get('listsubtitles'),
        )):
            self.params['simulate'] = 'list_only'

        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}

        self.add_default_info_extractors()

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))

        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                when=when)

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            archive = set()
            if fn is None:
                return archive
            elif not is_path_like(fn):
                return fn

            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
            return archive

        self.archive = preload_download_archive(self.params.get('download_archive'))
    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
            + [a for i, a in enumerate(argv) if i not in idxs]
            + ['--'] + [argv[i] for i in idxs]
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                shell_quote(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key, it will try to get one from
        the _ies list, if there's no instance it will create a new one and add
        it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        all_ies = {ie.IE_NAME.lower(): ie for ie in gen_extractor_classes()}
        all_ies['end'] = UnsupportedURLIE()
        try:
            ie_names = orderedSet_from_options(
                self.params.get('allowed_extractors', ['default']), {
                    'all': list(all_ies),
                    'default': [name for name, ie in all_ies.items() if ie._ENABLED],
                }, use_regex=True)
        except re.error as e:
            raise ValueError(f'Wrong regex for allowed_extractors: {e.pattern}')
        for name in ie_names:
            self.add_info_extractor(all_ies[name])
        self.write_debug(f'Loaded {len(ie_names)} extractors')

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
            for pp in pps:
                pp.add_progress_hook(ph)
    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. '
                                     'Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. '
                                     'Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)

    def to_screen(self, message, skip_eol=False, quiet=None, only_once=False):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            return
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            return
        self._write_string(
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen, only_once=only_once)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)
    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
            return
        self._write_string(code, self._out_files.console)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        else:
            self._send_console_code(f'\033]0;{message}\007')

    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[22;0t')  # Save the title on stack

    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[23;0t')  # Restore the title from stack

    def __enter__(self):
        self.save_console_title()
        return self

    def save_cookies(self):
        if self.params.get('cookiefile') is not None:
            self.cookiejar.save()

    def __exit__(self, *args):
        self.restore_console_title()
        self.close()

    def close(self):
        if '_request_director' in self.__dict__:
            self._request_director.close()
            del self._request_director
    def trouble(self, message=None, tb=None, is_error=True):
        """Determine action to take when a download problem appears.

        Depending on if the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        @param tb          If given, is additional traceback information
        @param is_error    Whether to raise error according to ignoreerrors
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)

        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    Styles = Namespace(
        EMPHASIS='light blue',
        BAD_FORMAT='light red',
        SUPPRESS='light black',
    )

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        original_text = text
        # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
        encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
        text = text.encode(encoding, 'ignore').decode(encoding)
        if fallback is not None and text != original_text:
            return fallback
        return format_text(text, f) if allow_colors is True else text if fallback is None else fallback

    def _format_out(self, *args, **kwargs):
        return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)

    def _format_screen(self, *args, **kwargs):
        return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)

    def _format_err(self, *args, **kwargs):
        return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr, it will be prefixed with 'WARNING:'
        If stderr is a tty file the 'WARNING:' will be colored
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message, *, stacklevel=0):
        deprecation_warning(
            message, stacklevel=stacklevel + 1, printer=self.report_error, is_error=False)

    def deprecated_feature(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'Deprecated Feature: {message}')
        self.to_stderr(f'{self._format_err("Deprecated Feature:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, *args, **kwargs):
        '''
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

    def write_debug(self, message, only_once=False):
        '''Log debug message or Print message to stderr'''
        if not self.params.get('verbose', False):
            return
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
        else:
            self.report_warning(msg)
    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']

    def _parse_outtmpl(self):
        sanitize = IDENTITY
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict), '"paths" parameter must be a dictionary'
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$'
        # correspondingly that is not what we want since we need to keep
        # '%%' intact for template dict substitution step. Working around
        # with boundary-alike separator hack.
        sep = ''.join(random.choices(string.ascii_letters, k=32))
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. E.g. for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err

    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)
        return info_dict
    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed
        """
        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))

        # Field is of the form key1.key2...
        # where keys (except first) can be string, int, slice or "{field, ...}"
        FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}
        FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {
            'inner': FIELD_INNER_RE,
            'field': rf'\w*(?:\.{FIELD_INNER_RE})*'
        }
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?xs)
            (?P<negate>-)?
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<remaining>
                (?P<alternate>(?<!\\),[^|&)]+)?
                (?:&(?P<replacement>.*?))?
                (?:\|(?P<default>.*?))?
            )$''')

        def _from_user_input(field):
            if field == ':':
                return ...
            elif ':' in field:
                return slice(*map(int_or_none, field.split(':')))
            elif int_or_none(field) is not None:
                return int(field)
            return field

        def _traverse_infodict(fields):
            fields = [f for x in re.split(r'\.({.+?})\.?', fields)
                      for f in ([x] if x.startswith('{') else x.split('.'))]
            for i in (0, -1):
                if fields and not fields[i]:
                    fields.pop(i)

            for i, f in enumerate(fields):
                if not f.startswith('{'):
                    fields[i] = _from_user_input(f)
                    continue
                assert f.endswith('}'), f'No closing brace for {f} in {fields}'
                fields[i] = {k: list(map(_from_user_input, k.split('.'))) for k in f[1:-1].split(',')}

            return traverse_obj(info_dict, fields, traverse_string=True)

        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
            if sanitize and value == '':
                value = None
            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']
                else NO_DEFAULT))

        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            return repr(obj)

        class _ReplacementFormatter(string.Formatter):
            def get_field(self, field_name, args, kwargs):
                if field_name.isdigit():
                    return args[0], -1
                raise ValueError('Unsupported field')

        replacement_formatter = _ReplacementFormatter()

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            value, replacement, default, last_field = None, None, na, ''
            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                last_field, replacement = mobj['fields'], mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
                else:
                    break

            if None not in (value, replacement):
                try:
                    value = replacement_formatter.format(replacement, value)
                except ValueError:
                    value, default = None, na

            fmt = outer_mobj.group('format')
            if fmt == 's' and last_field in field_size_compat_map.keys() and isinstance(value, int):
                fmt = f'0{field_size_compat_map[last_field]:d}d'

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
            if value is None:
                value, fmt = default, 's'
            elif fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(
                    value, default=_dumpjson_default,
                    indent=4 if '#' in flags else None, ensure_ascii='+' not in flags), str_fmt
            elif fmt[-1] == 'h':  # html
                value, fmt = escapeHTML(str(value)), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = shell_quote(value, shell=True), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode() % str(value).encode()
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                    value), str_fmt
            elif fmt[-1] == 'D':  # decimal suffix
                num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
                value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
                                              factor=1024 if '#' in flags else 1000)
            elif fmt[-1] == 'S':  # filename sanitization
                value, fmt = filename_sanitizer(last_field, value, restricted='#' in flags), str_fmt
            elif fmt[-1] == 'c':
                if value:
                    value = str(value)[0]
                else:
                    fmt = str_fmt
            elif fmt[-1] not in 'rsa':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'

            if sanitize:
                # If value is an object, sanitize might convert it to a string
                # So we convert it to repr first
                if fmt[-1] == 'r':
                    value, fmt = repr(value), str_fmt
                elif fmt[-1] == 'a':
                    value, fmt = ascii(value), str_fmt
                if fmt[-1] in 'csra':
                    value = sanitizer(last_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT
    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict

    def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
        assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
        if outtmpl is None:
            outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
        try:
            outtmpl = self._outtmpl_expandpath(outtmpl)
            filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
            if not filename:
                return None

            if tmpl_type in ('', 'temp'):
                final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
                if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
                    filename = replace_extension(filename, ext, final_ext)
            elif tmpl_type:
                force_ext = OUTTMPL_TYPES[tmpl_type]
                if force_ext:
                    filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                no_ext, *ext = filename.rsplit('.', 2)
                filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

    def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
        """Generate the output filename"""
        if outtmpl:
            assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
            dir_type = None
        filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
        if not filename and dir_type not in ('', 'temp'):
            return ''

        if warn:
            if not self.params.get('paths'):
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
        if filename == '-' or not filename:
            return filename

        return self.get_output_path(dir_type, filename)

    def _match_entry(self, info_dict, incomplete=False, silent=False):
        """Returns None if the file should be downloaded"""
        _type = 'video' if 'playlist-match-filter' in self.params['compat_opts'] else info_dict.get('_type', 'video')
        assert incomplete or _type == 'video', 'Only video result can be considered complete'

        video_title = info_dict.get('title', info_dict.get('id', 'entry'))

        def check_filter():
            if _type in ('playlist', 'multi_video'):
                return
            elif _type in ('url', 'url_transparent') and not try_call(
                    lambda: self.get_info_extractor(info_dict['ie_key']).is_single_video(info_dict['url'])):
                return

            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'

            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title

            match_filter = self.params.get('match_filter')
            if match_filter is None:
                return None

            cancelled = None
            try:
                try:
                    ret = match_filter(info_dict, incomplete=incomplete)
                except TypeError:
                    # For backward compatibility
                    ret = None if incomplete else match_filter(info_dict)
            except DownloadCancelled as err:
                if err.msg is not NO_DEFAULT:
                    raise
                ret, cancelled = err.msg, err

            if ret is NO_DEFAULT:
                while True:
                    filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
                    reply = input(self._format_screen(
                        f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
                    if reply in {'y', ''}:
                        return None
                    elif reply == 'n':
                        if cancelled:
                            raise type(cancelled)(f'Skipping {video_title}')
                        return f'Skipping {video_title}'
            return ret

        if self.in_download_archive(info_dict):
            reason = ''.join((
                format_field(info_dict, 'id', f'{self._format_screen("%s", self.Styles.ID)}: '),
                format_field(info_dict, 'title', f'{self._format_screen("%s", self.Styles.EMPHASIS)} '),
                'has already been recorded in the archive'))
            break_opt, break_err = 'break_on_existing', ExistingVideoReached
        else:
            try:
                reason = check_filter()
            except DownloadCancelled as e:
                reason, break_opt, break_err = e.msg, 'match_filter', type(e)
            else:
                break_opt, break_err = 'break_on_reject', RejectedVideoReached
        if reason is not None:
            if not silent:
                self.to_screen('[download] ' + reason)
            if self.params.get(break_opt, False):
                raise break_err()
        return reason
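
    # A minimal sketch of a custom `match_filter` callable as consumed by check_filter()
    # above; `utils.match_filter_func` builds one from a filter expression. The expression
    # shown is an assumption for illustration only:
    #
    #   from yt_dlp.utils import match_filter_func
    #   ydl = YoutubeDL({'match_filter': match_filter_func('duration>60 & !is_live')})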

    @staticmethod
    def add_extra_info(info_dict, extra_info):
        '''Set the keys from extra_info in info dict if they are missing'''
        for key, value in extra_info.items():
            info_dict.setdefault(key, value)

    def extract_info(self, url, download=True, ie_key=None, extra_info=None,
                     process=True, force_generic_extractor=False):
        """
        Extract and return the information dictionary of the URL

        @param url          URL to extract

        @param download     Whether to download videos
        @param process      Whether to resolve all unresolved references (URLs, playlist items).
                            Must be True for download to work
        @param ie_key       Use only the extractor with this key

        @param extra_info   Dictionary containing the extra values to add to the info (For internal use only)
        @force_generic_extractor  Force using the generic extractor (Deprecated; use ie_key='Generic')
        """

        if extra_info is None:
            extra_info = {}

        if not ie_key and force_generic_extractor:
            ie_key = 'Generic'

        if ie_key:
            ies = {ie_key: self._ies[ie_key]} if ie_key in self._ies else {}
        else:
            ies = self._ies

        for key, ie in ies.items():
            if not ie.suitable(url):
                continue

            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')

            temp_id = ie.get_temp_id(url)
            if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': key}):
                self.to_screen(f'[download] {self._format_screen(temp_id, self.Styles.ID)}: '
                               'has already been recorded in the archive')
                if self.params.get('break_on_existing', False):
                    raise ExistingVideoReached()
                break
            return self.__extract_info(url, self.get_info_extractor(key), download, extra_info, process)
        else:
            extractors_restricted = self.params.get('allowed_extractors') not in (None, ['default'])
            self.report_error(f'No suitable extractor{format_field(ie_key, None, " (%s)")} found for URL {url}',
                              tb=False if extractors_restricted else None)
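
    # Illustrative sketch: extract_info() is the main entry point for callers; passing
    # ie_key restricts extraction to a single extractor. The URL and key below are
    # assumptions for illustration:
    #
    #   info = ydl.extract_info('https://example.com/video', download=False, ie_key='Generic')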

    def _handle_extraction_exceptions(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            while True:
                try:
                    return func(self, *args, **kwargs)
                except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
                    raise
                except ReExtractInfo as e:
                    if e.expected:
                        self.to_screen(f'{e}; Re-extracting data')
                    else:
                        self.to_stderr('\r')
                        self.report_warning(f'{e}; Re-extracting data')
                    continue
                except GeoRestrictedError as e:
                    msg = e.msg
                    if e.countries:
                        msg += '\nThis video is available in %s.' % ', '.join(
                            map(ISO3166Utils.short2full, e.countries))
                    msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
                    self.report_error(msg)
                except ExtractorError as e:  # An error we somewhat expected
                    self.report_error(str(e), e.format_traceback())
                except Exception as e:
                    if self.params.get('ignoreerrors'):
                        self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
                    else:
                        raise
                break
        return wrapper

    def _wait_for_video(self, ie_result={}):
        if (not self.params.get('wait_for_video')
                or ie_result.get('_type', 'video') != 'video'
                or ie_result.get('formats') or ie_result.get('url')):
            return

        format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
        last_msg = ''

        def progress(msg):
            nonlocal last_msg
            full_msg = f'{msg}\n'
            if not self.params.get('noprogress'):
                full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
            elif last_msg:
                return
            self.to_screen(full_msg, skip_eol=True)
            last_msg = msg

        min_wait, max_wait = self.params.get('wait_for_video')
        diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
        if diff is None and ie_result.get('live_status') == 'is_upcoming':
            diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
            self.report_warning('Release time of video is not known')
        elif ie_result and (diff or 0) <= 0:
            self.report_warning('Video should already be available according to extracted info')
        diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
        self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')

        wait_till = time.time() + diff
        try:
            while True:
                diff = wait_till - time.time()
                if diff <= 0:
                    progress('')
                    raise ReExtractInfo('[wait] Wait period ended', expected=True)
                progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
                time.sleep(1)
        except KeyboardInterrupt:
            progress('')
            raise ReExtractInfo('[wait] Interrupted by user', expected=True)
        except BaseException as e:
            if not isinstance(e, ReExtractInfo):
                self.to_screen('')
            raise
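
    # Illustrative sketch: `wait_for_video` is a (min, max) tuple of seconds between
    # retries for not-yet-released videos, which drives the polling loop above. The
    # values shown are assumptions for illustration:
    #
    #   ydl = YoutubeDL({'wait_for_video': (60, 600)})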

    def _load_cookies(self, data, *, autoscope=True):
        """Loads cookies from a `Cookie` header

        This tries to work around the security vulnerability of passing cookies to every domain.
        See: https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-v8mc-9377-rwjj

        @param data         The Cookie header as string to load the cookies from
        @param autoscope    If `False`, scope cookies using Set-Cookie syntax and error for cookie without domains
                            If `True`, save cookies for later to be stored in the jar with a limited scope
                            If a URL, save cookies in the jar with the domain of the URL
        """
        for cookie in LenientSimpleCookie(data).values():
            if autoscope and any(cookie.values()):
                raise ValueError('Invalid syntax in Cookie Header')

            domain = cookie.get('domain') or ''
            expiry = cookie.get('expires')
            if expiry == '':  # 0 is valid
                expiry = None
            prepared_cookie = http.cookiejar.Cookie(
                cookie.get('version') or 0, cookie.key, cookie.value, None, False,
                domain, True, True, cookie.get('path') or '', bool(cookie.get('path')),
                cookie.get('secure') or False, expiry, False, None, None, {})

            if domain:
                self.cookiejar.set_cookie(prepared_cookie)
            elif autoscope is True:
                self.deprecated_feature(
                    'Passing cookies as a header is a potential security risk; '
                    'they will be scoped to the domain of the downloaded urls. '
                    'Please consider loading cookies from a file or browser instead.')
                self.__header_cookies.append(prepared_cookie)
            elif autoscope:
                self.report_warning(
                    'The extractor result contains an unscoped cookie as an HTTP header. '
                    f'If you are using yt-dlp with an input URL{bug_reports_message(before=",")}',
                    only_once=True)
                self._apply_header_cookies(autoscope, [prepared_cookie])
            else:
                self.report_error('Unscoped cookies are not allowed; please specify some sort of scoping',
                                  tb=False, is_error=False)
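
    # Illustrative sketch (assumed behaviour): a raw `Cookie` header supplied through the
    # `http_headers` param reaches _load_cookies() with autoscope=True, so it is later
    # re-scoped to the domain of each downloaded URL by _apply_header_cookies(). The
    # header value is an assumption for illustration:
    #
    #   ydl = YoutubeDL({'http_headers': {'Cookie': 'SESSIONID=abc123'}})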

    def _apply_header_cookies(self, url, cookies=None):
        """Applies stray header cookies to the provided url

        This loads header cookies and scopes them to the domain provided in `url`.
        While this is not ideal, it helps reduce the risk of them being sent
        to an unintended destination while mostly maintaining compatibility.
        """
        parsed = urllib.parse.urlparse(url)
        if not parsed.hostname:
            return

        for cookie in map(copy.copy, cookies or self.__header_cookies):
            cookie.domain = f'.{parsed.hostname}'
            self.cookiejar.set_cookie(cookie)

    @_handle_extraction_exceptions
    def __extract_info(self, url, ie, download, extra_info, process):
        self._apply_header_cookies(url)

        try:
            ie_result = ie.extract(url)
        except UserNotLive as e:
            if process:
                if self.params.get('wait_for_video'):
                    self.report_warning(e)
                self._wait_for_video()
            raise
        if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
            self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
            return
        if isinstance(ie_result, list):
            # Backwards compatibility: old IE result format
            ie_result = {
                '_type': 'compat_list',
                'entries': ie_result,
            }
        if extra_info.get('original_url'):
            ie_result.setdefault('original_url', extra_info['original_url'])
        self.add_default_extra_info(ie_result, ie, url)
        if process:
            self._wait_for_video(ie_result)
            return self.process_ie_result(ie_result, download, extra_info)
        else:
            return ie_result

    def add_default_extra_info(self, ie_result, ie, url):
        if url is not None:
            self.add_extra_info(ie_result, {
                'webpage_url': url,
                'original_url': url,
            })
        webpage_url = ie_result.get('webpage_url')
        if webpage_url:
            self.add_extra_info(ie_result, {
                'webpage_url_basename': url_basename(webpage_url),
                'webpage_url_domain': get_domain(webpage_url),
            })
        if ie is not None:
            self.add_extra_info(ie_result, {
                'extractor': ie.IE_NAME,
                'extractor_key': ie.ie_key(),
            })

    def process_ie_result(self, ie_result, download=True, extra_info=None):
        """
        Take the result of the ie (may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.
        """
        if extra_info is None:
            extra_info = {}
        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            ie_result['url'] = sanitize_url(
                ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
            if ie_result.get('original_url') and not extra_info.get('original_url'):
                extra_info = {'original_url': ie_result['original_url'], **extra_info}

            extract_flat = self.params.get('extract_flat', False)
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                    or extract_flat is True):
                info_copy = ie_result.copy()
                ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
                if ie and not ie_result.get('id'):
                    info_copy['id'] = ie.get_temp_id(ie_result['url'])
                self.add_default_extra_info(info_copy, ie, ie_result['url'])
                self.add_extra_info(info_copy, extra_info)
                info_copy, _ = self.pre_process(info_copy)
                self._fill_common_fields(info_copy, False)
                self.__forced_printings(info_copy)
                self._raise_pending_errors(info_copy)
                if self.params.get('force_write_download_archive', False):
                    self.record_download_archive(info_copy)
                return ie_result

        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            ie_result = self.process_video_result(ie_result, download=download)
            self._raise_pending_errors(ie_result)
            additional_urls = (ie_result or {}).get('additional_urls')
            if additional_urls:
                # TODO: Improve MetadataParserPP to allow setting a list
                if isinstance(additional_urls, str):
                    additional_urls = [additional_urls]
                self.to_screen(
                    '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
                self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
                ie_result['additional_entries'] = [
                    self.extract_info(
                        url, download, extra_info=extra_info,
                        force_generic_extractor=self.params.get('force_generic_extractor'))
                    for url in additional_urls
                ]
            return ie_result
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(
                ie_result['url'], download,
                ie_key=ie_result.get('ie_key'),
                extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # extract_info may return None when ignoreerrors is enabled and
            # extraction failed with an error, don't crash and return early
            # in this case
            if not info:
                return info

            exempted_fields = {'_type', 'url', 'ie_key'}
            if not ie_result.get('section_end') and ie_result.get('section_start') is None:
                # For video clips, the id etc of the clip extractor should be used
                exempted_fields |= {'id', 'extractor', 'extractor_key'}

            new_result = info.copy()
            new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))

            # Extracted info may not be a video result (i.e.
            # info.get('_type', 'video') != video) but rather an url or
            # url_transparent. In such cases outer metadata (from ie_result)
            # should be propagated to inner one (info). For this to happen
            # _type of info should be overridden with url_transparent. This
            # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
            if new_result.get('_type') == 'url':
                new_result['_type'] = 'url_transparent'

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type in ('playlist', 'multi_video'):
            # Protect from infinite recursion due to recursively nested playlists
            # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
            webpage_url = ie_result.get('webpage_url')  # Playlists may not have webpage_url
            if webpage_url and webpage_url in self._playlist_urls:
                self.to_screen(
                    '[download] Skipping already downloaded playlist: %s'
                    % ie_result.get('title') or ie_result.get('id'))
                return

            self._playlist_level += 1
            self._playlist_urls.add(webpage_url)
            self._fill_common_fields(ie_result, False)
            self._sanitize_thumbnails(ie_result)
            try:
                return self.__process_playlist(ie_result, download)
            finally:
                self._playlist_level -= 1
                if not self._playlist_level:
                    self._playlist_urls.clear()
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                self.add_extra_info(r, {
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'webpage_url_domain': get_domain(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                })
                return r

            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)

    def _ensure_dir_exists(self, path):
        return make_dir(path, self.report_error)

    @staticmethod
    def _playlist_infodict(ie_result, strict=False, **kwargs):
        info = {
            'playlist_count': ie_result.get('playlist_count'),
            'playlist': ie_result.get('title') or ie_result.get('id'),
            'playlist_id': ie_result.get('id'),
            'playlist_title': ie_result.get('title'),
            'playlist_uploader': ie_result.get('uploader'),
            'playlist_uploader_id': ie_result.get('uploader_id'),
            **kwargs,
        }
        if strict:
            return info
        if ie_result.get('webpage_url'):
            info.update({
                'webpage_url': ie_result['webpage_url'],
                'webpage_url_basename': url_basename(ie_result['webpage_url']),
                'webpage_url_domain': get_domain(ie_result['webpage_url']),
            })
        return {
            **info,
            'playlist_index': 0,
            '__last_playlist_index': max(ie_result.get('requested_entries') or (0, 0)),
            'extractor': ie_result['extractor'],
            'extractor_key': ie_result['extractor_key'],
        }

    def __process_playlist(self, ie_result, download):
        """Process each entry in the playlist"""
        assert ie_result['_type'] in ('playlist', 'multi_video')

        common_info = self._playlist_infodict(ie_result, strict=True)
        title = common_info.get('playlist') or '<Untitled>'
        if self._match_entry(common_info, incomplete=True) is not None:
            return
        self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')

        all_entries = PlaylistEntries(self, ie_result)
        entries = orderedSet(all_entries.get_requested_items(), lazy=True)

        lazy = self.params.get('lazy_playlist')
        if lazy:
            resolved_entries, n_entries = [], 'N/A'
            ie_result['requested_entries'], ie_result['entries'] = None, None
        else:
            entries = resolved_entries = list(entries)
            n_entries = len(resolved_entries)
            ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
        if not ie_result.get('playlist_count'):
            # Better to do this after potentially exhausting entries
            ie_result['playlist_count'] = all_entries.get_full_count()

        extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
        ie_copy = collections.ChainMap(ie_result, extra)

        _infojson_written = False
        write_playlist_files = self.params.get('allow_playlist_files', True)
        if write_playlist_files and self.params.get('list_thumbnails'):
            self.list_thumbnails(ie_result)
        if write_playlist_files and not self.params.get('simulate'):
            _infojson_written = self._write_info_json(
                'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
            if _infojson_written is None:
                return
            if self._write_description('playlist', ie_result,
                                       self.prepare_filename(ie_copy, 'pl_description')) is None:
                return
            # TODO: This should be passed to ThumbnailsConvertor if necessary
            self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))

        if lazy:
            if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
                self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
        elif self.params.get('playlistreverse'):
            entries.reverse()
        elif self.params.get('playlistrandom'):
            random.shuffle(entries)

        self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} items'
                       f'{format_field(ie_result, "playlist_count", " of %s")}')

        keep_resolved_entries = self.params.get('extract_flat') != 'discard'
        if self.params.get('extract_flat') == 'discard_in_playlist':
            keep_resolved_entries = ie_result['_type'] != 'playlist'
        if keep_resolved_entries:
            self.write_debug('The information of all playlist entries will be held in memory')

        failures = 0
        max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
        for i, (playlist_index, entry) in enumerate(entries):
            if lazy:
                resolved_entries.append((playlist_index, entry))
            if not entry:
                continue

            entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
            if not lazy and 'playlist-index' in self.params['compat_opts']:
                playlist_index = ie_result['requested_entries'][i]

            entry_copy = collections.ChainMap(entry, {
                **common_info,
                'n_entries': int_or_none(n_entries),
                'playlist_index': playlist_index,
                'playlist_autonumber': i + 1,
            })

            if self._match_entry(entry_copy, incomplete=True) is not None:
                # For compatibility with youtube-dl. See https://github.com/yt-dlp/yt-dlp/issues/4369
                resolved_entries[i] = (playlist_index, NO_DEFAULT)
                continue

            self.to_screen('[download] Downloading item %s of %s' % (
                self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))

            entry_result = self.__process_iterable_entry(entry, download, collections.ChainMap({
                'playlist_index': playlist_index,
                'playlist_autonumber': i + 1,
            }, extra))
            if not entry_result:
                failures += 1
            if failures >= max_failures:
                self.report_error(
                    f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
                break
            if keep_resolved_entries:
                resolved_entries[i] = (playlist_index, entry_result)

        # Update with processed data
        ie_result['entries'] = [e for _, e in resolved_entries if e is not NO_DEFAULT]
        ie_result['requested_entries'] = [i for i, e in resolved_entries if e is not NO_DEFAULT]
        if ie_result['requested_entries'] == try_call(lambda: list(range(1, ie_result['playlist_count'] + 1))):
            # Do not set for full playlist
            ie_result.pop('requested_entries')

        # Write the updated info to json
        if _infojson_written is True and self._write_info_json(
                'updated playlist', ie_result,
                self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
            return

        ie_result = self.run_all_pps('playlist', ie_result)
        self.to_screen(f'[download] Finished downloading playlist: {title}')
        return ie_result
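
    # Illustrative sketch of playlist-related params consumed by __process_playlist();
    # the values are assumptions for illustration only:
    #
    #   ydl = YoutubeDL({
    #       'playlist_items': '1-5',           # resolved via PlaylistEntries
    #       'lazy_playlist': True,             # process entries as they are received
    #       'skip_playlist_after_errors': 3,   # max_failures above
    #   })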

    @_handle_extraction_exceptions
    def __process_iterable_entry(self, entry, download, extra_info):
        return self.process_ie_result(
            entry, download=download, extra_info=extra_info)

    def _build_format_filter(self, filter_spec):
        " Returns a function to filter the formats according to the filter_spec "

        OPERATORS = {
            '<': operator.lt,
            '<=': operator.le,
            '>': operator.gt,
            '>=': operator.ge,
            '=': operator.eq,
            '!=': operator.ne,
        }
        operator_rex = re.compile(r'''(?x)\s*
            (?P<key>[\w.-]+)\s*
            (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
            ''' % '|'.join(map(re.escape, OPERATORS.keys())))
        m = operator_rex.fullmatch(filter_spec)
        if m:
            try:
                comparison_value = int(m.group('value'))
            except ValueError:
                comparison_value = parse_filesize(m.group('value'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('value') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid value %r in format specification %r' % (
                            m.group('value'), filter_spec))
            op = OPERATORS[m.group('op')]

        if not m:
            STR_OPERATORS = {
                '=': operator.eq,
                '^=': lambda attr, value: attr.startswith(value),
                '$=': lambda attr, value: attr.endswith(value),
                '*=': lambda attr, value: value in attr,
                '~=': lambda attr, value: value.search(attr) is not None
            }
            str_operator_rex = re.compile(r'''(?x)\s*
                (?P<key>[a-zA-Z0-9._-]+)\s*
                (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
                (?P<quote>["'])?
                (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
                (?(quote)(?P=quote))\s*
                ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
            m = str_operator_rex.fullmatch(filter_spec)
            if m:
                if m.group('op') == '~=':
                    comparison_value = re.compile(m.group('value'))
                else:
                    comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
                str_op = STR_OPERATORS[m.group('op')]
                if m.group('negation'):
                    op = lambda attr, value: not str_op(attr, value)
                else:
                    op = str_op

        if not m:
            raise SyntaxError('Invalid filter specification %r' % filter_spec)

        def _filter(f):
            actual_value = f.get(m.group('key'))
            if actual_value is None:
                return m.group('none_inclusive')
            return op(actual_value, comparison_value)
        return _filter
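
    # Illustrative sketch: the filters built above correspond to the bracketed parts of
    # a format spec, combining numeric and string operators. The example spec is an
    # assumption for illustration:
    #
    #   ydl = YoutubeDL({'format': 'best[height<=1080][fps>30][ext=mp4]'})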

    def _check_formats(self, formats):
        for f in formats:
            self.to_screen('[info] Testing format %s' % f['format_id'])
            path = self.get_output_path('temp')
            if not self._ensure_dir_exists(f'{path}/'):
                continue
            temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
            temp_file.close()
            try:
                success, _ = self.dl(temp_file.name, f, test=True)
            except (DownloadError, OSError, ValueError) + network_exceptions:
                success = False
            finally:
                if os.path.exists(temp_file.name):
                    try:
                        os.remove(temp_file.name)
                    except OSError:
                        self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
            if success:
                yield f
            else:
                self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])

    def _default_format_spec(self, info_dict, download=True):
        def can_merge():
            merger = FFmpegMergerPP(self)
            return merger.available and merger.can_merge()

        prefer_best = (
            not self.params.get('simulate')
            and download
            and (
                not can_merge()
                or info_dict.get('is_live') and not self.params.get('live_from_start')
                or self.params['outtmpl']['default'] == '-'))
        compat = (
            prefer_best
            or self.params.get('allow_multiple_audio_streams', False)
            or 'format-spec' in self.params['compat_opts'])

        return (
            'best/bestvideo+bestaudio' if prefer_best
            else 'bestvideo*+bestaudio/best' if not compat
            else 'bestvideo+bestaudio/best')

    def build_format_selector(self, format_spec):
        def syntax_error(note, start):
            message = (
                'Invalid format specification: '
                '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
            return SyntaxError(message)

        PICKFIRST = 'PICKFIRST'
        MERGE = 'MERGE'
        SINGLE = 'SINGLE'
        GROUP = 'GROUP'
        FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])

        allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
                                  'video': self.params.get('allow_multiple_video_streams', False)}

        def _parse_filter(tokens):
            filter_parts = []
            for type, string_, start, _, _ in tokens:
                if type == tokenize.OP and string_ == ']':
                    return ''.join(filter_parts)
                else:
                    filter_parts.append(string_)

        def _remove_unused_ops(tokens):
            # Remove operators that we don't use and join them with the surrounding strings.
            # E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
            ALLOWED_OPS = ('/', '+', ',', '(', ')')
            last_string, last_start, last_end, last_line = None, None, None, None
            for type, string_, start, end, line in tokens:
                if type == tokenize.OP and string_ == '[':
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string_, start, end, line
                    # everything inside brackets will be handled by _parse_filter
                    for type, string_, start, end, line in tokens:
                        yield type, string_, start, end, line
                        if type == tokenize.OP and string_ == ']':
                            break
                elif type == tokenize.OP and string_ in ALLOWED_OPS:
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string_, start, end, line
                elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
                    if not last_string:
                        last_string = string_
                        last_start = start
                        last_end = end
                    else:
                        last_string += string_
            if last_string:
                yield tokenize.NAME, last_string, last_start, last_end, last_line

        def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
            selectors = []
            current_selector = None
            for type, string_, start, _, _ in tokens:
                # ENCODING is only defined in Python 3.x
                if type == getattr(tokenize, 'ENCODING', None):
                    pass
                elif type in [tokenize.NAME, tokenize.NUMBER]:
                    current_selector = FormatSelector(SINGLE, string_, [])
                elif type == tokenize.OP:
                    if string_ == ')':
                        if not inside_group:
                            # ')' will be handled by the parentheses group
                            tokens.restore_last_token()
                        break
                    elif inside_merge and string_ in ['/', ',']:
                        tokens.restore_last_token()
                        break
                    elif inside_choice and string_ == ',':
                        tokens.restore_last_token()
                        break
                    elif string_ == ',':
                        if not current_selector:
                            raise syntax_error('"," must follow a format selector', start)
                        selectors.append(current_selector)
                        current_selector = None
                    elif string_ == '/':
                        if not current_selector:
                            raise syntax_error('"/" must follow a format selector', start)
                        first_choice = current_selector
                        second_choice = _parse_format_selection(tokens, inside_choice=True)
                        current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
                    elif string_ == '[':
                        if not current_selector:
                            current_selector = FormatSelector(SINGLE, 'best', [])
                        format_filter = _parse_filter(tokens)
                        current_selector.filters.append(format_filter)
                    elif string_ == '(':
                        if current_selector:
                            raise syntax_error('Unexpected "("', start)
                        group = _parse_format_selection(tokens, inside_group=True)
                        current_selector = FormatSelector(GROUP, group, [])
                    elif string_ == '+':
                        if not current_selector:
                            raise syntax_error('Unexpected "+"', start)
                        selector_1 = current_selector
                        selector_2 = _parse_format_selection(tokens, inside_merge=True)
                        if not selector_2:
                            raise syntax_error('Expected a selector', start)
                        current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
                    else:
                        raise syntax_error(f'Operator not recognized: "{string_}"', start)
                elif type == tokenize.ENDMARKER:
                    break
            if current_selector:
                selectors.append(current_selector)
            return selectors

        def _merge(formats_pair):
            format_1, format_2 = formats_pair

            formats_info = []
            formats_info.extend(format_1.get('requested_formats', (format_1,)))
            formats_info.extend(format_2.get('requested_formats', (format_2,)))

            if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
                get_no_more = {'video': False, 'audio': False}
                for (i, fmt_info) in enumerate(formats_info):
                    if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
                        formats_info.pop(i)
                        continue
                    for aud_vid in ['audio', 'video']:
                        if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
                            if get_no_more[aud_vid]:
                                formats_info.pop(i)
                                break
                            get_no_more[aud_vid] = True

            if len(formats_info) == 1:
                return formats_info[0]

            video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
            audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']

            the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
            the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None

            output_ext = get_compatible_ext(
                vcodecs=[f.get('vcodec') for f in video_fmts],
                acodecs=[f.get('acodec') for f in audio_fmts],
                vexts=[f['ext'] for f in video_fmts],
                aexts=[f['ext'] for f in audio_fmts],
                preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
                             or self.params.get('prefer_free_formats') and ('webm', 'mkv')))

            filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))

            new_dict = {
                'requested_formats': formats_info,
                'format': '+'.join(filtered('format')),
                'format_id': '+'.join(filtered('format_id')),
                'ext': output_ext,
                'protocol': '+'.join(map(determine_protocol, formats_info)),
                'language': '+'.join(orderedSet(filtered('language'))) or None,
                'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
                'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
                'tbr': sum(filtered('tbr', 'vbr', 'abr')),
            }

            if the_only_video:
                new_dict.update({
                    'width': the_only_video.get('width'),
                    'height': the_only_video.get('height'),
                    'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
                    'fps': the_only_video.get('fps'),
                    'dynamic_range': the_only_video.get('dynamic_range'),
                    'vcodec': the_only_video.get('vcodec'),
                    'vbr': the_only_video.get('vbr'),
                    'stretched_ratio': the_only_video.get('stretched_ratio'),
                    'aspect_ratio': the_only_video.get('aspect_ratio'),
                })

            if the_only_audio:
                new_dict.update({
                    'acodec': the_only_audio.get('acodec'),
                    'abr': the_only_audio.get('abr'),
                    'asr': the_only_audio.get('asr'),
                    'audio_channels': the_only_audio.get('audio_channels')
                })

            return new_dict

        def _check_formats(formats):
            if self.params.get('check_formats') == 'selected':
                yield from self._check_formats(formats)
                return
            elif (self.params.get('check_formats') is not None
                    or self.params.get('allow_unplayable_formats')):
                yield from formats
                return

            for f in formats:
                if f.get('has_drm') or f.get('__needs_testing'):
                    yield from self._check_formats([f])
                else:
                    yield f

        def _build_selector_function(selector):
            if isinstance(selector, list):  # ,
                fs = [_build_selector_function(s) for s in selector]

                def selector_function(ctx):
                    for f in fs:
                        yield from f(ctx)
                return selector_function

            elif selector.type == GROUP:  # ()
                selector_function = _build_selector_function(selector.selector)

            elif selector.type == PICKFIRST:  # /
                fs = [_build_selector_function(s) for s in selector.selector]

                def selector_function(ctx):
                    for f in fs:
                        picked_formats = list(f(ctx))
                        if picked_formats:
                            return picked_formats
                    return []

            elif selector.type == MERGE:  # +
                selector_1, selector_2 = map(_build_selector_function, selector.selector)

                def selector_function(ctx):
                    for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
                        yield _merge(pair)

            elif selector.type == SINGLE:  # atom
                format_spec = selector.selector or 'best'

                # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
                if format_spec == 'all':
                    def selector_function(ctx):
                        yield from _check_formats(ctx['formats'][::-1])
                elif format_spec == 'mergeall':
                    def selector_function(ctx):
                        formats = list(_check_formats(
                            f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
                        if not formats:
                            return
                        merged_format = formats[-1]
                        for f in formats[-2::-1]:
                            merged_format = _merge((merged_format, f))
                        yield merged_format

                else:
                    format_fallback, seperate_fallback, format_reverse, format_idx = False, None, True, 1
                    mobj = re.match(
                        r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
                        format_spec)
                    if mobj is not None:
                        format_idx = int_or_none(mobj.group('n'), default=1)
                        format_reverse = mobj.group('bw')[0] == 'b'
                        format_type = (mobj.group('type') or [None])[0]
                        not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
                        format_modified = mobj.group('mod') is not None

                        format_fallback = not format_type and not format_modified  # for b, w
                        _filter_f = (
                            (lambda f: f.get('%scodec' % format_type) != 'none')
                            if format_type and format_modified  # bv*, ba*, wv*, wa*
                            else (lambda f: f.get('%scodec' % not_format_type) == 'none')
                            if format_type  # bv, ba, wv, wa
                            else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
                            if not format_modified  # b, w
                            else lambda f: True)  # b*, w*
                        filter_f = lambda f: _filter_f(f) and (
                            f.get('vcodec') != 'none' or f.get('acodec') != 'none')
                    else:
                        if format_spec in self._format_selection_exts['audio']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
                        elif format_spec in self._format_selection_exts['video']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
                            seperate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
                        elif format_spec in self._format_selection_exts['storyboards']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
                        else:
                            filter_f = lambda f: f.get('format_id') == format_spec  # id

                    def selector_function(ctx):
                        formats = list(ctx['formats'])
                        matches = list(filter(filter_f, formats)) if filter_f is not None else formats
                        if format_fallback and ctx['incomplete_formats']:
                            # for extractors with incomplete formats (audio only (soundcloud)
                            # or video only (imgur)) best/worst will fallback to
                            # best/worst {video,audio}-only format
                            matches = list(filter(lambda f: f.get('vcodec') != 'none' or f.get('acodec') != 'none', formats))
                        elif seperate_fallback and not ctx['has_merged_format']:
                            # for compatibility with youtube-dl when there is no pre-merged format
                            matches = list(filter(seperate_fallback, formats))
                        matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
                        try:
                            yield matches[format_idx - 1]
                        except LazyList.IndexError:
                            return

            filters = [self._build_format_filter(f) for f in selector.filters]

            def final_selector(ctx):
                ctx_copy = dict(ctx)
                for _filter in filters:
                    ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
                return selector_function(ctx_copy)
            return final_selector

        # HACK: Python 3.12 changed the underlying parser, rendering '7_a' invalid
        #  Prefix numbers with random letters to avoid it being classified as a number
        # See: https://github.com/yt-dlp/yt-dlp/pulls/8797
        # TODO: Implement parser not reliant on tokenize.tokenize
        prefix = ''.join(random.choices(string.ascii_letters, k=32))
        stream = io.BytesIO(re.sub(r'\d[_\d]*', rf'{prefix}\g<0>', format_spec).encode())
        try:
            tokens = list(_remove_unused_ops(
                token._replace(string=token.string.replace(prefix, ''))
                for token in tokenize.tokenize(stream.readline)))
        except tokenize.TokenError:
            raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))

        class TokenIterator:
            def __init__(self, tokens):
                self.tokens = tokens
                self.counter = 0

            def __iter__(self):
                return self

            def __next__(self):
                if self.counter >= len(self.tokens):
                    raise StopIteration()
                value = self.tokens[self.counter]
                self.counter += 1
                return value

            next = __next__

            def restore_last_token(self):
                self.counter -= 1

        parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
        return _build_selector_function(parsed_selector)
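
    # Illustrative sketch: the parser above turns a selector expression into a selector
    # function; e.g. 'bv*+ba/b' means "best video (possibly containing audio) merged
    # with best audio, falling back to the best single file". Example only:
    #
    #   selector = ydl.build_format_selector('bv*+ba/b')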

    def _calc_headers(self, info_dict, load_cookies=False):
        res = HTTPHeaderDict(self.params['http_headers'], info_dict.get('http_headers'))

        if load_cookies:  # For --load-info-json
            self._load_cookies(res.get('Cookie'), autoscope=info_dict['url'])  # compat
            self._load_cookies(info_dict.get('cookies'), autoscope=False)
        # The `Cookie` header is removed to prevent leaks and unscoped cookies.
        # See: https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-v8mc-9377-rwjj
        res.pop('Cookie', None)
        cookies = self.cookiejar.get_cookies_for_url(info_dict['url'])
        if cookies:
            encoder = LenientSimpleCookie()
            values = []
            for cookie in cookies:
                _, value = encoder.value_encode(cookie.value)
                values.append(f'{cookie.name}={value}')
                if cookie.domain:
                    values.append(f'Domain={cookie.domain}')
                if cookie.path:
                    values.append(f'Path={cookie.path}')
                if cookie.secure:
                    values.append('Secure')
                if cookie.expires:
                    values.append(f'Expires={cookie.expires}')
                if cookie.version:
                    values.append(f'Version={cookie.version}')
            info_dict['cookies'] = '; '.join(values)

        if 'X-Forwarded-For' not in res:
            x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
            if x_forwarded_for_ip:
                res['X-Forwarded-For'] = x_forwarded_for_ip

        return res

    def _calc_cookies(self, url):
        self.deprecation_warning('"YoutubeDL._calc_cookies" is deprecated and may be removed in a future version')
        return self.cookiejar.get_cookie_header(url)

    def _sort_thumbnails(self, thumbnails):
        thumbnails.sort(key=lambda t: (
            t.get('preference') if t.get('preference') is not None else -1,
            t.get('width') if t.get('width') is not None else -1,
            t.get('height') if t.get('height') is not None else -1,
            t.get('id') if t.get('id') is not None else '',
            t.get('url')))

    def _sanitize_thumbnails(self, info_dict):
        thumbnails = info_dict.get('thumbnails')
        if thumbnails is None:
            thumbnail = info_dict.get('thumbnail')
            if thumbnail:
                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
        if not thumbnails:
            return

        def check_thumbnails(thumbnails):
            for t in thumbnails:
                self.to_screen(f'[info] Testing thumbnail {t["id"]}')
                try:
                    self.urlopen(HEADRequest(t['url']))
                except network_exceptions as err:
                    self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
                    continue
                yield t

        self._sort_thumbnails(thumbnails)
        for i, t in enumerate(thumbnails):
            if t.get('id') is None:
                t['id'] = '%d' % i
            if t.get('width') and t.get('height'):
                t['resolution'] = '%dx%d' % (t['width'], t['height'])
            t['url'] = sanitize_url(t['url'])

        if self.params.get('check_formats') is True:
            info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
        else:
            info_dict['thumbnails'] = thumbnails

    def _fill_common_fields(self, info_dict, final=True):
        # TODO: move sanitization here
        if final:
            title = info_dict['fulltitle'] = info_dict.get('title')
            if not title:
                if title == '':
                    self.write_debug('Extractor gave empty title. Creating a generic title')
                else:
                    self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
                info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'

        if info_dict.get('duration') is not None:
            info_dict['duration_string'] = formatSeconds(info_dict['duration'])

        for ts_key, date_key in (
                ('timestamp', 'upload_date'),
                ('release_timestamp', 'release_date'),
                ('modified_timestamp', 'modified_date'),
        ):
            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
                # see http://bugs.python.org/issue1646728)
                with contextlib.suppress(ValueError, OverflowError, OSError):
                    upload_date = dt.datetime.fromtimestamp(info_dict[ts_key], dt.timezone.utc)
                    info_dict[date_key] = upload_date.strftime('%Y%m%d')

        if not info_dict.get('release_year'):
            info_dict['release_year'] = traverse_obj(info_dict, ('release_date', {lambda x: int(x[:4])}))

        live_keys = ('is_live', 'was_live')
        live_status = info_dict.get('live_status')
        if live_status is None:
            for key in live_keys:
                if info_dict.get(key) is False:
                    continue
                if info_dict.get(key):
                    live_status = key
                break
            if all(info_dict.get(key) is False for key in live_keys):
                live_status = 'not_live'
        if live_status:
            info_dict['live_status'] = live_status
            for key in live_keys:
                if info_dict.get(key) is None:
                    info_dict[key] = (live_status == key)
        if live_status == 'post_live':
            info_dict['was_live'] = True

        # Auto generate title fields corresponding to the *_number fields when missing
        # in order to always have clean titles. This is very common for TV series.
        for field in ('chapter', 'season', 'episode'):
            if final and info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
                info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])

        for old_key, new_key in self._deprecated_multivalue_fields.items():
            if new_key in info_dict and old_key in info_dict:
                if '_version' not in info_dict:  # HACK: Do not warn when using --load-info-json
                    self.deprecation_warning(f'Do not return {old_key!r} when {new_key!r} is present')
            elif old_value := info_dict.get(old_key):
                info_dict[new_key] = old_value.split(', ')
            elif new_value := info_dict.get(new_key):
                info_dict[old_key] = ', '.join(v.replace(',', '\N{FULLWIDTH COMMA}') for v in new_value)

    def _raise_pending_errors(self, info):
        err = info.pop('__pending_error', None)
        if err:
            self.report_error(err, tb=False)

    def sort_formats(self, info_dict):
        formats = self._get_formats(info_dict)
        formats.sort(key=FormatSorter(
            self, info_dict.get('_format_sort_fields') or []).calculate_preference)

    def process_video_result(self, info_dict, download=True):
        assert info_dict.get('_type', 'video') == 'video'
        self._num_videos += 1

        if 'id' not in info_dict:
            raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
        elif not info_dict.get('id'):
            raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])

        def report_force_conversion(field, field_not, conversion):
            self.report_warning(
                '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
                % (field, field_not, conversion))

        def sanitize_string_field(info, string_field):
            field = info.get(string_field)
            if field is None or isinstance(field, str):
                return
            report_force_conversion(string_field, 'a string', 'string')
            info[string_field] = str(field)

        def sanitize_numeric_fields(info):
            for numeric_field in self._NUMERIC_FIELDS:
                field = info.get(numeric_field)
                if field is None or isinstance(field, (int, float)):
                    continue
                report_force_conversion(numeric_field, 'numeric', 'int')
                info[numeric_field] = int_or_none(field)

        sanitize_string_field(info_dict, 'id')
        sanitize_numeric_fields(info_dict)
        if info_dict.get('section_end') and info_dict.get('section_start') is not None:
            info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
        if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
            self.report_warning('"duration" field is negative, there is an error in extractor')

        chapters = info_dict.get('chapters') or []
        if chapters and chapters[0].get('start_time'):
            chapters.insert(0, {'start_time': 0})

        dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
        for idx, (prev, current, next_) in enumerate(zip(
                (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
            if current.get('start_time') is None:
                current['start_time'] = prev.get('end_time')
            if not current.get('end_time'):
                current['end_time'] = next_.get('start_time')
            if not current.get('title'):
                current['title'] = f'<Untitled Chapter {idx}>'

        if 'playlist' not in info_dict:
            # It isn't part of a playlist
            info_dict['playlist'] = None
            info_dict['playlist_index'] = None

        self._sanitize_thumbnails(info_dict)

        thumbnail = info_dict.get('thumbnail')
        thumbnails = info_dict.get('thumbnails')
        if thumbnail:
            info_dict['thumbnail'] = sanitize_url(thumbnail)
        elif thumbnails:
            info_dict['thumbnail'] = thumbnails[-1]['url']

        if info_dict.get('display_id') is None and 'id' in info_dict:
            info_dict['display_id'] = info_dict['id']

        self._fill_common_fields(info_dict)

        for cc_kind in ('subtitles', 'automatic_captions'):
            cc = info_dict.get(cc_kind)
            if cc:
                for _, subtitle in cc.items():
                    for subtitle_format in subtitle:
                        if subtitle_format.get('url'):
                            subtitle_format['url'] = sanitize_url(subtitle_format['url'])
                        if subtitle_format.get('ext') is None:
                            subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()

        automatic_captions = info_dict.get('automatic_captions')
        subtitles = info_dict.get('subtitles')

        info_dict['requested_subtitles'] = self.process_subtitles(
            info_dict['id'], subtitles, automatic_captions)

        formats = self._get_formats(info_dict)

        # Backward compatibility with InfoExtractor._sort_formats
        field_preference = (formats or [{}])[0].pop('__sort_fields', None)
        if field_preference:
            info_dict['_format_sort_fields'] = field_preference

        info_dict['_has_drm'] = any(  # or None ensures --clean-infojson removes it
            f.get('has_drm') and f['has_drm'] != 'maybe' for f in formats) or None
        if not self.params.get('allow_unplayable_formats'):
            formats = [f for f in formats if not f.get('has_drm') or f['has_drm'] == 'maybe']

        if formats and all(f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
            self.report_warning(
                f'{"This video is DRM protected and " if info_dict["_has_drm"] else ""}'
                'only images are available for download. Use --list-formats to see them'.capitalize())

        get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
        if not get_from_start:
            info_dict['title'] += ' ' + dt.datetime.now().strftime('%Y-%m-%d %H:%M')
        if info_dict.get('is_live') and formats:
            formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
            if get_from_start and not formats:
                self.raise_no_formats(info_dict, msg=(
                    '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
                    'If you want to download from the current time, use --no-live-from-start'))

        def is_wellformed(f):
            url = f.get('url')
            if not url:
                self.report_warning(
                    '"url" field is missing or empty - skipping format, '
                    'there is an error in extractor')
                return False
            if isinstance(url, bytes):
                sanitize_string_field(f, 'url')
            return True

        # Filter out malformed formats for better extraction robustness
        formats = list(filter(is_wellformed, formats or []))

        if not formats:
            self.raise_no_formats(info_dict)

        for format in formats:
            sanitize_string_field(format, 'format_id')
            sanitize_numeric_fields(format)
            format['url'] = sanitize_url(format['url'])
            if format.get('ext') is None:
                format['ext'] = determine_ext(format['url']).lower()
            if format['ext'] in ('aac', 'opus', 'mp3', 'flac', 'vorbis'):
                if format.get('acodec') is None:
                    format['acodec'] = format['ext']
            if format.get('protocol') is None:
                format['protocol'] = determine_protocol(format)
            if format.get('resolution') is None:
                format['resolution'] = self.format_resolution(format, default=None)
            if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
                format['dynamic_range'] = 'SDR'
            if format.get('aspect_ratio') is None:
                format['aspect_ratio'] = try_call(lambda: round(format['width'] / format['height'], 2))
            # For fragmented formats, "tbr" is often max bitrate and not average
            if (('manifest-filesize-approx' in self.params['compat_opts'] or not format.get('manifest_url'))
                    and not format.get('filesize') and not format.get('filesize_approx')):
                format['filesize_approx'] = filesize_from_tbr(format.get('tbr'), info_dict.get('duration'))
            format['http_headers'] = self._calc_headers(collections.ChainMap(format, info_dict), load_cookies=True)

        # Safeguard against old/insecure infojson when using --load-info-json
        if info_dict.get('http_headers'):
            info_dict['http_headers'] = HTTPHeaderDict(info_dict['http_headers'])
            info_dict['http_headers'].pop('Cookie', None)

        # This is copied to http_headers by the above _calc_headers and can now be removed
        if '__x_forwarded_for_ip' in info_dict:
            del info_dict['__x_forwarded_for_ip']

        self.sort_formats({
            'formats': formats,
            '_format_sort_fields': info_dict.get('_format_sort_fields')
        })

        # Sanitize and group by format_id
        formats_dict = {}
        for i, format in enumerate(formats):
            if not format.get('format_id'):
                format['format_id'] = str(i)
            else:
                # Sanitize format_id from characters used in format selector expression
                format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
            formats_dict.setdefault(format['format_id'], []).append(format)

        # Make sure all formats have unique format_id
        common_exts = set(itertools.chain(*self._format_selection_exts.values()))
        for format_id, ambiguous_formats in formats_dict.items():
            ambigious_id = len(ambiguous_formats) > 1
            for i, format in enumerate(ambiguous_formats):
                if ambigious_id:
                    format['format_id'] = '%s-%d' % (format_id, i)
                # Ensure there is no conflict between id and ext in format selection
                # See https://github.com/yt-dlp/yt-dlp/issues/1282
                if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
                    format['format_id'] = 'f%s' % format['format_id']

                if format.get('format') is None:
                    format['format'] = '{id} - {res}{note}'.format(
                        id=format['format_id'],
                        res=self.format_resolution(format),
                        note=format_field(format, 'format_note', ' (%s)'),
                    )

        if self.params.get('check_formats') is True:
            formats = LazyList(self._check_formats(formats[::-1]), reverse=True)

        if not formats or formats[0] is not info_dict:
            # only set the 'formats' fields if the original info_dict list them
            # otherwise we end up with a circular reference, the first (and unique)
            # element in the 'formats' field in info_dict is info_dict itself,
            # which can't be exported to json
            info_dict['formats'] = formats

        info_dict, _ = self.pre_process(info_dict)

        if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
            return info_dict

        self.post_extract(info_dict)
        info_dict, _ = self.pre_process(info_dict, 'after_filter')

        # The pre-processors may have modified the formats
        formats = self._get_formats(info_dict)

        list_only = self.params.get('simulate') == 'list_only'
        interactive_format_selection = not list_only and self.format_selector == '-'
        if self.params.get('list_thumbnails'):
            self.list_thumbnails(info_dict)
        if self.params.get('listsubtitles'):
            if 'automatic_captions' in info_dict:
                self.list_subtitles(
                    info_dict['id'], automatic_captions, 'automatic captions')
            self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
        if self.params.get('listformats') or interactive_format_selection:
            self.list_formats(info_dict)
        if list_only:
            # Without this printing, -F --print-json will not work
            self.__forced_printings(info_dict)
            return info_dict

        format_selector = self.format_selector
        while True:
            if interactive_format_selection:
                req_format = input(self._format_screen('\nEnter format selector ', self.Styles.EMPHASIS)
                                   + '(Press ENTER for default, or Ctrl+C to quit)'
                                   + self._format_screen(': ', self.Styles.EMPHASIS))
                try:
                    format_selector = self.build_format_selector(req_format) if req_format else None
                except SyntaxError as err:
                    self.report_error(err, tb=False, is_error=False)
                    continue

            if format_selector is None:
                req_format = self._default_format_spec(info_dict, download=download)
                self.write_debug(f'Default format spec: {req_format}')
                format_selector = self.build_format_selector(req_format)

            formats_to_download = list(format_selector({
                'formats': formats,
                'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
                'incomplete_formats': (all(f.get('vcodec') == 'none' for f in formats)  # No formats with video
                                       or all(f.get('acodec') == 'none' for f in formats)),  # OR, No formats with audio
            }))
            if interactive_format_selection and not formats_to_download:
                self.report_error('Requested format is not available', tb=False, is_error=False)
                continue
            break

        if not formats_to_download:
            if not self.params.get('ignore_no_formats_error'):
                raise ExtractorError(
                    'Requested format is not available. Use --list-formats for a list of available formats',
                    expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
            self.report_warning('Requested format is not available')
            # Process what we can, even without any available formats.
            formats_to_download = [{}]

        requested_ranges = tuple(self.params.get('download_ranges', lambda *_: [{}])(info_dict, self))
        best_format, downloaded_formats = formats_to_download[-1], []
        if download:
            if best_format and requested_ranges:
                def to_screen(*msg):
                    self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')

                to_screen(f'Downloading {len(formats_to_download)} format(s):',
                          (f['format_id'] for f in formats_to_download))
                if requested_ranges != ({}, ):
                    to_screen(f'Downloading {len(requested_ranges)} time ranges:',
                              (f'{c["start_time"]:.1f}-{c["end_time"]:.1f}' for c in requested_ranges))
            max_downloads_reached = False

            for fmt, chapter in itertools.product(formats_to_download, requested_ranges):
                new_info = self._copy_infodict(info_dict)
                new_info.update(fmt)
                offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
                end_time = offset + min(chapter.get('end_time', duration), duration)
                # duration may not be accurate. So allow deviations <1sec
                if end_time == float('inf') or end_time > offset + duration + 1:
                    end_time = None
                if chapter or offset:
                    new_info.update({
                        'section_start': offset + chapter.get('start_time', 0),
                        'section_end': end_time,
                        'section_title': chapter.get('title'),
                        'section_number': chapter.get('index'),
                    })
                downloaded_formats.append(new_info)
                try:
                    self.process_info(new_info)
                except MaxDownloadsReached:
                    max_downloads_reached = True
                self._raise_pending_errors(new_info)
                # Remove copied info
                for key, val in tuple(new_info.items()):
                    if info_dict.get(key) == val:
                        new_info.pop(key)
                if max_downloads_reached:
                    break

            write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
            assert write_archive.issubset({True, False, 'ignore'})
            if True in write_archive and False not in write_archive:
                self.record_download_archive(info_dict)

            info_dict['requested_downloads'] = downloaded_formats
            info_dict = self.run_all_pps('after_video', info_dict)
            if max_downloads_reached:
                raise MaxDownloadsReached()

        # We update the info dict with the selected best quality format (backwards compatibility)
        info_dict.update(best_format)
        return info_dict
    def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
        """Select the requested subtitles and their format"""
        available_subs, normal_sub_langs = {}, []
        if normal_subtitles and self.params.get('writesubtitles'):
            available_subs.update(normal_subtitles)
            normal_sub_langs = tuple(normal_subtitles.keys())
        if automatic_captions and self.params.get('writeautomaticsub'):
            for lang, cap_info in automatic_captions.items():
                if lang not in available_subs:
                    available_subs[lang] = cap_info

        if not available_subs or (
                not self.params.get('writesubtitles')
                and not self.params.get('writeautomaticsub')):
            return None

        all_sub_langs = tuple(available_subs.keys())
        if self.params.get('allsubtitles', False):
            requested_langs = all_sub_langs
        elif self.params.get('subtitleslangs', False):
            try:
                requested_langs = orderedSet_from_options(
                    self.params.get('subtitleslangs'), {'all': all_sub_langs}, use_regex=True)
            except re.error as e:
                raise ValueError(f'Wrong regex for subtitlelangs: {e.pattern}')
        else:
            requested_langs = LazyList(itertools.chain(
                ['en'] if 'en' in normal_sub_langs else [],
                filter(lambda f: f.startswith('en'), normal_sub_langs),
                ['en'] if 'en' in all_sub_langs else [],
                filter(lambda f: f.startswith('en'), all_sub_langs),
                normal_sub_langs, all_sub_langs,
            ))[:1]
        if requested_langs:
            self.to_screen(f'[info] {video_id}: Downloading subtitles: {", ".join(requested_langs)}')

        formats_query = self.params.get('subtitlesformat', 'best')
        formats_preference = formats_query.split('/') if formats_query else []
        subs = {}
        for lang in requested_langs:
            formats = available_subs.get(lang)
            if formats is None:
                self.report_warning(f'{lang} subtitles not available for {video_id}')
                continue
            for ext in formats_preference:
                if ext == 'best':
                    f = formats[-1]
                    break
                matches = list(filter(lambda f: f['ext'] == ext, formats))
                if matches:
                    f = matches[-1]
                    break
            else:
                f = formats[-1]
                self.report_warning(
                    'No subtitle format found matching "%s" for language %s, '
                    'using %s' % (formats_query, lang, f['ext']))
            subs[lang] = f
        return subs
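
    # Illustrative sketch (not part of the original module): how the subtitle-selection
    # parameters used above are typically supplied. The URL is a placeholder.
    #
    #   import yt_dlp
    #   params = {
    #       'writesubtitles': True,             # manually-created subtitles
    #       'writeautomaticsub': True,          # fall back to automatic captions
    #       'subtitleslangs': ['en.*', 'ja'],   # regexes allowed (use_regex=True above)
    #       'subtitlesformat': 'srt/best',      # preference list, split on '/'
    #   }
    #   with yt_dlp.YoutubeDL(params) as ydl:
    #       ydl.download(['https://example.com/watch?v=xxxxxxxx'])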
    def _forceprint(self, key, info_dict):
        if info_dict is None:
            return
        info_copy = info_dict.copy()
        info_copy.setdefault('filename', self.prepare_filename(info_dict))
        if info_dict.get('requested_formats') is not None:
            # For RTMP URLs, also include the playpath
            info_copy['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
        elif info_dict.get('url'):
            info_copy['urls'] = info_dict['url'] + info_dict.get('play_path', '')
        info_copy['formats_table'] = self.render_formats_table(info_dict)
        info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
        info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
        info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))

        def format_tmpl(tmpl):
            mobj = re.fullmatch(r'([\w.:,]|-\d|(?P<dict>{([\w.:,]|-\d)+}))+=?', tmpl)
            if not mobj:
                return tmpl

            fmt = '%({})s'
            if tmpl.startswith('{'):
                tmpl, fmt = f'.{tmpl}', '%({})j'
            if tmpl.endswith('='):
                tmpl, fmt = tmpl[:-1], '{0} = %({0})#j'
            return '\n'.join(map(fmt.format, [tmpl] if mobj.group('dict') else tmpl.split(',')))

        for tmpl in self.params['forceprint'].get(key, []):
            self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))

        for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
            filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
            tmpl = format_tmpl(tmpl)
            self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
            if self._ensure_dir_exists(filename):
                with open(filename, 'a', encoding='utf-8', newline='') as f:
                    f.write(self.evaluate_outtmpl(tmpl, info_copy) + os.linesep)
        return info_copy
    def __forced_printings(self, info_dict, filename=None, incomplete=True):
        if (self.params.get('forcejson')
                or self.params['forceprint'].get('video')
                or self.params['print_to_file'].get('video')):
            self.post_extract(info_dict)
        info_dict['filename'] = filename
        info_copy = self._forceprint('video', info_dict)

        def print_field(field, actual_field=None, optional=False):
            if actual_field is None:
                actual_field = field
            if self.params.get(f'force{field}') and (
                    info_copy.get(field) is not None or (not optional and not incomplete)):
                self.to_stdout(info_copy[actual_field])

        print_field('title')
        print_field('url', 'urls')
        print_field('thumbnail', optional=True)
        print_field('description', optional=True)
        print_field('filename')
        if self.params.get('forceduration') and info_copy.get('duration') is not None:
            self.to_stdout(formatSeconds(info_copy['duration']))
        print_field('format')

        if self.params.get('forcejson'):
            self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
    def dl(self, name, info, subtitle=False, test=False):
        if not info.get('url'):
            self.raise_no_formats(info, True)

        if test:
            verbose = self.params.get('verbose')
            params = {
                'quiet': self.params.get('quiet') or not verbose,
                'noprogress': not verbose,
                'skip_unavailable_fragments': False,
                'keep_fragments': False,
                '_no_ytdl_file': True,
            }
        else:
            params = self.params
        fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
        for ph in self._progress_hooks:
            fd.add_progress_hook(ph)
        urls = '", "'.join(
            (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
            for f in info.get('requested_formats', []) or [info])
        self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')

        # Note: Ideally info should be a deep-copied so that hooks cannot modify it.
        # But it may contain objects that are not deep-copyable
        new_info = self._copy_infodict(info)
        if new_info.get('http_headers') is None:
            new_info['http_headers'] = self._calc_headers(new_info)
        return fd.download(name, new_info, subtitle)
    def existing_file(self, filepaths, *, default_overwrite=True):
        existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
        if existing_files and not self.params.get('overwrites', default_overwrite):
            return existing_files[0]

        for file in existing_files:
            self.report_file_delete(file)
            os.remove(file)
    def process_info(self, info_dict):
        """Process a single resolved IE result. (Modifies it in-place)"""

        assert info_dict.get('_type', 'video') == 'video'
        original_infodict = info_dict

        if 'format' not in info_dict and 'ext' in info_dict:
            info_dict['format'] = info_dict['ext']

        if self._match_entry(info_dict) is not None:
            info_dict['__write_download_archive'] = 'ignore'
            return

        # Does nothing under normal operation - for backward compatibility of process_info
        self.post_extract(info_dict)

        def replace_info_dict(new_info):
            nonlocal info_dict
            if new_info == info_dict:
                return
            info_dict.clear()
            info_dict.update(new_info)

        new_info, _ = self.pre_process(info_dict, 'video')
        replace_info_dict(new_info)
        self._num_downloads += 1

        # info_dict['_filename'] needs to be set for backward compatibility
        info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
        temp_filename = self.prepare_filename(info_dict, 'temp')
        files_to_move = {}

        self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
        def check_max_downloads():
            if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
                raise MaxDownloadsReached()

        if self.params.get('simulate'):
            info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
            check_max_downloads()
            return

        if full_filename is None:
            return
        if not self._ensure_dir_exists(encodeFilename(full_filename)):
            return
        if not self._ensure_dir_exists(encodeFilename(temp_filename)):
            return

        if self._write_description('video', info_dict,
                                   self.prepare_filename(info_dict, 'description')) is None:
            return

        sub_files = self._write_subtitles(info_dict, temp_filename)
        if sub_files is None:
            return
        files_to_move.update(dict(sub_files))

        thumb_files = self._write_thumbnails(
            'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
        if thumb_files is None:
            return
        files_to_move.update(dict(thumb_files))

        infofn = self.prepare_filename(info_dict, 'infojson')
        _infojson_written = self._write_info_json('video', info_dict, infofn)
        if _infojson_written:
            info_dict['infojson_filename'] = infofn
            # For backward compatibility, even though it was a private field
            info_dict['__infojson_filename'] = infofn
        elif _infojson_written is None:
            return

        # Note: Annotations are deprecated
        if self.params.get('writeannotations', False):
            annofn = self.prepare_filename(info_dict, 'annotation')
            if not self._ensure_dir_exists(encodeFilename(annofn)):
                return
            if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
                self.to_screen('[info] Video annotations are already present')
            elif not info_dict.get('annotations'):
                self.report_warning('There are no annotations to write.')
            else:
                try:
                    self.to_screen('[info] Writing video annotations to: ' + annofn)
                    with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                        annofile.write(info_dict['annotations'])
                except (KeyError, TypeError):
                    self.report_warning('There are no annotations to write.')
                except OSError:
                    self.report_error('Cannot write annotations file: ' + annofn)
                    return
        # Write internet shortcut files
        def _write_link_file(link_type):
            url = try_get(info_dict['webpage_url'], iri_to_uri)
            if not url:
                self.report_warning(
                    f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
                return True
            linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
            if not self._ensure_dir_exists(encodeFilename(linkfn)):
                return False
            if self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
                self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
                return True
            try:
                self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
                with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
                          newline='\r\n' if link_type == 'url' else '\n') as linkfile:
                    template_vars = {'url': url}
                    if link_type == 'desktop':
                        template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
                    linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
            except OSError:
                self.report_error(f'Cannot write internet shortcut {linkfn}')
                return False
            return True

        write_links = {
            'url': self.params.get('writeurllink'),
            'webloc': self.params.get('writewebloclink'),
            'desktop': self.params.get('writedesktoplink'),
        }
        if self.params.get('writelink'):
            link_type = ('webloc' if sys.platform == 'darwin'
                         else 'desktop' if sys.platform.startswith('linux')
                         else 'url')
            write_links[link_type] = True

        if any(should_write and not _write_link_file(link_type)
               for link_type, should_write in write_links.items()):
            return
        new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
        replace_info_dict(new_info)

        if self.params.get('skip_download'):
            info_dict['filepath'] = temp_filename
            info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
            info_dict['__files_to_move'] = files_to_move
            replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
            info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
        else:
            # Download
            info_dict.setdefault('__postprocessors', [])
            try:
                def existing_video_file(*filepaths):
                    ext = info_dict.get('ext')
                    converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
                    file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
                                              default_overwrite=False)
                    if file:
                        info_dict['ext'] = os.path.splitext(file)[1][1:]
                    return file

                fd, success = None, True
                if info_dict.get('protocol') or info_dict.get('url'):
                    fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
                    if fd != FFmpegFD and 'no-direct-merge' not in self.params['compat_opts'] and (
                            info_dict.get('section_start') or info_dict.get('section_end')):
                        msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
                               else 'You have requested downloading the video partially, but ffmpeg is not installed')
                        self.report_error(f'{msg}. Aborting')
                        return
                if info_dict.get('requested_formats') is not None:
                    old_ext = info_dict['ext']
                    if self.params.get('merge_output_format') is None:
                        if (info_dict['ext'] == 'webm'
                                and info_dict.get('thumbnails')
                                # check with type instead of pp_key, __name__, or isinstance
                                # since we dont want any custom PPs to trigger this
                                and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])):  # noqa: E721
                            info_dict['ext'] = 'mkv'
                            self.report_warning(
                                'webm doesn\'t support embedding a thumbnail, mkv will be used')
                    new_ext = info_dict['ext']

                    def correct_ext(filename, ext=new_ext):
                        if filename == '-':
                            return filename
                        filename_real_ext = os.path.splitext(filename)[1][1:]
                        filename_wo_ext = (
                            os.path.splitext(filename)[0]
                            if filename_real_ext in (old_ext, new_ext)
                            else filename)
                        return f'{filename_wo_ext}.{ext}'

                    # Ensure filename always has a correct extension for successful merge
                    full_filename = correct_ext(full_filename)
                    temp_filename = correct_ext(temp_filename)
                    dl_filename = existing_video_file(full_filename, temp_filename)
                    info_dict['__real_download'] = False
                    # NOTE: Copy so that original format dicts are not modified
                    info_dict['requested_formats'] = list(map(dict, info_dict['requested_formats']))

                    merger = FFmpegMergerPP(self)
                    downloaded = []
                    if dl_filename is not None:
                        self.report_file_already_downloaded(dl_filename)
                    elif fd:
                        for f in info_dict['requested_formats'] if fd != FFmpegFD else []:
                            f['filepath'] = fname = prepend_extension(
                                correct_ext(temp_filename, info_dict['ext']),
                                'f%s' % f['format_id'], info_dict['ext'])
                            downloaded.append(fname)
                        info_dict['url'] = '\n'.join(f['url'] for f in info_dict['requested_formats'])
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        if self.params.get('allow_unplayable_formats'):
                            self.report_warning(
                                'You have requested merging of multiple formats '
                                'while also allowing unplayable formats to be downloaded. '
                                'The formats won\'t be merged to prevent data corruption.')
                        elif not merger.available:
                            msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
                            if not self.params.get('ignoreerrors'):
                                self.report_error(f'{msg}. Aborting due to --abort-on-error')
                                return
                            self.report_warning(f'{msg}. The formats won\'t be merged')

                        if temp_filename == '-':
                            reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
                                      else 'but the formats are incompatible for simultaneous download' if merger.available
                                      else 'but ffmpeg is not installed')
                            self.report_warning(
                                f'You have requested downloading multiple formats to stdout {reason}. '
                                'The formats will be streamed one after the other')
                            fname = temp_filename
                        for f in info_dict['requested_formats']:
                            new_info = dict(info_dict)
                            del new_info['requested_formats']
                            new_info.update(f)
                            if temp_filename != '-':
                                fname = prepend_extension(
                                    correct_ext(temp_filename, new_info['ext']),
                                    'f%s' % f['format_id'], new_info['ext'])
                                if not self._ensure_dir_exists(fname):
                                    return
                                f['filepath'] = fname
                                downloaded.append(fname)
                            partial_success, real_download = self.dl(fname, new_info)
                            info_dict['__real_download'] = info_dict['__real_download'] or real_download
                            success = success and partial_success
                    if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
                        info_dict['__postprocessors'].append(merger)
                        info_dict['__files_to_merge'] = downloaded
                        # Even if there were no downloads, it is being merged only now
                        info_dict['__real_download'] = True
                    else:
                        for file in downloaded:
                            files_to_move[file] = None
                else:
                    # Just a single file
                    dl_filename = existing_video_file(full_filename, temp_filename)
                    if dl_filename is None or dl_filename == temp_filename:
                        # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
                        # So we should try to resume the download
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        self.report_file_already_downloaded(dl_filename)

                dl_filename = dl_filename or temp_filename
                info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
            except network_exceptions as err:
                self.report_error('unable to download video data: %s' % error_to_compat_str(err))
                return
            except OSError as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
                return

            self._raise_pending_errors(info_dict)
            if success and full_filename != '-':
                def fixup():
                    do_fixup = True
                    fixup_policy = self.params.get('fixup')
                    vid = info_dict['id']

                    if fixup_policy in ('ignore', 'never'):
                        return
                    elif fixup_policy == 'warn':
                        do_fixup = 'warn'
                    elif fixup_policy != 'force':
                        assert fixup_policy in ('detect_or_warn', None)
                        if not info_dict.get('__real_download'):
                            do_fixup = False

                    def ffmpeg_fixup(cndn, msg, cls):
                        if not (do_fixup and cndn):
                            return
                        elif do_fixup == 'warn':
                            self.report_warning(f'{vid}: {msg}')
                            return
                        pp = cls(self)
                        if pp.available:
                            info_dict['__postprocessors'].append(pp)
                        else:
                            self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')

                    stretched_ratio = info_dict.get('stretched_ratio')
                    ffmpeg_fixup(stretched_ratio not in (1, None),
                                 f'Non-uniform pixel ratio {stretched_ratio}',
                                 FFmpegFixupStretchedPP)

                    downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
                    downloader = downloader.FD_NAME if downloader else None

                    ext = info_dict.get('ext')
                    postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
                        isinstance(pp, FFmpegVideoConvertorPP)
                        and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
                    ) for pp in self._pps['post_process'])

                    if not postprocessed_by_ffmpeg:
                        ffmpeg_fixup(fd != FFmpegFD and ext == 'm4a'
                                     and info_dict.get('container') == 'm4a_dash',
                                     'writing DASH m4a. Only some players support this container',
                                     FFmpegFixupM4aPP)
                        ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
                                     or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
                                     'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
                                     FFmpegFixupM3u8PP)
                        ffmpeg_fixup(downloader == 'dashsegments'
                                     and (info_dict.get('is_live') or info_dict.get('is_dash_periods')),
                                     'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)

                    ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
                    ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)

                fixup()
                try:
                    replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
                except PostProcessingError as err:
                    self.report_error('Postprocessing: %s' % str(err))
                    return
                try:
                    for ph in self._post_hooks:
                        ph(info_dict['filepath'])
                except Exception as err:
                    self.report_error('post hooks: %s' % str(err))
                    return
                info_dict['__write_download_archive'] = True

        assert info_dict is original_infodict  # Make sure the info_dict was modified in-place
        if self.params.get('force_write_download_archive'):
            info_dict['__write_download_archive'] = True
        check_max_downloads()
    def __download_wrapper(self, func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                res = func(*args, **kwargs)
            except UnavailableVideoError as e:
                self.report_error(e)
            except DownloadCancelled as e:
                self.to_screen(f'[info] {e}')
                if not self.params.get('break_per_url'):
                    raise
                self._num_downloads = 0
            else:
                if self.params.get('dump_single_json', False):
                    self.post_extract(res)
                    self.to_stdout(json.dumps(self.sanitize_info(res)))
        return wrapper
    def download(self, url_list):
        """Download a given list of URLs."""
        url_list = variadic(url_list)  # Passing a single URL is a common mistake
        outtmpl = self.params['outtmpl']['default']
        if (len(url_list) > 1
                and outtmpl != '-'
                and '%' not in outtmpl
                and self.params.get('max_downloads') != 1):
            raise SameFileError(outtmpl)

        for url in url_list:
            self.__download_wrapper(self.extract_info)(
                url, force_generic_extractor=self.params.get('force_generic_extractor', False))

        return self._download_retcode
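
    # Usage sketch (illustrative, not part of the original module): download() is the
    # main entry point for URL lists; a single string also works thanks to the
    # variadic() call above. The URL and options here are placeholders.
    #
    #   import yt_dlp
    #   with yt_dlp.YoutubeDL({'outtmpl': '%(title)s.%(ext)s'}) as ydl:
    #       retcode = ydl.download(['https://example.com/watch?v=xxxxxxxx'])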
    def download_with_info_file(self, info_filename):
        with contextlib.closing(fileinput.FileInput(
                [info_filename], mode='r',
                openhook=fileinput.hook_encoded('utf-8'))) as f:
            # FileInput doesn't have a read method, we can't call json.load
            infos = [self.sanitize_info(info, self.params.get('clean_infojson', True))
                     for info in variadic(json.loads('\n'.join(f)))]
        for info in infos:
            try:
                self.__download_wrapper(self.process_ie_result)(info, download=True)
            except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
                if not isinstance(e, EntryNotInPlaylist):
                    self.to_stderr('\r')
                webpage_url = info.get('webpage_url')
                if webpage_url is None:
                    raise
                self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
                self.download([webpage_url])
            except ExtractorError as e:
                self.report_error(e)
        return self._download_retcode
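
    # Illustrative sketch (this is the programmatic counterpart of --load-info-json):
    # re-run a download from a previously written .info.json file. Filename is a placeholder.
    #
    #   with yt_dlp.YoutubeDL() as ydl:
    #       ydl.download_with_info_file('video.info.json')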
    @staticmethod
    def sanitize_info(info_dict, remove_private_keys=False):
        ''' Sanitize the infodict for converting to json '''
        if info_dict is None:
            return info_dict
        info_dict.setdefault('epoch', int(time.time()))
        info_dict.setdefault('_type', 'video')
        info_dict.setdefault('_version', {
            'version': __version__,
            'current_git_head': current_git_head(),
            'release_git_head': RELEASE_GIT_HEAD,
            'repository': ORIGIN,
        })

        if remove_private_keys:
            reject = lambda k, v: v is None or k.startswith('__') or k in {
                'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
                'entries', 'filepath', '_filename', 'filename', 'infojson_filename', 'original_url',
                'playlist_autonumber',
            }
        else:
            reject = lambda k, v: False

        def filter_fn(obj):
            if isinstance(obj, dict):
                return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
            elif isinstance(obj, (list, tuple, set, LazyList)):
                return list(map(filter_fn, obj))
            elif obj is None or isinstance(obj, (str, int, float, bool)):
                return obj
            else:
                return repr(obj)

        return filter_fn(info_dict)

    @staticmethod
    def filter_requested_info(info_dict, actually_filter=True):
        ''' Alias of sanitize_info for backward compatibility '''
        return YoutubeDL.sanitize_info(info_dict, actually_filter)
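
    # Usage sketch (illustrative): sanitize_info() is how extracted metadata is made
    # JSON-serializable, e.g. before dumping it in --dump-json style output.
    #
    #   info = ydl.extract_info(url, download=False)
    #   print(json.dumps(ydl.sanitize_info(info)))
    #
    # Passing remove_private_keys=True additionally strips internal fields such as
    # 'requested_formats' and anything starting with '__'.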
    def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
        for filename in set(filter(None, files_to_delete)):
            if msg:
                self.to_screen(msg % filename)
            try:
                os.remove(filename)
            except OSError:
                self.report_warning(f'Unable to delete file {filename}')
            if filename in info.get('__files_to_move', []):  # NB: Delete even if None
                del info['__files_to_move'][filename]
    @staticmethod
    def post_extract(info_dict):
        def actual_post_extract(info_dict):
            if info_dict.get('_type') in ('playlist', 'multi_video'):
                for video_dict in info_dict.get('entries', {}):
                    actual_post_extract(video_dict or {})
                return

            post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
            info_dict.update(post_extractor())

        actual_post_extract(info_dict or {})
    def run_pp(self, pp, infodict):
        files_to_delete = []
        if '__files_to_move' not in infodict:
            infodict['__files_to_move'] = {}
        try:
            files_to_delete, infodict = pp.run(infodict)
        except PostProcessingError as e:
            # Must be True and not 'only_download'
            if self.params.get('ignoreerrors') is True:
                self.report_error(e)
                return infodict
            raise

        if not files_to_delete:
            return infodict
        if self.params.get('keepvideo', False):
            for f in files_to_delete:
                infodict['__files_to_move'].setdefault(f, '')
        else:
            self._delete_downloaded_files(
                *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
        return infodict
    def run_all_pps(self, key, info, *, additional_pps=None):
        self._forceprint(key, info)
        for pp in (additional_pps or []) + self._pps[key]:
            info = self.run_pp(pp, info)
        return info
    def pre_process(self, ie_info, key='pre_process', files_to_move=None):
        info = dict(ie_info)
        info['__files_to_move'] = files_to_move or {}
        try:
            info = self.run_all_pps(key, info)
        except PostProcessingError as err:
            msg = f'Preprocessing: {err}'
            info.setdefault('__pending_error', msg)
            self.report_error(msg, is_error=False)
        return info, info.pop('__files_to_move', None)
    def post_process(self, filename, info, files_to_move=None):
        """Run all the postprocessors on the given file."""
        info['filepath'] = filename
        info['__files_to_move'] = files_to_move or {}
        info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
        info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
        del info['__files_to_move']
        return self.run_all_pps('after_move', info)
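
    # Note (added for clarity, not in the original source): postprocessors are grouped
    # by "when" keys. The stages exercised in this file are 'pre_process' (the default
    # key of pre_process()), 'video', 'before_dl', 'post_process', 'after_move' and
    # 'after_video'; run_all_pps() simply chains run_pp() over every postprocessor
    # registered for one such key.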
    def _make_archive_id(self, info_dict):
        video_id = info_dict.get('id')
        if not video_id:
            return
        # Future-proof against any change in case
        # and backwards compatibility with prior versions
        extractor = info_dict.get('extractor_key') or info_dict.get('ie_key')  # key in a playlist
        if extractor is None:
            url = str_or_none(info_dict.get('url'))
            if not url:
                return
            # Try to find matching extractor for the URL and take its ie_key
            for ie_key, ie in self._ies.items():
                if ie.suitable(url):
                    extractor = ie_key
                    break
            else:
                return
        return make_archive_id(extractor, video_id)
    def in_download_archive(self, info_dict):
        if not self.archive:
            return False

        vid_ids = [self._make_archive_id(info_dict)]
        vid_ids.extend(info_dict.get('_old_archive_ids') or [])
        return any(id_ in self.archive for id_ in vid_ids)
    def record_download_archive(self, info_dict):
        fn = self.params.get('download_archive')
        if fn is None:
            return
        vid_id = self._make_archive_id(info_dict)
        assert vid_id

        self.write_debug(f'Adding to archive: {vid_id}')
        if is_path_like(fn):
            with locked_file(fn, 'a', encoding='utf-8') as archive_file:
                archive_file.write(vid_id + '\n')
        self.archive.add(vid_id)
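
    # Illustrative note (assumption about make_archive_id(), which lives elsewhere):
    # archive entries are typically the lower-cased extractor key followed by the
    # video ID, one per line in the --download-archive file, e.g.
    #
    #   youtube dQw4w9WgXcQ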
    @staticmethod
    def format_resolution(format, default='unknown'):
        if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
            return 'audio only'
        if format.get('resolution') is not None:
            return format['resolution']
        if format.get('width') and format.get('height'):
            return '%dx%d' % (format['width'], format['height'])
        elif format.get('height'):
            return '%sp' % format['height']
        elif format.get('width'):
            return '%dx?' % format['width']
        return default
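
    # Examples of what format_resolution() returns for a few hypothetical format dicts:
    #   {'vcodec': 'none', 'acodec': 'mp4a.40.2'}  -> 'audio only'
    #   {'width': 1920, 'height': 1080}            -> '1920x1080'
    #   {'height': 720}                            -> '720p'
    #   {'width': 640}                             -> '640x?'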
    def _list_format_headers(self, *headers):
        if self.params.get('listformats_table', True) is not False:
            return [self._format_out(header, self.Styles.HEADERS) for header in headers]
        return headers
    def _format_note(self, fdict):
        res = ''
        if fdict.get('ext') in ['f4f', 'f4m']:
            res += '(unsupported)'
        if fdict.get('language'):
            if res:
                res += ' '
            res += '[%s]' % fdict['language']
        if fdict.get('format_note') is not None:
            if res:
                res += ' '
            res += fdict['format_note']
        if fdict.get('tbr') is not None:
            if res:
                res += ', '
            res += '%4dk' % fdict['tbr']
        if fdict.get('container') is not None:
            if res:
                res += ', '
            res += '%s container' % fdict['container']
        if (fdict.get('vcodec') is not None
                and fdict.get('vcodec') != 'none'):
            if res:
                res += ', '
            res += fdict['vcodec']
            if fdict.get('vbr') is not None:
                res += '@'
        elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
            res += 'video@'
        if fdict.get('vbr') is not None:
            res += '%4dk' % fdict['vbr']
        if fdict.get('fps') is not None:
            if res:
                res += ', '
            res += '%sfps' % fdict['fps']
        if fdict.get('acodec') is not None:
            if res:
                res += ', '
            if fdict['acodec'] == 'none':
                res += 'video only'
            else:
                res += '%-5s' % fdict['acodec']
        elif fdict.get('abr') is not None:
            if res:
                res += ', '
            res += 'audio'
        if fdict.get('abr') is not None:
            res += '@%3dk' % fdict['abr']
        if fdict.get('asr') is not None:
            res += ' (%5dHz)' % fdict['asr']
        if fdict.get('filesize') is not None:
            if res:
                res += ', '
            res += format_bytes(fdict['filesize'])
        elif fdict.get('filesize_approx') is not None:
            if res:
                res += ', '
            res += '~' + format_bytes(fdict['filesize_approx'])
        return res
    def _get_formats(self, info_dict):
        if info_dict.get('formats') is None:
            if info_dict.get('url') and info_dict.get('_type', 'video') == 'video':
                return [info_dict]
            return []
        return info_dict['formats']
    def render_formats_table(self, info_dict):
        formats = self._get_formats(info_dict)
        if not formats:
            return
        if not self.params.get('listformats_table', True) is not False:
            table = [
                [
                    format_field(f, 'format_id'),
                    format_field(f, 'ext'),
                    self.format_resolution(f),
                    self._format_note(f),
                ] for f in formats if (f.get('preference') or 0) >= -1000]
            return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)

        def simplified_codec(f, field):
            assert field in ('acodec', 'vcodec')
            codec = f.get(field)
            if not codec:
                return 'unknown'
            elif codec != 'none':
                return '.'.join(codec.split('.')[:4])

            if field == 'vcodec' and f.get('acodec') == 'none':
                return 'images'
            elif field == 'acodec' and f.get('vcodec') == 'none':
                return ''
            return self._format_out('audio only' if field == 'vcodec' else 'video only',
                                    self.Styles.SUPPRESS)

        delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
        table = [
            [
                self._format_out(format_field(f, 'format_id'), self.Styles.ID),
                format_field(f, 'ext'),
                format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
                format_field(f, 'fps', '\t%d', func=round),
                format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
                format_field(f, 'audio_channels', '\t%s'),
                delim, (
                    format_field(f, 'filesize', ' \t%s', func=format_bytes)
                    or format_field(f, 'filesize_approx', '≈\t%s', func=format_bytes)
                    or format_field(filesize_from_tbr(f.get('tbr'), info_dict.get('duration')), None,
                                    self._format_out('~\t%s', self.Styles.SUPPRESS), func=format_bytes)),
                format_field(f, 'tbr', '\t%dk', func=round),
                shorten_protocol_name(f.get('protocol', '')),
                delim,
                simplified_codec(f, 'vcodec'),
                format_field(f, 'vbr', '\t%dk', func=round),
                simplified_codec(f, 'acodec'),
                format_field(f, 'abr', '\t%dk', func=round),
                format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
                join_nonempty(format_field(f, 'language', '[%s]'), join_nonempty(
                    self._format_out('UNSUPPORTED', self.Styles.BAD_FORMAT) if f.get('ext') in ('f4f', 'f4m') else None,
                    (self._format_out('Maybe DRM', self.Styles.WARNING) if f.get('has_drm') == 'maybe'
                     else self._format_out('DRM', self.Styles.BAD_FORMAT) if f.get('has_drm') else None),
                    format_field(f, 'format_note'),
                    format_field(f, 'container', ignore=(None, f.get('ext'))),
                    delim=', '), delim=' '),
            ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
        header_line = self._list_format_headers(
            'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', 'CH', delim, '\tFILESIZE', '\tTBR', 'PROTO',
            delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')

        return render_table(
            header_line, table, hide_empty=True,
            delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
    def render_thumbnails_table(self, info_dict):
        thumbnails = list(info_dict.get('thumbnails') or [])
        if not thumbnails:
            return None
        return render_table(
            self._list_format_headers('ID', 'Width', 'Height', 'URL'),
            [[t.get('id'), t.get('width') or 'unknown', t.get('height') or 'unknown', t['url']] for t in thumbnails])
    def render_subtitles_table(self, video_id, subtitles):
        def _row(lang, formats):
            exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
            if len(set(names)) == 1:
                names = [] if names[0] == 'unknown' else names[:1]
            return [lang, ', '.join(names), ', '.join(exts)]

        if not subtitles:
            return None
        return render_table(
            self._list_format_headers('Language', 'Name', 'Formats'),
            [_row(lang, formats) for lang, formats in subtitles.items()],
            hide_empty=True)
    def __list_table(self, video_id, name, func, *args):
        table = func(*args)
        if not table:
            self.to_screen(f'{video_id} has no {name}')
            return
        self.to_screen(f'[info] Available {name} for {video_id}:')
        self.to_stdout(table)

    def list_formats(self, info_dict):
        self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)

    def list_thumbnails(self, info_dict):
        self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)

    def list_subtitles(self, video_id, subtitles, name='subtitles'):
        self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
    def print_debug_header(self):
        if not self.params.get('verbose'):
            return

        from . import _IN_CLI  # Must be delayed import

        # These imports can be slow. So import them only as needed
        from .extractor.extractors import _LAZY_LOADER
        from .extractor.extractors import (
            _PLUGIN_CLASSES as plugin_ies,
            _PLUGIN_OVERRIDES as plugin_ie_overrides,
        )

        def get_encoding(stream):
            ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
            additional_info = []
            if os.environ.get('TERM', '').lower() == 'dumb':
                additional_info.append('dumb')
            if not supports_terminal_sequences(stream):
                from .utils import WINDOWS_VT_MODE  # Must be imported locally
                additional_info.append('No VT' if WINDOWS_VT_MODE is False else 'No ANSI')
            if additional_info:
                ret = f'{ret} ({",".join(additional_info)})'
            return ret

        encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            self.get_encoding(),
            ', '.join(
                f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
                if stream is not None and key != 'console'))

        logger = self.params.get('logger')
        if logger:
            write_debug = lambda msg: logger.debug(f'[debug] {msg}')
            write_debug(encoding_str)
        else:
            write_string(f'[debug] {encoding_str}\n', encoding=None)
            write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')

        source = detect_variant()
        if VARIANT not in (None, 'pip'):
            source += '*'
        klass = type(self)
        write_debug(join_nonempty(
            f'{REPOSITORY.rpartition("/")[2]} version',
            _make_label(ORIGIN, CHANNEL.partition('@')[2] or __version__, __version__),
            f'[{RELEASE_GIT_HEAD[:9]}]' if RELEASE_GIT_HEAD else '',
            '' if source == 'unknown' else f'({source})',
            '' if _IN_CLI else 'API' if klass == YoutubeDL else f'API:{self.__module__}.{klass.__qualname__}',
            delim=' '))

        write_debug(f'params: {self.params}')

        if not _LAZY_LOADER:
            if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
                write_debug('Lazy loading extractors is forcibly disabled')
            else:
                write_debug('Lazy loading extractors is disabled')
        if self.params['compat_opts']:
            write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))

        if current_git_head():
            write_debug(f'Git HEAD: {current_git_head()}')
        write_debug(system_identifier())

        exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
        ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
        if ffmpeg_features:
            exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))

        exe_versions['rtmpdump'] = rtmpdump_version()
        exe_versions['phantomjs'] = PhantomJSwrapper._version()
        exe_str = ', '.join(
            f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
        ) or 'none'
        write_debug('exe versions: %s' % exe_str)

        from .compat.compat_utils import get_package_info
        from .dependencies import available_dependencies

        write_debug('Optional libraries: %s' % (', '.join(sorted({
            join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
        })) or 'none'))

        write_debug(f'Proxy map: {self.proxies}')
        write_debug(f'Request Handlers: {", ".join(rh.RH_NAME for rh in self._request_director.handlers.values())}')
        for plugin_type, plugins in {'Extractor': plugin_ies, 'Post-Processor': plugin_pps}.items():
            display_list = ['%s%s' % (
                klass.__name__, '' if klass.__name__ == name else f' as {name}')
                for name, klass in plugins.items()]
            if plugin_type == 'Extractor':
                display_list.extend(f'{plugins[-1].IE_NAME.partition("+")[2]} ({parent.__name__})'
                                    for parent, plugins in plugin_ie_overrides.items())
            if not display_list:
                continue
            write_debug(f'{plugin_type} Plugins: {", ".join(sorted(display_list))}')

        plugin_dirs = plugin_directories()
        if plugin_dirs:
            write_debug(f'Plugin directories: {plugin_dirs}')

        # Not implemented
        if False and self.params.get('call_home'):
            ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
            write_debug('Public IP address: %s' % ipaddr)
            latest_version = self.urlopen(
                'https://yt-dl.org/latest/version').read().decode()
            if version_tuple(latest_version) > version_tuple(__version__):
                self.report_warning(
                    'You are using an outdated version (newest version: %s)! '
                    'See https://yt-dl.org/update if you need help updating.' %
                    latest_version)
    @functools.cached_property
    def proxies(self):
        """Global proxy configuration"""
        opts_proxy = self.params.get('proxy')
        if opts_proxy is not None:
            if opts_proxy == '':
                opts_proxy = '__noproxy__'
            proxies = {'all': opts_proxy}
        else:
            proxies = urllib.request.getproxies()
            # compat. Set HTTPS_PROXY to __noproxy__ to revert
            if 'http' in proxies and 'https' not in proxies:
                proxies['https'] = proxies['http']
        return proxies

    @functools.cached_property
    def cookiejar(self):
        """Global cookiejar instance"""
        return load_cookies(
            self.params.get('cookiefile'), self.params.get('cookiesfrombrowser'), self)
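
    # Usage sketch (illustrative, parameter names as used above): cookies can come from
    # a Netscape-format file or directly from a browser profile.
    #
    #   yt_dlp.YoutubeDL({'cookiefile': 'cookies.txt'})
    #   yt_dlp.YoutubeDL({'cookiesfrombrowser': ('firefox', )})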
    def _opener(self):
        """
        Get a urllib OpenerDirector from the Urllib handler (deprecated).
        """
        self.deprecation_warning('YoutubeDL._opener is deprecated, use YoutubeDL.urlopen()')
        handler = self._request_director.handlers['Urllib']
        return handler._get_instance(cookiejar=self.cookiejar, proxies=self.proxies)
    def _get_available_impersonate_targets(self):
        # todo(future): make available as public API
        return [
            (target, rh.RH_NAME)
            for rh in self._request_director.handlers.values()
            if isinstance(rh, ImpersonateRequestHandler)
            for target in rh.supported_targets
        ]

    def _impersonate_target_available(self, target):
        # todo(future): make available as public API
        return any(
            rh.is_supported_target(target)
            for rh in self._request_director.handlers.values()
            if isinstance(rh, ImpersonateRequestHandler))
    def urlopen(self, req):
        """ Start an HTTP download """
        if isinstance(req, str):
            req = Request(req)
        elif isinstance(req, urllib.request.Request):
            self.deprecation_warning(
                'Passing a urllib.request.Request object to YoutubeDL.urlopen() is deprecated. '
                'Use yt_dlp.networking.common.Request instead.')
            req = urllib_req_to_req(req)
        assert isinstance(req, Request)

        # compat: Assume user:pass url params are basic auth
        url, basic_auth_header = extract_basic_auth(req.url)
        if basic_auth_header:
            req.headers['Authorization'] = basic_auth_header
        req.url = sanitize_url(url)

        clean_proxies(proxies=req.proxies, headers=req.headers)
        clean_headers(req.headers)

        try:
            return self._request_director.send(req)
        except NoSupportingHandlers as e:
            for ue in e.unsupported_errors:
                # FIXME: This depends on the order of errors.
                if not (ue.handler and ue.msg):
                    continue
                if ue.handler.RH_KEY == 'Urllib' and 'unsupported url scheme: "file"' in ue.msg.lower():
                    raise RequestError(
                        'file:// URLs are disabled by default in yt-dlp for security reasons. '
                        'Use --enable-file-urls to enable at your own risk.', cause=ue) from ue
                if (
                    'unsupported proxy type: "https"' in ue.msg.lower()
                    and 'requests' not in self._request_director.handlers
                    and 'curl_cffi' not in self._request_director.handlers
                ):
                    raise RequestError(
                        'To use an HTTPS proxy for this request, one of the following dependencies needs to be installed: requests, curl_cffi')
                elif (
                    re.match(r'unsupported url scheme: "wss?"', ue.msg.lower())
                    and 'websockets' not in self._request_director.handlers
                ):
                    raise RequestError(
                        'This request requires WebSocket support. '
                        'Ensure one of the following dependencies are installed: websockets',
                        cause=ue) from ue
                elif re.match(r'unsupported (?:extensions: impersonate|impersonate target)', ue.msg.lower()):
                    raise RequestError(
                        f'Impersonate target "{req.extensions["impersonate"]}" is not available.'
                        f' See --list-impersonate-targets for available targets.'
                        f' This request requires browser impersonation, however you may be missing dependencies'
                        f' required to support this target.')
            raise
        except SSLError as e:
            if 'UNSAFE_LEGACY_RENEGOTIATION_DISABLED' in str(e):
                raise RequestError('UNSAFE_LEGACY_RENEGOTIATION_DISABLED: Try using --legacy-server-connect', cause=e) from e
            elif 'SSLV3_ALERT_HANDSHAKE_FAILURE' in str(e):
                raise RequestError(
                    'SSLV3_ALERT_HANDSHAKE_FAILURE: The server may not support the current cipher list. '
                    'Try using --legacy-server-connect', cause=e) from e
            raise
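
    # Usage sketch (illustrative): urlopen() accepts a plain URL string or a
    # yt_dlp.networking Request (urllib Requests are converted with a deprecation warning).
    # The URL below is a placeholder.
    #
    #   from yt_dlp.networking import Request
    #   resp = ydl.urlopen(Request('https://example.com/api', headers={'Accept': 'application/json'}))
    #   data = resp.read()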
    def build_request_director(self, handlers, preferences=None):
        logger = _YDLLogger(self)
        headers = self.params['http_headers'].copy()
        proxies = self.proxies.copy()
        clean_headers(headers)
        clean_proxies(proxies, headers)

        director = RequestDirector(logger=logger, verbose=self.params.get('debug_printtraffic'))
        for handler in handlers:
            director.add_handler(handler(
                logger=logger,
                headers=headers,
                cookiejar=self.cookiejar,
                proxies=proxies,
                prefer_system_certs='no-certifi' in self.params['compat_opts'],
                verify=not self.params.get('nocheckcertificate'),
                **traverse_obj(self.params, {
                    'verbose': 'debug_printtraffic',
                    'source_address': 'source_address',
                    'timeout': 'socket_timeout',
                    'legacy_ssl_support': 'legacyserverconnect',
                    'enable_file_urls': 'enable_file_urls',
                    'impersonate': 'impersonate',
                    'client_cert': {
                        'client_certificate': 'client_certificate',
                        'client_certificate_key': 'client_certificate_key',
                        'client_certificate_password': 'client_certificate_password',
                    },
                }),
            ))
        director.preferences.update(preferences or [])
        if 'prefer-legacy-http-handler' in self.params['compat_opts']:
            director.preferences.add(lambda rh, _: 500 if rh.RH_KEY == 'Urllib' else 0)
        return director

    @functools.cached_property
    def _request_director(self):
        return self.build_request_director(_REQUEST_HANDLERS.values(), _RH_PREFERENCES)
    def encode(self, s):
        if isinstance(s, bytes):
            return s  # Already encoded

        try:
            return s.encode(self.get_encoding())
        except UnicodeEncodeError as err:
            err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
            raise

    def get_encoding(self):
        encoding = self.params.get('encoding')
        if encoding is None:
            encoding = preferredencoding()
        return encoding
    def _write_info_json(self, label, ie_result, infofn, overwrite=None):
        ''' Write infojson and returns True = written, 'exists' = Already exists, False = skip, None = error '''
        if overwrite is None:
            overwrite = self.params.get('overwrites', True)
        if not self.params.get('writeinfojson'):
            return False
        elif not infofn:
            self.write_debug(f'Skipping writing {label} infojson')
            return False
        elif not self._ensure_dir_exists(infofn):
            return None
        elif not overwrite and os.path.exists(infofn):
            self.to_screen(f'[info] {label.title()} metadata is already present')
            return 'exists'

        self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
        try:
            write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
            return True
        except OSError:
            self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
            return None
    def _write_description(self, label, ie_result, descfn):
        ''' Write description and returns True = written, False = skip, None = error '''
        if not self.params.get('writedescription'):
            return False
        elif not descfn:
            self.write_debug(f'Skipping writing {label} description')
            return False
        elif not self._ensure_dir_exists(descfn):
            return None
        elif not self.params.get('overwrites', True) and os.path.exists(descfn):
            self.to_screen(f'[info] {label.title()} description is already present')
        elif ie_result.get('description') is None:
            self.to_screen(f'[info] There\'s no {label} description to write')
            return False
        else:
            try:
                self.to_screen(f'[info] Writing {label} description to: {descfn}')
                with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                    descfile.write(ie_result['description'])
            except OSError:
                self.report_error(f'Cannot write {label} description file {descfn}')
                return None
        return True
    def _write_subtitles(self, info_dict, filename):
        ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
        ret = []
        subtitles = info_dict.get('requested_subtitles')
        if not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
            # subtitles download errors are already managed as troubles in relevant IE
            # that way it will silently go on when used with unsupporting IE
            return ret
        elif not subtitles:
            self.to_screen('[info] There are no subtitles for the requested languages')
            return ret
        sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
        if not sub_filename_base:
            self.to_screen('[info] Skipping writing video subtitles')
            return ret

        for sub_lang, sub_info in subtitles.items():
            sub_format = sub_info['ext']
            sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
            sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
            existing_sub = self.existing_file((sub_filename_final, sub_filename))
            if existing_sub:
                self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
                sub_info['filepath'] = existing_sub
                ret.append((existing_sub, sub_filename_final))
                continue

            self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
            if sub_info.get('data') is not None:
                try:
                    # Use newline='' to prevent conversion of newline characters
                    # See https://github.com/ytdl-org/youtube-dl/issues/10268
                    with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
                        subfile.write(sub_info['data'])
                    sub_info['filepath'] = sub_filename
                    ret.append((sub_filename, sub_filename_final))
                    continue
                except OSError:
                    self.report_error(f'Cannot write video subtitles file {sub_filename}')
                    return None

            try:
                sub_copy = sub_info.copy()
                sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
                self.dl(sub_filename, sub_copy, subtitle=True)
                sub_info['filepath'] = sub_filename
                ret.append((sub_filename, sub_filename_final))
            except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
                msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
                if self.params.get('ignoreerrors') is not True:  # False or 'only_download'
                    if not self.params.get('ignoreerrors'):
                        self.report_error(msg)
                    raise DownloadError(msg)
                self.report_warning(msg)
        return ret
    def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
        ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename); or None if error '''
        write_all = self.params.get('write_all_thumbnails', False)
        thumbnails, ret = [], []
        if write_all or self.params.get('writethumbnail', False):
            thumbnails = info_dict.get('thumbnails') or []
            if not thumbnails:
                self.to_screen(f'[info] There are no {label} thumbnails to download')
                return ret
        multiple = write_all and len(thumbnails) > 1

        if thumb_filename_base is None:
            thumb_filename_base = filename
        if thumbnails and not thumb_filename_base:
            self.write_debug(f'Skipping writing {label} thumbnail')
            return ret

        if thumbnails and not self._ensure_dir_exists(filename):
            return None

        for idx, t in list(enumerate(thumbnails))[::-1]:
            thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
            thumb_display_id = f'{label} thumbnail {t["id"]}'
            thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
            thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))

            existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
            if existing_thumb:
                self.to_screen('[info] %s is already present' % (
                    thumb_display_id if multiple else f'{label} thumbnail').capitalize())
                t['filepath'] = existing_thumb
                ret.append((existing_thumb, thumb_filename_final))
            else:
                self.to_screen(f'[info] Downloading {thumb_display_id} ...')
                try:
                    uf = self.urlopen(Request(t['url'], headers=t.get('http_headers', {})))
                    self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
                    with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    ret.append((thumb_filename, thumb_filename_final))
                    t['filepath'] = thumb_filename
                except network_exceptions as err:
                    if isinstance(err, HTTPError) and err.status == 404:
                        self.to_screen(f'[info] {thumb_display_id.title()} does not exist')
                    else:
                        self.report_warning(f'Unable to download {thumb_display_id}: {err}')
            if ret and not write_all: