from .cache import Cache
from .compat import functools, urllib  # isort: split
from .compat import compat_os_name, compat_shlex_quote, urllib_req_to_req
from .cookies import LenientSimpleCookie, load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.common import UnsupportedURLIE
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .networking import HEADRequest, Request, RequestDirector
from .networking.common import _REQUEST_HANDLERS, _RH_PREFERENCES
from .networking.exceptions import (
    HTTPError,
    NoSupportingHandlers,
    RequestError,
    SSLError,
    network_exceptions,
)
from .networking.impersonate import ImpersonateRequestHandler
from .plugins import directories as plugin_directories
from .postprocessor import _PLUGIN_CLASSES as plugin_pps
from .postprocessor import (
    EmbedThumbnailPP,
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    FFmpegVideoConvertorPP,
    MoveFilesAfterDownloadPP,
    get_postprocessor,
)
from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
from .update import _get_system_deprecation
from .utils import (
    UnavailableVideoError,
    format_decimal_suffix,
    orderedSet_from_options,
    remove_terminal_sequences,
    supports_terminal_sequences,
    windows_enable_vt_mode,
)
from .utils._utils import _YDLLogger
from .utils.networking import (
    HTTPHeaderDict,
    clean_headers,
    clean_proxies,
    std_headers,
)
from .version import CHANNEL, ORIGIN, RELEASE_GIT_HEAD, VARIANT, __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL:
    """YoutubeDL class.

    YoutubeDL objects are responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Given a video URL, the downloader does not know by itself how to
    extract all the needed information (a task that InfoExtractors do),
    so it has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor
    that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge of the InfoExtractors
    that are added to it, so this is a "mutual registration".
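
    A minimal, illustrative use of the class (the options are documented
    below; the URL and values here are examples only):

        from yt_dlp import YoutubeDL

        with YoutubeDL({'format': 'bestvideo+bestaudio/best', 'quiet': True}) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])

    Available options: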
    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    netrc_location:    Location of the netrc file. Defaults to ~/.netrc.
    netrc_cmd:         Use a shell command to get credentials
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A dict with keys WHEN mapped to a list of templates to
                       print to stdout. The allowed keys are video or any of the
                       items in utils.POSTPROCESS_WHEN.
                       For compatibility, a single list is also accepted
    print_to_file:     A dict with keys WHEN (same as forceprint) mapped to
                       a list of tuples with (template, filename)
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or list_thumbnails is used
    format:            Video format code. See "FORMAT SELECTION" for more details.
                       You can also pass a function. The function takes 'ctx' as
                       argument and returns the formats to download.
                       See "build_format_selector" for an implementation.
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental)
    format_sort:       A list of fields by which to sort the video formats.
                       See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    prefer_free_formats: Whether to prefer video formats with free containers
                       over non-free ones of the same quality.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file
    check_formats:     Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none),
                       'selected' (check selected formats),
                       or None (check only if requested by extractor)
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils/_utils.py)
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils/_utils.py).
                       For compatibility with youtube-dl, a single string can also be used
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be Windows-compatible
    ignoreerrors:      Do not stop on download/postprocessing errors.
                       Can be 'only_download' to ignore only download errors.
                       Default is 'only_download' for CLI, but False for API
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped
    allowed_extractors: List of regexes to match against extractor names that are allowed
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False
    playlist_items:    Specific indices of playlist to download.
    playlistrandom:    Download playlist items in random order.
    lazy_playlist:     Process playlist entries as they are received.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Print everything to stderr instead of stdout.
    consoletitle:      Display progress in the console window's titlebar.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video metadata to a .info.json file
    clean_infojson:    Remove internal metadata from the infojson
    getcomments:       Extract video comments. These will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc.
                       also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. The language can be prefixed with a "-" to
                       exclude it from the requested languages, e.g. ['all', '-live_chat']
    keepvideo:         Keep the video file after post-processing
    daterange:         A utils.DateRange object; download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Unsuitable videos for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  A set, or the name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_per_url:     Whether break_on_reject and break_on_existing
                       should act on each input URL as opposed to for the entire queue
    cookiefile:        File name or text stream from where cookies should be read and dumped to
    cookiesfrombrowser: A tuple containing the name of the browser, the profile
                       name/path from where cookies are loaded, the name of the keyring,
                       and the container name, e.g. ('chrome', ) or
                       ('vivaldi', 'default', 'BASICTEXT') or ('firefox', 'default', None, 'Meta')
    legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
                       support RFC 5746 secure renegotiation
    nocheckcertificate: Do not verify SSL certificates
    client_certificate: Path to client certificate file in PEM format. May include the private key
    client_certificate_key: Path to private key file for client certificate
    client_certificate_password: Password for client certificate private key, if encrypted.
                       If not provided and the key is encrypted, yt-dlp will ask interactively
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       (Only supported by some extractors)
    enable_file_urls:  Enable file:// URLs. This is disabled by default for security reasons.
    http_headers:      A dictionary of custom headers to be used for all requests
    proxy:             URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic: Print out sent and received HTTP traffic
    default_search:    Prepend this string if an input URL is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Whether to resolve and process url_results further
                       * False: Always process. Default for API
                       * True: Never process
                       * 'in_playlist': Do not process inside playlist/multi_video
                       * 'discard': Always process, but don't return the result
                         from inside playlist/multi_video
                       * 'discard_in_playlist': Same as "discard", but only for
                         playlists (not multi_video). Default for CLI
    wait_for_video:    If given, wait for scheduled streams to become available.
                       The value should be a tuple containing the range
                       (min_secs, max_secs) to wait between retries
    postprocessors:    A list of dictionaries, each with an entry
                       * key: The name of the postprocessor. See
                         yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Allowed values are
                         the entries of utils.POSTPROCESS_WHEN
                         Assumed to be 'post_process' if not given
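                       For example (illustrative only; see
                       yt_dlp/postprocessor/__init__.py for the valid keys):

                           'postprocessors': [{
                               'key': 'FFmpegExtractAudio',
                               'preferredcodec': 'mp3',
                           }]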
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                         Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading", or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                         None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                         unknown
                       * fragment_index: The counter of the currently
                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
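                       A minimal illustrative hook (only the documented keys
                       above are assumed):

                           def my_hook(d):
                               if d['status'] == 'finished':
                                   print('Finished downloading', d['filename'])

                       passed to YoutubeDL as 'progress_hooks': [my_hook].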
    postprocessor_hooks: A list of functions that get called on postprocessing
                       progress, with a dictionary with the entries
                       * status: One of "started", "processing", or "finished".
                         Check this first and ignore unknown values.
                       * postprocessor: Name of the postprocessor
                       * info_dict: The extracted info_dict

                       Progress hooks are guaranteed to be called at least twice
                       (with status "started" and "finished") if the processing is successful.
    merge_output_format: "/" separated list of extensions to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                         about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    impersonate:       Client to impersonate for requests.
                       An ImpersonateTarget (from yt_dlp.networking.impersonate)
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called for every video with the signature
                       (info_dict, *, incomplete: bool) -> Optional[str]
                       For backward compatibility with youtube-dl, the signature
                       (info_dict) -> Optional[str] is also allowed.
                       - If it returns a message, the video is ignored.
                       - If it returns None, the video is downloaded.
                       - If it returns utils.NO_DEFAULT, the user is interactively
                         asked whether to download the video.
                       - Raise utils.DownloadCancelled(msg) to abort remaining
                         downloads when a video is rejected.
                       match_filter_func in utils/_utils.py is one example for this.
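                       For example (illustrative only), a filter that skips
                       videos longer than ten minutes:

                           def longer_than_10min(info_dict, *, incomplete=False):
                               duration = info_dict.get('duration')
                               if duration and duration > 600:
                                   return 'Skipping: longer than 10 minutes'
                               return None  # None means the video is downloaded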
    color:             A Dictionary with output stream names as keys
                       and their respective color policy as values.
                       Can also just be a single color policy,
                       in which case it applies to all outputs.
                       Valid stream names are 'stdout' and 'stderr'.
                       Valid color policies are one of 'always', 'auto', 'no_color' or 'never'.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat, format-sort,
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
                       Refer to __init__.py for their implementation
    progress_template: Dictionary of templates for progress outputs.
                       Allowed keys are 'download', 'postprocess',
                       'download-title' (console title) and 'postprocess-title'.
                       The template is mapped on a dictionary with keys 'progress' and 'info'
    retry_sleep_functions: Dictionary of functions that take the number of attempts
                       as argument and return the time to sleep in seconds.
                       Allowed keys are 'http', 'fragment', 'file_access'
    download_ranges:   A callback function that gets called for every video with
                       the signature (info_dict, ydl) -> Iterable[Section].
                       Only the returned sections will be downloaded.
                       Each Section is a dict with the following keys:
                       * start_time: Start time of the section in seconds
                       * end_time: End time of the section in seconds
                       * title: Section title (Optional)
                       * index: Section number (Optional)
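                       For example (illustrative only), to download just the
                       first 30 seconds of every video:

                           def first_30_seconds(info_dict, ydl):
                               yield {'start_time': 0, 'end_time': 30}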
    force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
    noprogress:        Do not print the progress bar
    live_from_start:   Whether to download livestream videos from the start

    The following parameters are not used by YoutubeDL itself; they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP.
                       For compatibility with youtube-dl, a single list of args
                       can also be used

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors (default: 3)
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists to different formats at
                       discontinuities such as ad breaks (default: False)
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       E.g. {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched:      Mark videos watched (even with --simulate). Only for YouTube
    The following options are deprecated and may be removed in the future:

    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
                       - `raise DownloadCancelled(msg)` in match_filter instead
    force_generic_extractor: Force downloader to use the generic extractor
                       - Use allowed_extractors = ['generic', 'default']
    playliststart:     - Use playlist_items
                       Playlist item to start at.
    playlistend:       - Use playlist_items
                       Playlist item to end at.
    playlistreverse:   - Use playlist_items
                       Download playlist items in reverse order.
    forceurl:          - Use forceprint
                       Force printing final URL.
    forcetitle:        - Use forceprint
                       Force printing title.
    forceid:           - Use forceprint
                       Force printing ID.
    forcethumbnail:    - Use forceprint
                       Force printing thumbnail URL.
    forcedescription:  - Use forceprint
                       Force printing description.
    forcefilename:     - Use forceprint
                       Force printing final filename.
    forceduration:     - Use forceprint
                       Force printing duration.
    allsubtitles:      - Use subtitleslangs = ['all']
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    include_ads:       - Doesn't work
                       Download ads as well
    call_home:         - Not implemented
                       Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging.
    post_hooks:        - Register a custom postprocessor
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.
    prefer_ffmpeg:     - avconv support is deprecated
                       If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: - Use extractor_args
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
    no_color:          Same as `color='no_color'`
    no_overwrites:     Same as `overwrites=False`
    """

    _NUMERIC_FIELDS = {
        'width', 'height', 'asr', 'audio_channels', 'fps',
        'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
    }

    _format_fields = {
        # NB: Keep in sync with the docstring of extractor/common.py
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start', 'is_dash_periods', 'request_data',
        'preference', 'language', 'language_preference', 'quality', 'source_preference', 'cookies',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'hls_aes', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time',
    }
    _deprecated_multivalue_fields = {
        'album_artist': 'album_artists',
        'artist': 'artists',
        'composer': 'composers',
        'creator': 'creators',
        'genre': 'genres',
    }
    _format_selection_exts = {
        'audio': set(MEDIA_EXTENSIONS.common_audio),
        'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
        'storyboards': set(MEDIA_EXTENSIONS.storyboards),
    }

    def __init__(self, params=None, auto_init=True):
        """Create a YoutubeDL object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        if params is None:
            params = {}
        self.params = params
        self._ies = {}
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._num_videos = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)
        self.__header_cookies = []
        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            out=stdout,
            error=sys.stderr,
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None),
        )

        try:
            windows_enable_vt_mode()
        except Exception as e:
            self.write_debug(f'Failed to enable VT mode: {e}')

        if self.params.get('no_color'):
            if self.params.get('color') is not None:
                self.params.setdefault('_warnings', []).append(
                    'Overwriting params from "color" with "no_color"')
            self.params['color'] = 'no_color'

        term_allow_color = os.getenv('TERM', '').lower() != 'dumb'
        no_color = bool(os.getenv('NO_COLOR'))
        def process_color_policy(stream):
            stream_name = {sys.stdout: 'stdout', sys.stderr: 'stderr'}[stream]
            policy = traverse_obj(self.params, ('color', (stream_name, None), {str}), get_all=False)
            if policy in ('auto', None):
                if term_allow_color and supports_terminal_sequences(stream):
                    return 'no_color' if no_color else True
                return False
            assert policy in ('always', 'never', 'no_color'), policy
            return {'always': True, 'never': False}.get(policy, policy)

        self._allow_colors = Namespace(**{
            name: process_color_policy(stream)
            for name, stream in self._out_files.items_ if name != 'console'
        })
        system_deprecation = _get_system_deprecation()
        if system_deprecation:
            self.deprecated_feature(system_deprecation.replace('\n', '\n                    '))

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                '         If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        if self.params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = shutil.get_terminal_size().columns
                width_args = [] if width is None else ['-w', str(width)]
                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise
        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        self.params['http_headers'] = HTTPHeaderDict(std_headers, self.params.get('http_headers'))
        self._load_cookies(self.params['http_headers'].get('Cookie'))  # compat
        self.params['http_headers'].pop('Cookie', None)

        if auto_init and auto_init != 'no_verbose_header':
            self.print_debug_header()

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecated_feature(msg)
        if impersonate_target := self.params.get('impersonate'):
            if not self._impersonate_target_available(impersonate_target):
                raise YoutubeDLError(
                    f'Impersonate target "{impersonate_target}" is not available. '
                    f'Use --list-impersonate-targets to see available targets. '
                    f'You may be missing dependencies required to support this target.')

        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']
        if self.params.get('simulate') is None and any((
            self.params.get('list_thumbnails'),
            self.params.get('listformats'),
            self.params.get('listsubtitles'),
        )):
            self.params['simulate'] = 'list_only'

        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}

        if auto_init:
            self.add_default_info_extractors()
        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))
        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                when=when)

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            archive = set()
            if fn is None:
                return archive
            elif not is_path_like(fn):
                return fn

            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
            return archive

        self.archive = preload_download_archive(self.params.get('download_archive'))
    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))
    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key; it will try to get one from
        the _ies list, and if there is no instance it will create a new one and
        add it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie
    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        all_ies = {ie.IE_NAME.lower(): ie for ie in gen_extractor_classes()}
        all_ies['end'] = UnsupportedURLIE()
        try:
            ie_names = orderedSet_from_options(
                self.params.get('allowed_extractors', ['default']), {
                    'all': list(all_ies),
                    'default': [name for name, ie in all_ies.items() if ie._ENABLED],
                }, use_regex=True)
        except re.error as e:
            raise ValueError(f'Wrong regex for allowed_extractors: {e.pattern}')
        for name in ie_names:
            self.add_info_extractor(all_ies[name])
        self.write_debug(f'Loaded {len(ie_names)} extractors')
    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
            for pp in pps:
                pp.add_progress_hook(ph)
    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))
    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. '
                                     'Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. '
                                     'Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)

    def to_screen(self, message, skip_eol=False, quiet=None, only_once=False):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            return
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            return
        self._write_string(
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen, only_once=only_once)
    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)

    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
            return
        self._write_string(code, self._out_files.console)
    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        else:
            self._send_console_code(f'\033]0;{message}\007')

    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[22;0t')  # Save the title on stack

    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[23;0t')  # Restore the title from stack

    def __enter__(self):
        self.save_console_title()
        return self
    def save_cookies(self):
        if self.params.get('cookiefile') is not None:
            self.cookiejar.save()

    def __exit__(self, *args):
        self.restore_console_title()
        self.close()

    def close(self):
        self.save_cookies()
        if '_request_director' in self.__dict__:
            self._request_director.close()
            del self._request_director
    def trouble(self, message=None, tb=None, is_error=True):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        @param tb          If given, is additional traceback information
        @param is_error    Whether to raise error according to ignoreerrors
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not is_error:
            return
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    Styles = Namespace(
        HEADERS='yellow',
        EMPHASIS='light blue',
        FILENAME='green',
        ID='green',
        DELIM='blue',
        ERROR='red',
        BAD_FORMAT='light red',
        WARNING='yellow',
        SUPPRESS='light black',
    )
    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        text = str(text)
        if test_encoding:
            original_text = text
            # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
                return fallback
        return format_text(text, f) if allow_colors is True else text if fallback is None else fallback
, *args
, **kwargs
):
1038 return self
._format
_text
(self
._out
_files
.out
, self
._allow
_colors
.out
, *args
, **kwargs
)
1040 def _format_screen(self
, *args
, **kwargs
):
1041 return self
._format
_text
(self
._out
_files
.screen
, self
._allow
_colors
.screen
, *args
, **kwargs
)
1043 def _format_err(self
, *args
, **kwargs
):
1044 return self
._format
_text
(self
._out
_files
.error
, self
._allow
_colors
.error
, *args
, **kwargs
)
    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'
        If stderr is a tty file the 'WARNING:' will be colored
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message, *, stacklevel=0):
        deprecation_warning(
            message, stacklevel=stacklevel + 1, printer=self.report_error, is_error=False)

    def deprecated_feature(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'Deprecated Feature: {message}')
        self.to_stderr(f'{self._format_err("Deprecated Feature:", self.Styles.ERROR)} {message}', True)
    def report_error(self, message, *args, **kwargs):
        '''
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

    def write_debug(self, message, only_once=False):
        '''Log debug message, or print message to stderr'''
        if not self.params.get('verbose', False):
            return
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)
    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')
    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
        else:
            self.report_warning(msg)
    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']

    def _parse_outtmpl(self):
        sanitize = IDENTITY
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})
    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict), '"paths" parameter must be a dictionary'
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')
        return sanitize_path(path, force=self.params.get('windowsfilenames'))
    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$'
        # correspondingly that is not what we want since we need to keep
        # '%%' intact for template dict substitution step. Working around
        # with boundary-alike separator hack.
        sep = ''.join(random.choices(string.ascii_letters, k=32))
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. E.g. for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')
    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err
    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)
        return info_dict
    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed
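                           E.g. (illustrative): evaluating '%(title)s-%(id)s.%(ext)s'
                           against {'title': 'a', 'id': 'b', 'ext': 'mp4'} via
                           evaluate_outtmpl yields 'a-b.mp4'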
        """

        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int, slice or "{field, ...}"
        FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}
        FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {
            'inner': FIELD_INNER_RE,
            'field': rf'\w*(?:\.{FIELD_INNER_RE})*',
        }
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?xs)
            (?P<negate>-)?
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<remaining>
                (?P<alternate>(?<!\\),[^|&)]+)?
                (?:&(?P<replacement>.*?))?
                (?:\|(?P<default>.*?))?
            )$''')
        def _from_user_input(field):
            if field == ':':
                return ...
            elif ':' in field:
                return slice(*map(int_or_none, field.split(':')))
            elif int_or_none(field) is not None:
                return int(field)
            else:
                return field

        def _traverse_infodict(fields):
            fields = [f for x in re.split(r'\.({.+?})\.?', fields)
                      for f in ([x] if x.startswith('{') else x.split('.'))]
            for i in (0, -1):
                if fields and not fields[i]:
                    fields.pop(i)

            for i, f in enumerate(fields):
                if not f.startswith('{'):
                    fields[i] = _from_user_input(f)
                    continue
                assert f.endswith('}'), f'No closing brace for {f} in {fields}'
                fields[i] = {k: list(map(_from_user_input, k.split('.'))) for k in f[1:-1].split(',')}

            return traverse_obj(info_dict, fields, traverse_string=True)
        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
            if sanitize and value == '':
                value = None
            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')
        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']
                else NO_DEFAULT))

        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            return repr(obj)

        class _ReplacementFormatter(string.Formatter):
            def get_field(self, field_name, args, kwargs):
                if field_name.isdigit():
                    return args[0]
                raise ValueError('Unsupported field')

        replacement_formatter = _ReplacementFormatter()
        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            value, replacement, default, last_field = None, None, na, ''
            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                last_field, replacement = mobj['fields'], mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
                else:
                    break

            if None not in (value, replacement):
                try:
                    value = replacement_formatter.format(replacement, value)
                except ValueError:
                    value, default = None, na

            fmt = outer_mobj.group('format')
            if fmt == 's' and last_field in field_size_compat_map.keys() and isinstance(value, int):
                fmt = f'0{field_size_compat_map[last_field]:d}d'

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
            if value is None:
                value, fmt = default, 's'
            elif fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(
                    value, default=_dumpjson_default,
                    indent=4 if '#' in flags else None, ensure_ascii='+' not in flags), str_fmt
            elif fmt[-1] == 'h':  # html
                value, fmt = escapeHTML(str(value)), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode() % str(value).encode()
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                    value), str_fmt
            elif fmt[-1] == 'D':  # decimal suffix
                num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
                value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
                                              factor=1024 if '#' in flags else 1000)
            elif fmt[-1] == 'S':  # filename sanitization
                value, fmt = filename_sanitizer(last_field, value, restricted='#' in flags), str_fmt
            elif fmt[-1] == 'c':
                if value:
                    value = str(value)[0]
                else:
                    fmt = str_fmt
            elif fmt[-1] not in 'rsa':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'

            if sanitize:
                # If value is an object, sanitize might convert it to a string
                # So we convert it to repr first
                if fmt[-1] == 'r':
                    value, fmt = repr(value), str_fmt
                elif fmt[-1] == 'a':
                    value, fmt = ascii(value), str_fmt
                if fmt[-1] in 'csra':
                    value = sanitizer(last_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT
    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict
    def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
        assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
        if outtmpl is None:
            outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
        try:
            outtmpl = self._outtmpl_expandpath(outtmpl)
            filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
            if not filename:
                return None

            if tmpl_type in ('', 'temp'):
                final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
                if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
                    filename = replace_extension(filename, ext, final_ext)
            elif tmpl_type:
                force_ext = OUTTMPL_TYPES[tmpl_type]
                if force_ext:
                    filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                no_ext, *ext = filename.rsplit('.', 2)
                filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None
    def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
        """Generate the output filename"""
        if outtmpl:
            assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
            dir_type = None
        filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
        if not filename and dir_type not in ('', 'temp'):
            return ''

        if warn:
            if not self.params.get('paths'):
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
        if filename == '-' or not filename:
            return filename

        return self.get_output_path(dir_type, filename)
    def _match_entry(self, info_dict, incomplete=False, silent=False):
        """Returns None if the file should be downloaded"""
        _type = 'video' if 'playlist-match-filter' in self.params['compat_opts'] else info_dict.get('_type', 'video')
        assert incomplete or _type == 'video', 'Only video result can be considered complete'

        video_title = info_dict.get('title', info_dict.get('id', 'entry'))

        def check_filter():
            if _type in ('playlist', 'multi_video'):
                return
            elif _type in ('url', 'url_transparent') and not try_call(
                    lambda: self.get_info_extractor(info_dict['ie_key']).is_single_video(info_dict['url'])):
                return

            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'

            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title

            match_filter = self.params.get('match_filter')
            if match_filter is None:
                return None

            cancelled = None
            try:
                try:
                    ret = match_filter(info_dict, incomplete=incomplete)
                except TypeError:
                    # For backward compatibility
                    ret = None if incomplete else match_filter(info_dict)
            except DownloadCancelled as err:
                if err.msg is not NO_DEFAULT:
                    raise
                ret, cancelled = err.msg, err

            if ret is NO_DEFAULT:
                while True:
                    filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
                    reply = input(self._format_screen(
                        f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
                    if reply in {'y', ''}:
                        return None
                    elif reply == 'n':
                        if cancelled:
                            raise type(cancelled)(f'Skipping {video_title}')
                        return f'Skipping {video_title}'
            return ret

        if self.in_download_archive(info_dict):
            reason = ''.join((
                format_field(info_dict, 'id', f'{self._format_screen("%s", self.Styles.ID)}: '),
                format_field(info_dict, 'title', f'{self._format_screen("%s", self.Styles.EMPHASIS)} '),
                'has already been recorded in the archive'))
            break_opt, break_err = 'break_on_existing', ExistingVideoReached
        else:
            try:
                reason = check_filter()
            except DownloadCancelled as e:
                reason, break_opt, break_err = e.msg, 'match_filter', type(e)
            else:
                break_opt, break_err = 'break_on_reject', RejectedVideoReached
        if reason is not None:
            if not silent:
                self.to_screen('[download] ' + reason)
            if self.params.get(break_opt, False):
                raise break_err()
        return reason

    @staticmethod
    def add_extra_info(info_dict, extra_info):
        '''Set the keys from extra_info in info dict if they are missing'''
        for key, value in extra_info.items():
            info_dict.setdefault(key, value)

    def extract_info(self, url, download=True, ie_key=None, extra_info=None,
                     process=True, force_generic_extractor=False):
        """
        Extract and return the information dictionary of the URL

        Arguments:
        @param url          URL to extract

        Keyword arguments:
        @param download     Whether to download videos
        @param process      Whether to resolve all unresolved references (URLs, playlist items).
                            Must be True for download to work
        @param ie_key       Use only the extractor with this key

        @param extra_info   Dictionary containing the extra values to add to the info (For internal use only)
        @param force_generic_extractor  Force using the generic extractor (Deprecated; use ie_key='Generic')
        """

        if extra_info is None:
            extra_info = {}

        if not ie_key and force_generic_extractor:
            ie_key = 'Generic'

        if ie_key:
            ies = {ie_key: self._ies[ie_key]} if ie_key in self._ies else {}
        else:
            ies = self._ies

        for key, ie in ies.items():
            if not ie.suitable(url):
                continue

            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')

            temp_id = ie.get_temp_id(url)
            if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': key}):
                self.to_screen(f'[download] {self._format_screen(temp_id, self.Styles.ID)}: '
                               'has already been recorded in the archive')
                if self.params.get('break_on_existing', False):
                    raise ExistingVideoReached()
                break
            return self.__extract_info(url, self.get_info_extractor(key), download, extra_info, process)
        else:
            extractors_restricted = self.params.get('allowed_extractors') not in (None, ['default'])
            self.report_error(f'No suitable extractor{format_field(ie_key, None, " (%s)")} found for URL {url}',
                              tb=False if extractors_restricted else None)
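
    # Usage sketch (illustrative): `extract_info` is the main programmatic
    # entry point. With `download=False`, only metadata is returned:
    #
    #     with YoutubeDL({'quiet': True}) as ydl:
    #         info = ydl.extract_info('https://example.com/watch?v=xyz', download=False)
    #         print(info['id'], info.get('title'))
    #
    # The URL here is a placeholder; any URL that a registered extractor
    # reports as suitable will do.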

    def _handle_extraction_exceptions(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            while True:
                try:
                    return func(self, *args, **kwargs)
                except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
                    raise
                except ReExtractInfo as e:
                    if e.expected:
                        self.to_screen(f'{e}; Re-extracting data')
                    else:
                        self.to_stderr('\r')
                        self.report_warning(f'{e}; Re-extracting data')
                    continue
                except GeoRestrictedError as e:
                    msg = e.msg
                    if e.countries:
                        msg += '\nThis video is available in %s.' % ', '.join(
                            map(ISO3166Utils.short2full, e.countries))
                    msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
                    self.report_error(msg)
                except ExtractorError as e:  # An error we somewhat expected
                    self.report_error(str(e), e.format_traceback())
                except Exception as e:
                    if self.params.get('ignoreerrors'):
                        self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
                    else:
                        raise
                break
        return wrapper

    def _wait_for_video(self, ie_result={}):
        if (not self.params.get('wait_for_video')
                or ie_result.get('_type', 'video') != 'video'
                or ie_result.get('formats') or ie_result.get('url')):
            return

        format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
        last_msg = ''

        def progress(msg):
            nonlocal last_msg
            full_msg = f'{msg}\n'
            if not self.params.get('noprogress'):
                full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
            elif last_msg:
                return
            self.to_screen(full_msg, skip_eol=True)
            last_msg = msg

        min_wait, max_wait = self.params.get('wait_for_video')
        diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
        if diff is None and ie_result.get('live_status') == 'is_upcoming':
            diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
            self.report_warning('Release time of video is not known')
        elif ie_result and (diff or 0) <= 0:
            self.report_warning('Video should already be available according to extracted info')
        diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
        self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')

        wait_till = time.time() + diff
        try:
            while True:
                diff = wait_till - time.time()
                if diff <= 0:
                    progress('')
                    raise ReExtractInfo('[wait] Wait period ended', expected=True)
                progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
                time.sleep(1)
        except KeyboardInterrupt:
            progress('')
            raise ReExtractInfo('[wait] Interrupted by user', expected=True)
        except BaseException as e:
            if not isinstance(e, ReExtractInfo):
                self.to_screen('')
            raise

    def _load_cookies(self, data, *, autoscope=True):
        """Loads cookies from a `Cookie` header

        This tries to work around the security vulnerability of passing cookies to every domain.
        See: https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-v8mc-9377-rwjj

        @param data         The Cookie header as a string to load the cookies from
        @param autoscope    If `False`, scope cookies using Set-Cookie syntax and error for cookie without domains
                            If `True`, save cookies for later to be stored in the jar with a limited scope
                            If a URL, save cookies in the jar with the domain of the URL
        """
        for cookie in LenientSimpleCookie(data).values():
            if autoscope and any(cookie.values()):
                raise ValueError('Invalid syntax in Cookie Header')

            domain = cookie.get('domain') or ''
            expiry = cookie.get('expires')
            if expiry == '':  # 0 is valid
                expiry = None
            prepared_cookie = http.cookiejar.Cookie(
                cookie.get('version') or 0, cookie.key, cookie.value, None, False,
                domain, True, True, cookie.get('path') or '', bool(cookie.get('path')),
                cookie.get('secure') or False, expiry, False, None, None, {})

            if domain:
                self.cookiejar.set_cookie(prepared_cookie)
            elif autoscope is True:
                self.deprecated_feature(
                    'Passing cookies as a header is a potential security risk; '
                    'they will be scoped to the domain of the downloaded urls. '
                    'Please consider loading cookies from a file or browser instead.')
                self.__header_cookies.append(prepared_cookie)
            elif autoscope:
                self.report_warning(
                    'The extractor result contains an unscoped cookie as an HTTP header. '
                    f'If you are using yt-dlp with an input URL{bug_reports_message(before=",")}',
                    only_once=True)
                self._apply_header_cookies(autoscope, [prepared_cookie])
            else:
                self.report_error('Unscoped cookies are not allowed; please specify some sort of scoping',
                                  tb=False, is_error=False)

    def _apply_header_cookies(self, url, cookies=None):
        """Applies stray header cookies to the provided url

        This loads header cookies and scopes them to the domain provided in `url`.
        While this is not ideal, it helps reduce the risk of them being sent
        to an unintended destination while mostly maintaining compatibility.
        """
        parsed = urllib.parse.urlparse(url)
        if not parsed.hostname:
            return

        for cookie in map(copy.copy, cookies or self.__header_cookies):
            cookie.domain = f'.{parsed.hostname}'
            self.cookiejar.set_cookie(cookie)
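
    # Illustrative sketch of the scoping flow: cookies passed as a raw header
    # are held back until a URL is known, then bound to that host:
    #
    #     ydl._load_cookies('session=abc123', autoscope=True)   # held for later
    #     ydl._apply_header_cookies('https://example.com/v/1')  # -> .example.com
    #
    # Both methods are internal; the supported interfaces are the `cookiefile`
    # and `cookiesfrombrowser` params. The cookie value above is made up.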

    @_handle_extraction_exceptions
    def __extract_info(self, url, ie, download, extra_info, process):
        self._apply_header_cookies(url)

        try:
            ie_result = ie.extract(url)
        except UserNotLive as e:
            if process:
                if self.params.get('wait_for_video'):
                    self.report_warning(e)
                self._wait_for_video()
            raise
        if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
            self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
            return
        if isinstance(ie_result, list):
            # Backwards compatibility: old IE result format
            ie_result = {
                '_type': 'compat_list',
                'entries': ie_result,
            }
        if extra_info.get('original_url'):
            ie_result.setdefault('original_url', extra_info['original_url'])
        self.add_default_extra_info(ie_result, ie, url)
        if process:
            self._wait_for_video(ie_result)
            return self.process_ie_result(ie_result, download, extra_info)
        else:
            return ie_result

    def add_default_extra_info(self, ie_result, ie, url):
        if url is not None:
            self.add_extra_info(ie_result, {
                'webpage_url': url,
                'original_url': url,
            })
        webpage_url = ie_result.get('webpage_url')
        if webpage_url:
            self.add_extra_info(ie_result, {
                'webpage_url_basename': url_basename(webpage_url),
                'webpage_url_domain': get_domain(webpage_url),
            })
        if ie is not None:
            self.add_extra_info(ie_result, {
                'extractor': ie.IE_NAME,
                'extractor_key': ie.ie_key(),
            })

    def process_ie_result(self, ie_result, download=True, extra_info=None):
        """
        Take the result of the ie (may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.
        """
        if extra_info is None:
            extra_info = {}
        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            ie_result['url'] = sanitize_url(
                ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
            if ie_result.get('original_url') and not extra_info.get('original_url'):
                extra_info = {'original_url': ie_result['original_url'], **extra_info}

            extract_flat = self.params.get('extract_flat', False)
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                    or extract_flat is True):
                info_copy = ie_result.copy()
                ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
                if ie and not ie_result.get('id'):
                    info_copy['id'] = ie.get_temp_id(ie_result['url'])
                self.add_default_extra_info(info_copy, ie, ie_result['url'])
                self.add_extra_info(info_copy, extra_info)
                info_copy, _ = self.pre_process(info_copy)
                self._fill_common_fields(info_copy, False)
                self.__forced_printings(info_copy)
                self._raise_pending_errors(info_copy)
                if self.params.get('force_write_download_archive', False):
                    self.record_download_archive(info_copy)
                return ie_result

        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            ie_result = self.process_video_result(ie_result, download=download)
            self._raise_pending_errors(ie_result)
            additional_urls = (ie_result or {}).get('additional_urls')
            if additional_urls:
                # TODO: Improve MetadataParserPP to allow setting a list
                if isinstance(additional_urls, str):
                    additional_urls = [additional_urls]
                self.to_screen(
                    '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
                self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
                ie_result['additional_entries'] = [
                    self.extract_info(
                        url, download, extra_info=extra_info,
                        force_generic_extractor=self.params.get('force_generic_extractor'))
                    for url in additional_urls
                ]
            return ie_result
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(
                ie_result['url'], download,
                ie_key=ie_result.get('ie_key'),
                extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # extract_info may return None when ignoreerrors is enabled and
            # extraction failed with an error, don't crash and return early
            # in this case
            if not info:
                return info

            exempted_fields = {'_type', 'url', 'ie_key'}
            if not ie_result.get('section_end') and ie_result.get('section_start') is None:
                # For video clips, the id etc of the clip extractor should be used
                exempted_fields |= {'id', 'extractor', 'extractor_key'}

            new_result = info.copy()
            new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))

            # Extracted info may not be a video result (i.e.
            # info.get('_type', 'video') != video) but rather an url or
            # url_transparent. In such cases outer metadata (from ie_result)
            # should be propagated to inner one (info). For this to happen
            # _type of info should be overridden with url_transparent. This
            # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
            if new_result.get('_type') == 'url':
                new_result['_type'] = 'url_transparent'

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type in ('playlist', 'multi_video'):
            # Protect from infinite recursion due to recursively nested playlists
            # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
            webpage_url = ie_result.get('webpage_url')  # Playlists may not have webpage_url
            if webpage_url and webpage_url in self._playlist_urls:
                self.to_screen(
                    '[download] Skipping already downloaded playlist: %s'
                    % ie_result.get('title') or ie_result.get('id'))
                return

            self._playlist_level += 1
            self._playlist_urls.add(webpage_url)
            self._fill_common_fields(ie_result, False)
            self._sanitize_thumbnails(ie_result)
            try:
                return self.__process_playlist(ie_result, download)
            finally:
                self._playlist_level -= 1
                if not self._playlist_level:
                    self._playlist_urls.clear()
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                self.add_extra_info(r, {
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'webpage_url_domain': get_domain(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                })
                return r

            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)

    def _ensure_dir_exists(self, path):
        return make_dir(path, self.report_error)

    @staticmethod
    def _playlist_infodict(ie_result, strict=False, **kwargs):
        info = {
            'playlist_count': ie_result.get('playlist_count'),
            'playlist': ie_result.get('title') or ie_result.get('id'),
            'playlist_id': ie_result.get('id'),
            'playlist_title': ie_result.get('title'),
            'playlist_uploader': ie_result.get('uploader'),
            'playlist_uploader_id': ie_result.get('uploader_id'),
            **kwargs,
        }
        if strict:
            return info
        if ie_result.get('webpage_url'):
            info.update({
                'webpage_url': ie_result['webpage_url'],
                'webpage_url_basename': url_basename(ie_result['webpage_url']),
                'webpage_url_domain': get_domain(ie_result['webpage_url']),
            })
        return {
            **info,
            'playlist_index': 0,
            '__last_playlist_index': max(ie_result.get('requested_entries') or (0, 0)),
            'extractor': ie_result['extractor'],
            'extractor_key': ie_result['extractor_key'],
        }
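
    # Note (illustrative): the `playlist_*` keys built here are what make
    # output templates such as
    #     '%(playlist)s/%(playlist_index)03d - %(title)s.%(ext)s'
    # resolvable for the entries processed by __process_playlist below.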

    def __process_playlist(self, ie_result, download):
        """Process each entry in the playlist"""
        assert ie_result['_type'] in ('playlist', 'multi_video')

        common_info = self._playlist_infodict(ie_result, strict=True)
        title = common_info.get('playlist') or '<Untitled>'
        if self._match_entry(common_info, incomplete=True) is not None:
            return
        self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')

        all_entries = PlaylistEntries(self, ie_result)
        entries = orderedSet(all_entries.get_requested_items(), lazy=True)

        lazy = self.params.get('lazy_playlist')
        if lazy:
            resolved_entries, n_entries = [], 'N/A'
            ie_result['requested_entries'], ie_result['entries'] = None, None
        else:
            entries = resolved_entries = list(entries)
            n_entries = len(resolved_entries)
            ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
        if not ie_result.get('playlist_count'):
            # Better to do this after potentially exhausting entries
            ie_result['playlist_count'] = all_entries.get_full_count()

        extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
        ie_copy = collections.ChainMap(ie_result, extra)

        _infojson_written = False
        write_playlist_files = self.params.get('allow_playlist_files', True)
        if write_playlist_files and self.params.get('list_thumbnails'):
            self.list_thumbnails(ie_result)
        if write_playlist_files and not self.params.get('simulate'):
            _infojson_written = self._write_info_json(
                'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
            if _infojson_written is None:
                return
            if self._write_description('playlist', ie_result,
                                       self.prepare_filename(ie_copy, 'pl_description')) is None:
                return
            # TODO: This should be passed to ThumbnailsConvertor if necessary
            self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))

        if lazy:
            if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
                self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
        elif self.params.get('playlistreverse'):
            entries.reverse()
        elif self.params.get('playlistrandom'):
            random.shuffle(entries)

        self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} items'
                       f'{format_field(ie_result, "playlist_count", " of %s")}')

        keep_resolved_entries = self.params.get('extract_flat') != 'discard'
        if self.params.get('extract_flat') == 'discard_in_playlist':
            keep_resolved_entries = ie_result['_type'] != 'playlist'
        if keep_resolved_entries:
            self.write_debug('The information of all playlist entries will be held in memory')

        failures = 0
        max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
        for i, (playlist_index, entry) in enumerate(entries):
            if lazy:
                resolved_entries.append((playlist_index, entry))
            if not entry:
                continue

            entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
            if not lazy and 'playlist-index' in self.params['compat_opts']:
                playlist_index = ie_result['requested_entries'][i]

            entry_copy = collections.ChainMap(entry, {
                **common_info,
                'n_entries': int_or_none(n_entries),
                'playlist_index': playlist_index,
                'playlist_autonumber': i + 1,
            })

            if self._match_entry(entry_copy, incomplete=True) is not None:
                # For compatibility with youtube-dl. See https://github.com/yt-dlp/yt-dlp/issues/4369
                resolved_entries[i] = (playlist_index, NO_DEFAULT)
                continue

            self.to_screen('[download] Downloading item %s of %s' % (
                self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))

            entry_result = self.__process_iterable_entry(entry, download, collections.ChainMap({
                'playlist_index': playlist_index,
                'playlist_autonumber': i + 1,
            }, extra))
            if not entry_result:
                failures += 1
            if failures >= max_failures:
                self.report_error(
                    f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
                break
            if keep_resolved_entries:
                resolved_entries[i] = (playlist_index, entry_result)

        # Update with processed data
        ie_result['entries'] = [e for _, e in resolved_entries if e is not NO_DEFAULT]
        ie_result['requested_entries'] = [i for i, e in resolved_entries if e is not NO_DEFAULT]
        if ie_result['requested_entries'] == try_call(lambda: list(range(1, ie_result['playlist_count'] + 1))):
            # Do not set for full playlist
            ie_result.pop('requested_entries')

        # Write the updated info to json
        if _infojson_written is True and self._write_info_json(
                'updated playlist', ie_result,
                self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
            return

        ie_result = self.run_all_pps('playlist', ie_result)
        self.to_screen(f'[download] Finished downloading playlist: {title}')
        return ie_result

    @_handle_extraction_exceptions
    def __process_iterable_entry(self, entry, download, extra_info):
        return self.process_ie_result(
            entry, download=download, extra_info=extra_info)

    def _build_format_filter(self, filter_spec):
        " Returns a function to filter the formats according to the filter_spec "

        OPERATORS = {
            '<': operator.lt,
            '<=': operator.le,
            '>': operator.gt,
            '>=': operator.ge,
            '=': operator.eq,
            '!=': operator.ne,
        }
        operator_rex = re.compile(r'''(?x)\s*
            (?P<key>[\w.-]+)\s*
            (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
            ''' % '|'.join(map(re.escape, OPERATORS.keys())))
        m = operator_rex.fullmatch(filter_spec)
        if m:
            try:
                comparison_value = int(m.group('value'))
            except ValueError:
                comparison_value = parse_filesize(m.group('value'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('value') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid value %r in format specification %r' % (
                            m.group('value'), filter_spec))
            op = OPERATORS[m.group('op')]

        if not m:
            STR_OPERATORS = {
                '=': operator.eq,
                '^=': lambda attr, value: attr.startswith(value),
                '$=': lambda attr, value: attr.endswith(value),
                '*=': lambda attr, value: value in attr,
                '~=': lambda attr, value: value.search(attr) is not None
            }
            str_operator_rex = re.compile(r'''(?x)\s*
                (?P<key>[a-zA-Z0-9._-]+)\s*
                (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
                (?P<quote>["'])?
                (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
                (?(quote)(?P=quote))\s*
                ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
            m = str_operator_rex.fullmatch(filter_spec)
            if m:
                if m.group('op') == '~=':
                    comparison_value = re.compile(m.group('value'))
                else:
                    comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
                str_op = STR_OPERATORS[m.group('op')]
                if m.group('negation'):
                    op = lambda attr, value: not str_op(attr, value)
                else:
                    op = str_op

        if not m:
            raise SyntaxError('Invalid filter specification %r' % filter_spec)

        def _filter(f):
            actual_value = f.get(m.group('key'))
            if actual_value is None:
                return m.group('none_inclusive')
            return op(actual_value, comparison_value)
        return _filter
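
    # Illustrative sketch: these are the bracketed filters from CLI specs like
    # `-f "best[height<=720][ext=mp4]"`. The built filter is a plain predicate
    # over a format dict:
    #
    #     accept = ydl._build_format_filter('height<=720')
    #     accept({'height': 480})   # truthy -> format is kept
    #     accept({'height': 1080})  # falsy  -> format is dropped
    #     accept({})                # missing key kept only with 'height<=?720'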

    def _check_formats(self, formats):
        for f in formats:
            self.to_screen('[info] Testing format %s' % f['format_id'])
            path = self.get_output_path('temp')
            if not self._ensure_dir_exists(f'{path}/'):
                continue
            temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
            temp_file.close()
            try:
                success, _ = self.dl(temp_file.name, f, test=True)
            except (DownloadError, OSError, ValueError) + network_exceptions:
                success = False
            finally:
                if os.path.exists(temp_file.name):
                    try:
                        os.remove(temp_file.name)
                    except OSError:
                        self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
            if success:
                yield f
            else:
                self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])

    def _default_format_spec(self, info_dict, download=True):

        def can_merge():
            merger = FFmpegMergerPP(self)
            return merger.available and merger.can_merge()

        prefer_best = (
            not self.params.get('simulate')
            and download
            and (
                not can_merge()
                or info_dict.get('is_live') and not self.params.get('live_from_start')
                or self.params['outtmpl']['default'] == '-'))
        compat = (
            prefer_best
            or self.params.get('allow_multiple_audio_streams', False)
            or 'format-spec' in self.params['compat_opts'])

        return (
            'best/bestvideo+bestaudio' if prefer_best
            else 'bestvideo*+bestaudio/best' if not compat
            else 'bestvideo+bestaudio/best')
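
    # Worked example (illustrative): with default options and a working ffmpeg,
    # `prefer_best` and `compat` are both false, so the spec resolves to
    # 'bestvideo*+bestaudio/best'. When writing to stdout (`-o -`), or when
    # ffmpeg cannot merge, it degrades to 'best/bestvideo+bestaudio' so that
    # no merging is required.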

    def build_format_selector(self, format_spec):
        def syntax_error(note, start):
            message = (
                'Invalid format specification: '
                '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
            return SyntaxError(message)

        PICKFIRST = 'PICKFIRST'
        MERGE = 'MERGE'
        SINGLE = 'SINGLE'
        GROUP = 'GROUP'
        FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])

        allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
                                  'video': self.params.get('allow_multiple_video_streams', False)}

        def _parse_filter(tokens):
            filter_parts = []
            for type, string_, start, _, _ in tokens:
                if type == tokenize.OP and string_ == ']':
                    return ''.join(filter_parts)
                else:
                    filter_parts.append(string_)

        def _remove_unused_ops(tokens):
            # Remove operators that we don't use and join them with the surrounding strings.
            # E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
            ALLOWED_OPS = ('/', '+', ',', '(', ')')
            last_string, last_start, last_end, last_line = None, None, None, None
            for type, string_, start, end, line in tokens:
                if type == tokenize.OP and string_ == '[':
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string_, start, end, line
                    # everything inside brackets will be handled by _parse_filter
                    for type, string_, start, end, line in tokens:
                        yield type, string_, start, end, line
                        if type == tokenize.OP and string_ == ']':
                            break
                elif type == tokenize.OP and string_ in ALLOWED_OPS:
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string_, start, end, line
                elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
                    if not last_string:
                        last_string = string_
                        last_start = start
                        last_end = end
                    else:
                        last_string += string_
            if last_string:
                yield tokenize.NAME, last_string, last_start, last_end, last_line

        def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
            selectors = []
            current_selector = None
            for type, string_, start, _, _ in tokens:
                # ENCODING is only defined in Python 3.x
                if type == getattr(tokenize, 'ENCODING', None):
                    continue
                elif type in [tokenize.NAME, tokenize.NUMBER]:
                    current_selector = FormatSelector(SINGLE, string_, [])
                elif type == tokenize.OP:
                    if string_ == ')':
                        if not inside_group:
                            # ')' will be handled by the parentheses group
                            tokens.restore_last_token()
                        break
                    elif inside_merge and string_ in ['/', ',']:
                        tokens.restore_last_token()
                        break
                    elif inside_choice and string_ == ',':
                        tokens.restore_last_token()
                        break
                    elif string_ == ',':
                        if not current_selector:
                            raise syntax_error('"," must follow a format selector', start)
                        selectors.append(current_selector)
                        current_selector = None
                    elif string_ == '/':
                        if not current_selector:
                            raise syntax_error('"/" must follow a format selector', start)
                        first_choice = current_selector
                        second_choice = _parse_format_selection(tokens, inside_choice=True)
                        current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
                    elif string_ == '[':
                        if not current_selector:
                            current_selector = FormatSelector(SINGLE, 'best', [])
                        format_filter = _parse_filter(tokens)
                        current_selector.filters.append(format_filter)
                    elif string_ == '(':
                        if current_selector:
                            raise syntax_error('Unexpected "("', start)
                        group = _parse_format_selection(tokens, inside_group=True)
                        current_selector = FormatSelector(GROUP, group, [])
                    elif string_ == '+':
                        if not current_selector:
                            raise syntax_error('Unexpected "+"', start)
                        selector_1 = current_selector
                        selector_2 = _parse_format_selection(tokens, inside_merge=True)
                        if not selector_2:
                            raise syntax_error('Expected a selector', start)
                        current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
                    else:
                        raise syntax_error(f'Operator not recognized: "{string_}"', start)
                elif type == tokenize.ENDMARKER:
                    break
            if current_selector:
                selectors.append(current_selector)
            return selectors

        def _merge(formats_pair):
            format_1, format_2 = formats_pair

            formats_info = []
            formats_info.extend(format_1.get('requested_formats', (format_1,)))
            formats_info.extend(format_2.get('requested_formats', (format_2,)))

            if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
                get_no_more = {'video': False, 'audio': False}
                for (i, fmt_info) in enumerate(formats_info):
                    if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
                        formats_info.pop(i)
                        continue
                    for aud_vid in ['audio', 'video']:
                        if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
                            if get_no_more[aud_vid]:
                                formats_info.pop(i)
                                break
                            get_no_more[aud_vid] = True

            if len(formats_info) == 1:
                return formats_info[0]

            video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
            audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']

            the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
            the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None

            output_ext = get_compatible_ext(
                vcodecs=[f.get('vcodec') for f in video_fmts],
                acodecs=[f.get('acodec') for f in audio_fmts],
                vexts=[f['ext'] for f in video_fmts],
                aexts=[f['ext'] for f in audio_fmts],
                preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
                             or self.params.get('prefer_free_formats') and ('webm', 'mkv')))

            filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))

            new_dict = {
                'requested_formats': formats_info,
                'format': '+'.join(filtered('format')),
                'format_id': '+'.join(filtered('format_id')),
                'ext': output_ext,
                'protocol': '+'.join(map(determine_protocol, formats_info)),
                'language': '+'.join(orderedSet(filtered('language'))) or None,
                'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
                'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
                'tbr': sum(filtered('tbr', 'vbr', 'abr')),
            }

            if the_only_video:
                new_dict.update({
                    'width': the_only_video.get('width'),
                    'height': the_only_video.get('height'),
                    'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
                    'fps': the_only_video.get('fps'),
                    'dynamic_range': the_only_video.get('dynamic_range'),
                    'vcodec': the_only_video.get('vcodec'),
                    'vbr': the_only_video.get('vbr'),
                    'stretched_ratio': the_only_video.get('stretched_ratio'),
                    'aspect_ratio': the_only_video.get('aspect_ratio'),
                })

            if the_only_audio:
                new_dict.update({
                    'acodec': the_only_audio.get('acodec'),
                    'abr': the_only_audio.get('abr'),
                    'asr': the_only_audio.get('asr'),
                    'audio_channels': the_only_audio.get('audio_channels')
                })

            return new_dict

        def _check_formats(formats):
            if self.params.get('check_formats') == 'selected':
                yield from self._check_formats(formats)
                return
            elif (self.params.get('check_formats') is not None
                    or self.params.get('allow_unplayable_formats')):
                yield from formats
                return

            for f in formats:
                if f.get('has_drm') or f.get('__needs_testing'):
                    yield from self._check_formats([f])
                else:
                    yield f

        def _build_selector_function(selector):
            if isinstance(selector, list):  # ,
                fs = [_build_selector_function(s) for s in selector]

                def selector_function(ctx):
                    for f in fs:
                        yield from f(ctx)
                return selector_function

            elif selector.type == GROUP:  # ()
                selector_function = _build_selector_function(selector.selector)

            elif selector.type == PICKFIRST:  # /
                fs = [_build_selector_function(s) for s in selector.selector]

                def selector_function(ctx):
                    for f in fs:
                        picked_formats = list(f(ctx))
                        if picked_formats:
                            return picked_formats
                    return []

            elif selector.type == MERGE:  # +
                selector_1, selector_2 = map(_build_selector_function, selector.selector)

                def selector_function(ctx):
                    for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
                        yield _merge(pair)

            elif selector.type == SINGLE:  # atom
                format_spec = selector.selector or 'best'

                # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
                if format_spec == 'all':
                    def selector_function(ctx):
                        yield from _check_formats(ctx['formats'][::-1])
                elif format_spec == 'mergeall':
                    def selector_function(ctx):
                        formats = list(_check_formats(
                            f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
                        if not formats:
                            return
                        merged_format = formats[-1]
                        for f in formats[-2::-1]:
                            merged_format = _merge((merged_format, f))
                        yield merged_format

                else:
                    format_fallback, seperate_fallback, format_reverse, format_idx = False, None, True, 1
                    mobj = re.match(
                        r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
                        format_spec)
                    if mobj is not None:
                        format_idx = int_or_none(mobj.group('n'), default=1)
                        format_reverse = mobj.group('bw')[0] == 'b'
                        format_type = (mobj.group('type') or [None])[0]
                        not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
                        format_modified = mobj.group('mod') is not None

                        format_fallback = not format_type and not format_modified  # for b, w
                        _filter_f = (
                            (lambda f: f.get('%scodec' % format_type) != 'none')
                            if format_type and format_modified  # bv*, ba*, wv*, wa*
                            else (lambda f: f.get('%scodec' % not_format_type) == 'none')
                            if format_type  # bv, ba, wv, wa
                            else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
                            if not format_modified  # b, w
                            else lambda f: True)  # b*, w*
                        filter_f = lambda f: _filter_f(f) and (
                            f.get('vcodec') != 'none' or f.get('acodec') != 'none')
                    else:
                        if format_spec in self._format_selection_exts['audio']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
                        elif format_spec in self._format_selection_exts['video']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
                            seperate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
                        elif format_spec in self._format_selection_exts['storyboards']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
                        else:
                            filter_f = lambda f: f.get('format_id') == format_spec  # id

                    def selector_function(ctx):
                        formats = list(ctx['formats'])
                        matches = list(filter(filter_f, formats)) if filter_f is not None else formats
                        if format_fallback and ctx['incomplete_formats'] and not matches:
                            # for extractors with incomplete formats (audio only (soundcloud)
                            # or video only (imgur)) best/worst will fallback to
                            # best/worst {video,audio}-only format
                            matches = list(filter(lambda f: f.get('vcodec') != 'none' or f.get('acodec') != 'none', formats))
                        elif seperate_fallback and not ctx['has_merged_format']:
                            # for compatibility with youtube-dl when there is no pre-merged format
                            matches = list(filter(seperate_fallback, formats))
                        matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
                        try:
                            yield matches[format_idx - 1]
                        except LazyList.IndexError:
                            return

            filters = [self._build_format_filter(f) for f in selector.filters]

            def final_selector(ctx):
                ctx_copy = dict(ctx)
                for _filter in filters:
                    ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
                return selector_function(ctx_copy)
            return final_selector

        # HACK: Python 3.12 changed the underlying parser, rendering '7_a' invalid
        # Prefix numbers with random letters to avoid it being classified as a number
        # See: https://github.com/yt-dlp/yt-dlp/pulls/8797
        # TODO: Implement parser not reliant on tokenize.tokenize
        prefix = ''.join(random.choices(string.ascii_letters, k=32))
        stream = io.BytesIO(re.sub(r'\d[_\d]*', rf'{prefix}\g<0>', format_spec).encode())
        try:
            tokens = list(_remove_unused_ops(
                token._replace(string=token.string.replace(prefix, ''))
                for token in tokenize.tokenize(stream.readline)))
        except tokenize.TokenError:
            raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))

        class TokenIterator:
            def __init__(self, tokens):
                self.tokens = tokens
                self.counter = 0

            def __iter__(self):
                return self

            def __next__(self):
                if self.counter >= len(self.tokens):
                    raise StopIteration()
                value = self.tokens[self.counter]
                self.counter += 1
                return value

            next = __next__

            def restore_last_token(self):
                self.counter -= 1

        parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
        return _build_selector_function(parsed_selector)
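
    # Usage sketch (illustrative): the result is a function over a context
    # dict, exactly as it is invoked from process_video_result below:
    #
    #     selector = ydl.build_format_selector('bestvideo*+bestaudio/best')
    #     chosen = list(selector({
    #         'formats': formats,          # sorted list of format dicts
    #         'has_merged_format': False,
    #         'incomplete_formats': False,
    #     }))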

    def _calc_headers(self, info_dict, load_cookies=False):
        res = HTTPHeaderDict(self.params['http_headers'], info_dict.get('http_headers'))
        clean_headers(res)

        if load_cookies:  # For --load-info-json
            self._load_cookies(res.get('Cookie'), autoscope=info_dict['url'])  # compat
            self._load_cookies(info_dict.get('cookies'), autoscope=False)
        # The `Cookie` header is removed to prevent leaks and unscoped cookies.
        # See: https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-v8mc-9377-rwjj
        res.pop('Cookie', None)
        cookies = self.cookiejar.get_cookies_for_url(info_dict['url'])
        if cookies:
            encoder = LenientSimpleCookie()
            values = []
            for cookie in cookies:
                _, value = encoder.value_encode(cookie.value)
                values.append(f'{cookie.name}={value}')
                if cookie.domain:
                    values.append(f'Domain={cookie.domain}')
                if cookie.path:
                    values.append(f'Path={cookie.path}')
                if cookie.secure:
                    values.append('Secure')
                if cookie.expires:
                    values.append(f'Expires={cookie.expires}')
                if cookie.version:
                    values.append(f'Version={cookie.version}')
            info_dict['cookies'] = '; '.join(values)

        if 'X-Forwarded-For' not in res:
            x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
            if x_forwarded_for_ip:
                res['X-Forwarded-For'] = x_forwarded_for_ip

        return res

    def _calc_cookies(self, url):
        self.deprecation_warning('"YoutubeDL._calc_cookies" is deprecated and may be removed in a future version')
        return self.cookiejar.get_cookie_header(url)

    def _sort_thumbnails(self, thumbnails):
        thumbnails.sort(key=lambda t: (
            t.get('preference') if t.get('preference') is not None else -1,
            t.get('width') if t.get('width') is not None else -1,
            t.get('height') if t.get('height') is not None else -1,
            t.get('id') if t.get('id') is not None else '',
            t.get('url')))

    def _sanitize_thumbnails(self, info_dict):
        thumbnails = info_dict.get('thumbnails')
        if thumbnails is None:
            thumbnail = info_dict.get('thumbnail')
            if thumbnail:
                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
        if not thumbnails:
            return

        def check_thumbnails(thumbnails):
            for t in thumbnails:
                self.to_screen(f'[info] Testing thumbnail {t["id"]}')
                try:
                    self.urlopen(HEADRequest(t['url']))
                except network_exceptions as err:
                    self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
                    continue
                yield t

        self._sort_thumbnails(thumbnails)
        for i, t in enumerate(thumbnails):
            if t.get('id') is None:
                t['id'] = '%d' % i
            if t.get('width') and t.get('height'):
                t['resolution'] = '%dx%d' % (t['width'], t['height'])
            t['url'] = sanitize_url(t['url'])

        if self.params.get('check_formats') is True:
            info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
        else:
            info_dict['thumbnails'] = thumbnails

    def _fill_common_fields(self, info_dict, final=True):
        # TODO: move sanitization here
        if final:
            title = info_dict['fulltitle'] = info_dict.get('title')
            if not title:
                if title == '':
                    self.write_debug('Extractor gave empty title. Creating a generic title')
                else:
                    self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
                info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'

        if info_dict.get('duration') is not None:
            info_dict['duration_string'] = formatSeconds(info_dict['duration'])

        for ts_key, date_key in (
                ('timestamp', 'upload_date'),
                ('release_timestamp', 'release_date'),
                ('modified_timestamp', 'modified_date'),
        ):
            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
                # see http://bugs.python.org/issue1646728)
                with contextlib.suppress(ValueError, OverflowError, OSError):
                    upload_date = datetime.datetime.fromtimestamp(info_dict[ts_key], datetime.timezone.utc)
                    info_dict[date_key] = upload_date.strftime('%Y%m%d')

        if not info_dict.get('release_year'):
            info_dict['release_year'] = traverse_obj(info_dict, ('release_date', {lambda x: int(x[:4])}))

        live_keys = ('is_live', 'was_live')
        live_status = info_dict.get('live_status')
        if live_status is None:
            for key in live_keys:
                if info_dict.get(key) is False:
                    continue
                if info_dict.get(key):
                    live_status = key
                break
            else:
                if all(info_dict.get(key) is False for key in live_keys):
                    live_status = 'not_live'
        if live_status:
            info_dict['live_status'] = live_status
            for key in live_keys:
                if info_dict.get(key) is None:
                    info_dict[key] = (live_status == key)
            if live_status == 'post_live':
                info_dict['was_live'] = True

        # Auto generate title fields corresponding to the *_number fields when missing
        # in order to always have clean titles. This is very common for TV series.
        for field in ('chapter', 'season', 'episode'):
            if final and info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
                info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])

        for old_key, new_key in self._deprecated_multivalue_fields.items():
            if new_key in info_dict and old_key in info_dict:
                if '_version' not in info_dict:  # HACK: Do not warn when using --load-info-json
                    self.deprecation_warning(f'Do not return {old_key!r} when {new_key!r} is present')
            elif old_value := info_dict.get(old_key):
                info_dict[new_key] = old_value.split(', ')
            elif new_value := info_dict.get(new_key):
                info_dict[old_key] = ', '.join(v.replace(',', '\N{FULLWIDTH COMMA}') for v in new_value)

    def _raise_pending_errors(self, info):
        err = info.pop('__pending_error', None)
        if err:
            self.report_error(err, tb=False)

    def sort_formats(self, info_dict):
        formats = self._get_formats(info_dict)
        formats.sort(key=FormatSorter(
            self, info_dict.get('_format_sort_fields') or []).calculate_preference)
2681 def process_video_result(self
, info_dict
, download
=True):
2682 assert info_dict
.get('_type', 'video') == 'video'
2683 self
._num
_videos
+= 1
2685 if 'id' not in info_dict
:
2686 raise ExtractorError('Missing "id" field in extractor result', ie
=info_dict
['extractor'])
2687 elif not info_dict
.get('id'):
2688 raise ExtractorError('Extractor failed to obtain "id"', ie
=info_dict
['extractor'])
2690 def report_force_conversion(field
, field_not
, conversion
):
2691 self
.report_warning(
2692 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2693 % (field
, field_not
, conversion
))
2695 def sanitize_string_field(info
, string_field
):
2696 field
= info
.get(string_field
)
2697 if field
is None or isinstance(field
, str):
2699 report_force_conversion(string_field
, 'a string', 'string')
2700 info
[string_field
] = str(field
)
2702 def sanitize_numeric_fields(info
):
2703 for numeric_field
in self
._NUMERIC
_FIELDS
:
2704 field
= info
.get(numeric_field
)
2705 if field
is None or isinstance(field
, (int, float)):
2707 report_force_conversion(numeric_field
, 'numeric', 'int')
2708 info
[numeric_field
] = int_or_none(field
)
2710 sanitize_string_field(info_dict
, 'id')
2711 sanitize_numeric_fields(info_dict
)
2712 if info_dict
.get('section_end') and info_dict
.get('section_start') is not None:
2713 info_dict
['duration'] = round(info_dict
['section_end'] - info_dict
['section_start'], 3)
2714 if (info_dict
.get('duration') or 0) <= 0 and info_dict
.pop('duration', None):
2715 self
.report_warning('"duration" field is negative, there is an error in extractor')
2717 chapters
= info_dict
.get('chapters') or []
2718 if chapters
and chapters
[0].get('start_time'):
2719 chapters
.insert(0, {'start_time': 0}
)
2721 dummy_chapter
= {'end_time': 0, 'start_time': info_dict.get('duration')}
2722 for idx
, (prev
, current
, next_
) in enumerate(zip(
2723 (dummy_chapter
, *chapters
), chapters
, (*chapters
[1:], dummy_chapter
)), 1):
2724 if current
.get('start_time') is None:
2725 current
['start_time'] = prev
.get('end_time')
2726 if not current
.get('end_time'):
2727 current
['end_time'] = next_
.get('start_time')
2728 if not current
.get('title'):
2729 current
['title'] = f
'<Untitled Chapter {idx}>'
2731 if 'playlist' not in info_dict
:
2732 # It isn't part of a playlist
2733 info_dict
['playlist'] = None
2734 info_dict
['playlist_index'] = None
2736 self
._sanitize
_thumbnails
(info_dict
)
2738 thumbnail
= info_dict
.get('thumbnail')
2739 thumbnails
= info_dict
.get('thumbnails')
2741 info_dict
['thumbnail'] = sanitize_url(thumbnail
)
2743 info_dict
['thumbnail'] = thumbnails
[-1]['url']
2745 if info_dict
.get('display_id') is None and 'id' in info_dict
:
2746 info_dict
['display_id'] = info_dict
['id']
2748 self
._fill
_common
_fields
(info_dict
)
2750 for cc_kind
in ('subtitles', 'automatic_captions'):
2751 cc
= info_dict
.get(cc_kind
)
2753 for _
, subtitle
in cc
.items():
2754 for subtitle_format
in subtitle
:
2755 if subtitle_format
.get('url'):
2756 subtitle_format
['url'] = sanitize_url(subtitle_format
['url'])
2757 if subtitle_format
.get('ext') is None:
2758 subtitle_format
['ext'] = determine_ext(subtitle_format
['url']).lower()
2760 automatic_captions
= info_dict
.get('automatic_captions')
2761 subtitles
= info_dict
.get('subtitles')
2763 info_dict
['requested_subtitles'] = self
.process_subtitles(
2764 info_dict
['id'], subtitles
, automatic_captions
)
2766 formats
= self
._get
_formats
(info_dict
)
2768 # Backward compatibility with InfoExtractor._sort_formats
2769 field_preference
= (formats
or [{}])[0].pop('__sort_fields', None)
2770 if field_preference
:
2771 info_dict
['_format_sort_fields'] = field_preference
2773 info_dict
['_has_drm'] = any( # or None ensures --clean-infojson removes it
2774 f
.get('has_drm') and f
['has_drm'] != 'maybe' for f
in formats
) or None
2775 if not self
.params
.get('allow_unplayable_formats'):
2776 formats
= [f
for f
in formats
if not f
.get('has_drm') or f
['has_drm'] == 'maybe']
2778 if formats
and all(f
.get('acodec') == f
.get('vcodec') == 'none' for f
in formats
):
2779 self
.report_warning(
2780 f
'{"This video is DRM protected and " if info_dict["_has_drm"] else ""}'
2781 'only images are available for download. Use --list-formats to see them'.capitalize())
2783 get_from_start
= not info_dict
.get('is_live') or bool(self
.params
.get('live_from_start'))
2784 if not get_from_start
:
2785 info_dict
['title'] += ' ' + datetime
.datetime
.now().strftime('%Y-%m-%d %H:%M')
2786 if info_dict
.get('is_live') and formats
:
2787 formats
= [f
for f
in formats
if bool(f
.get('is_from_start')) == get_from_start
]
2788 if get_from_start
and not formats
:
2789 self
.raise_no_formats(info_dict
, msg
=(
2790 '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
2791 'If you want to download from the current time, use --no-live-from-start'))
2793 def is_wellformed(f
):
2796 self
.report_warning(
2797 '"url" field is missing or empty - skipping format, '
2798 'there is an error in extractor')
2800 if isinstance(url
, bytes):
2801 sanitize_string_field(f
, 'url')
2804 # Filter out malformed formats for better extraction robustness
2805 formats
= list(filter(is_wellformed
, formats
or []))
2808 self
.raise_no_formats(info_dict
)
2810 for format
in formats
:
2811 sanitize_string_field(format
, 'format_id')
2812 sanitize_numeric_fields(format
)
2813 format
['url'] = sanitize_url(format
['url'])
2814 if format
.get('ext') is None:
2815 format
['ext'] = determine_ext(format
['url']).lower()
2816 if format
.get('protocol') is None:
2817 format
['protocol'] = determine_protocol(format
)
2818 if format
.get('resolution') is None:
2819 format
['resolution'] = self
.format_resolution(format
, default
=None)
2820 if format
.get('dynamic_range') is None and format
.get('vcodec') != 'none':
2821 format
['dynamic_range'] = 'SDR'
2822 if format
.get('aspect_ratio') is None:
2823 format
['aspect_ratio'] = try_call(lambda: round(format
['width'] / format
['height'], 2))
2824 # For fragmented formats, "tbr" is often max bitrate and not average
2825 if (('manifest-filesize-approx' in self
.params
['compat_opts'] or not format
.get('manifest_url'))
2826 and info_dict
.get('duration') and format
.get('tbr')
2827 and not format
.get('filesize') and not format
.get('filesize_approx')):
2828 format
['filesize_approx'] = int(info_dict
['duration'] * format
['tbr'] * (1024 / 8))
2829 format
['http_headers'] = self
._calc
_headers
(collections
.ChainMap(format
, info_dict
), load_cookies
=True)
2831 # Safeguard against old/insecure infojson when using --load-info-json
2832 if info_dict
.get('http_headers'):
2833 info_dict
['http_headers'] = HTTPHeaderDict(info_dict
['http_headers'])
2834 info_dict
['http_headers'].pop('Cookie', None)
2836 # This is copied to http_headers by the above _calc_headers and can now be removed
2837 if '__x_forwarded_for_ip' in info_dict
:
2838 del info_dict
['__x_forwarded_for_ip']
2842 '_format_sort_fields': info_dict
.get('_format_sort_fields')
2845 # Sanitize and group by format_id
2847 for i
, format
in enumerate(formats
):
2848 if not format
.get('format_id'):
2849 format
['format_id'] = str(i
)
2851 # Sanitize format_id from characters used in format selector expression
2852 format
['format_id'] = re
.sub(r
'[\s,/+\[\]()]', '_', format
['format_id'])
2853 formats_dict
.setdefault(format
['format_id'], []).append(format
)
2855 # Make sure all formats have unique format_id
2856 common_exts
= set(itertools
.chain(*self
._format
_selection
_exts
.values()))
2857 for format_id
, ambiguous_formats
in formats_dict
.items():
2858 ambigious_id
= len(ambiguous_formats
) > 1
2859 for i
, format
in enumerate(ambiguous_formats
):
2861 format
['format_id'] = '%s-%d' % (format_id
, i
)
2862 # Ensure there is no conflict between id and ext in format selection
2863 # See https://github.com/yt-dlp/yt-dlp/issues/1282
2864 if format
['format_id'] != format
['ext'] and format
['format_id'] in common_exts
:
2865 format
['format_id'] = 'f%s' % format
['format_id']
2867 if format
.get('format') is None:
2868 format
['format'] = '{id} - {res}{note}'.format(
2869 id=format
['format_id'],
2870 res
=self
.format_resolution(format
),
2871 note
=format_field(format
, 'format_note', ' (%s)'),
2874 if self
.params
.get('check_formats') is True:
2875 formats
= LazyList(self
._check
_formats
(formats
[::-1]), reverse
=True)
2877 if not formats
or formats
[0] is not info_dict
:
2878 # only set the 'formats' fields if the original info_dict list them
2879 # otherwise we end up with a circular reference, the first (and unique)
2880 # element in the 'formats' field in info_dict is info_dict itself,
2881 # which can't be exported to json
2882 info_dict
['formats'] = formats
2884 info_dict
, _
= self
.pre_process(info_dict
)
2886 if self
._match
_entry
(info_dict
, incomplete
=self
._format
_fields
) is not None:
2889 self
.post_extract(info_dict
)
2890 info_dict
, _
= self
.pre_process(info_dict
, 'after_filter')
2892 # The pre-processors may have modified the formats
2893 formats
= self
._get
_formats
(info_dict
)
2895 list_only
= self
.params
.get('simulate') == 'list_only'
2896 interactive_format_selection
= not list_only
and self
.format_selector
== '-'
2897 if self
.params
.get('list_thumbnails'):
2898 self
.list_thumbnails(info_dict
)
2899 if self
.params
.get('listsubtitles'):
2900 if 'automatic_captions' in info_dict
:
2901 self
.list_subtitles(
2902 info_dict
['id'], automatic_captions
, 'automatic captions')
2903 self
.list_subtitles(info_dict
['id'], subtitles
, 'subtitles')
2904 if self
.params
.get('listformats') or interactive_format_selection
:
2905 self
.list_formats(info_dict
)
2907 # Without this printing, -F --print-json will not work
2908 self
.__forced
_printings
(info_dict
)
2911 format_selector
= self
.format_selector
2913 if interactive_format_selection
:
2914 req_format
= input(self
._format
_screen
('\nEnter format selector ', self
.Styles
.EMPHASIS
)
2915 + '(Press ENTER for default, or Ctrl+C to quit)'
2916 + self
._format
_screen
(': ', self
.Styles
.EMPHASIS
))
2918 format_selector
= self
.build_format_selector(req_format
) if req_format
else None
                except SyntaxError as err:
                    self.report_error(err, tb=False, is_error=False)
                    continue

            if format_selector is None:
                req_format = self._default_format_spec(info_dict, download=download)
                self.write_debug(f'Default format spec: {req_format}')
                format_selector = self.build_format_selector(req_format)

            formats_to_download = list(format_selector({
                'formats': formats,
                'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
                'incomplete_formats': (all(f.get('vcodec') == 'none' for f in formats)  # No formats with video
                                       or all(f.get('acodec') == 'none' for f in formats)),  # OR, no formats with audio
            }))
            if interactive_format_selection and not formats_to_download:
                self.report_error('Requested format is not available', tb=False, is_error=False)
                continue
            break

        if not formats_to_download:
            if not self.params.get('ignore_no_formats_error'):
                raise ExtractorError(
                    'Requested format is not available. Use --list-formats for a list of available formats',
                    expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
            self.report_warning('Requested format is not available')
            # Process what we can, even without any available formats.
            formats_to_download = [{}]

        requested_ranges = tuple(self.params.get('download_ranges', lambda *_: [{}])(info_dict, self))
        best_format, downloaded_formats = formats_to_download[-1], []
        if download:
            if best_format and requested_ranges:
                def to_screen(*msg):
                    self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')

                to_screen(f'Downloading {len(formats_to_download)} format(s):',
                          (f['format_id'] for f in formats_to_download))
                if requested_ranges != ({}, ):
                    to_screen(f'Downloading {len(requested_ranges)} time ranges:',
                              (f'{c["start_time"]:.1f}-{c["end_time"]:.1f}' for c in requested_ranges))
            max_downloads_reached = False

            for fmt, chapter in itertools.product(formats_to_download, requested_ranges):
                new_info = self._copy_infodict(info_dict)
                new_info.update(fmt)
                offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
                end_time = offset + min(chapter.get('end_time', duration), duration)
                # duration may not be accurate. So allow deviations <1sec
                if end_time == float('inf') or end_time > offset + duration + 1:
                    end_time = None
                if chapter or offset:
                    new_info.update({
                        'section_start': offset + chapter.get('start_time', 0),
                        'section_end': end_time,
                        'section_title': chapter.get('title'),
                        'section_number': chapter.get('index'),
                    })
                downloaded_formats.append(new_info)
                try:
                    self.process_info(new_info)
                except MaxDownloadsReached:
                    max_downloads_reached = True
                self._raise_pending_errors(new_info)
                # Remove copied info
                for key, val in tuple(new_info.items()):
                    if info_dict.get(key) == val:
                        new_info.pop(key)
                if max_downloads_reached:
                    break

            write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
            assert write_archive.issubset({True, False, 'ignore'})
            if True in write_archive and False not in write_archive:
                self.record_download_archive(info_dict)

            info_dict['requested_downloads'] = downloaded_formats
            info_dict = self.run_all_pps('after_video', info_dict)
            if max_downloads_reached:
                raise MaxDownloadsReached()

        # We update the info dict with the selected best quality format (backwards compatibility)
        info_dict.update(best_format)
        return info_dict
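
    # Illustrative sketch (not from the original source): 'download_ranges' is a
    # callable receiving (info_dict, ydl) and returning chapter-like dicts; the keys
    # consumed above are start_time, end_time, title and index. For example:
    #
    #   ydl = YoutubeDL({
    #       'download_ranges': lambda info_dict, ydl: [{'start_time': 0, 'end_time': 30}],
    #   })
    #
    # Each selected format is then downloaded once per requested range, with
    # section_start/section_end set on the per-download info dicts as shown above.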

    def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
        """Select the requested subtitles and their format"""
        available_subs, normal_sub_langs = {}, []
        if normal_subtitles and self.params.get('writesubtitles'):
            available_subs.update(normal_subtitles)
            normal_sub_langs = tuple(normal_subtitles.keys())
        if automatic_captions and self.params.get('writeautomaticsub'):
            for lang, cap_info in automatic_captions.items():
                if lang not in available_subs:
                    available_subs[lang] = cap_info

        if not available_subs or (
                not self.params.get('writesubtitles')
                and not self.params.get('writeautomaticsub')):
            return None

        all_sub_langs = tuple(available_subs.keys())
        if self.params.get('allsubtitles', False):
            requested_langs = all_sub_langs
        elif self.params.get('subtitleslangs', False):
            try:
                requested_langs = orderedSet_from_options(
                    self.params.get('subtitleslangs'), {'all': all_sub_langs}, use_regex=True)
            except re.error as e:
                raise ValueError(f'Wrong regex for subtitleslangs: {e.pattern}')
        else:
            requested_langs = LazyList(itertools.chain(
                ['en'] if 'en' in normal_sub_langs else [],
                filter(lambda f: f.startswith('en'), normal_sub_langs),
                ['en'] if 'en' in all_sub_langs else [],
                filter(lambda f: f.startswith('en'), all_sub_langs),
                normal_sub_langs, all_sub_langs,
            ))[:1]
        if requested_langs:
            self.to_screen(f'[info] {video_id}: Downloading subtitles: {", ".join(requested_langs)}')

        formats_query = self.params.get('subtitlesformat', 'best')
        formats_preference = formats_query.split('/') if formats_query else []
        subs = {}
        for lang in requested_langs:
            formats = available_subs.get(lang)
            if formats is None:
                self.report_warning(f'{lang} subtitles not available for {video_id}')
                continue
            for ext in formats_preference:
                if ext == 'best':
                    f = formats[-1]
                    break
                matches = list(filter(lambda f: f['ext'] == ext, formats))
                if matches:
                    f = matches[-1]
                    break
            else:
                f = formats[-1]
                self.report_warning(
                    'No subtitle format found matching "%s" for language %s, '
                    'using %s' % (formats_query, lang, f['ext']))
            subs[lang] = f
        return subs
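
    # Usage sketch (hypothetical subtitle data, for illustration only): the method
    # returns a {lang: format_dict} mapping honoring 'subtitleslangs' (regexes
    # allowed) and the 'subtitlesformat' preference chain:
    #
    #   ydl = YoutubeDL({'writesubtitles': True, 'subtitleslangs': ['en.*'],
    #                    'subtitlesformat': 'vtt/best'})
    #   subs = ydl.process_subtitles('xyz', {'en-US': [{'ext': 'vtt', 'url': '...'}]}, {})
    #   # subs == {'en-US': {'ext': 'vtt', 'url': '...'}}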

    def _forceprint(self, key, info_dict):
        if info_dict is None:
            return
        info_copy = info_dict.copy()
        info_copy.setdefault('filename', self.prepare_filename(info_dict))
        if info_dict.get('requested_formats') is not None:
            # For RTMP URLs, also include the playpath
            info_copy['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
        elif info_dict.get('url'):
            info_copy['urls'] = info_dict['url'] + info_dict.get('play_path', '')
        info_copy['formats_table'] = self.render_formats_table(info_dict)
        info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
        info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
        info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))

        def format_tmpl(tmpl):
            mobj = re.fullmatch(r'([\w.:,]|-\d|(?P<dict>{([\w.:,]|-\d)+}))+=?', tmpl)
            if not mobj:
                return tmpl

            fmt = '%({})s'
            if tmpl.startswith('{'):
                tmpl, fmt = f'.{tmpl}', '%({})j'
            if tmpl.endswith('='):
                tmpl, fmt = tmpl[:-1], '{0} = %({0})#j'
            return '\n'.join(map(fmt.format, [tmpl] if mobj.group('dict') else tmpl.split(',')))

        for tmpl in self.params['forceprint'].get(key, []):
            self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))

        for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
            filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
            tmpl = format_tmpl(tmpl)
            self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
            if self._ensure_dir_exists(filename):
                with open(filename, 'a', encoding='utf-8', newline='') as f:
                    f.write(self.evaluate_outtmpl(tmpl, info_copy) + os.linesep)

        return info_copy
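
    # How format_tmpl expands 'forceprint'/'print_to_file' entries (traced from the
    # regex above; all examples illustrative):
    #   'id,title'   -> '%(id)s\n%(title)s'    (comma-separated fields)
    #   'title='     -> 'title = %(title)#j'   (trailing "=" echoes the field name)
    #   '{id,title}' -> '%(.{id,title})j'      (brace form dumps a JSON dict)
    # Anything not matching the pattern is passed through as a full output template.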

    def __forced_printings(self, info_dict, filename=None, incomplete=True):
        if (self.params.get('forcejson')
                or self.params['forceprint'].get('video')
                or self.params['print_to_file'].get('video')):
            self.post_extract(info_dict)
        if filename:
            info_dict['filename'] = filename
        info_copy = self._forceprint('video', info_dict)

        def print_field(field, actual_field=None, optional=False):
            if actual_field is None:
                actual_field = field
            if self.params.get(f'force{field}') and (
                    info_copy.get(field) is not None or (not optional and not incomplete)):
                self.to_stdout(info_copy[actual_field])

        print_field('title')
        print_field('id')
        print_field('url', 'urls')
        print_field('thumbnail', optional=True)
        print_field('description', optional=True)
        print_field('filename')
        if self.params.get('forceduration') and info_copy.get('duration') is not None:
            self.to_stdout(formatSeconds(info_copy['duration']))
        print_field('format')

        if self.params.get('forcejson'):
            self.to_stdout(json.dumps(self.sanitize_info(info_dict)))

    def dl(self, name, info, subtitle=False, test=False):
        if not info.get('url'):
            self.raise_no_formats(info, True)

        if test:
            verbose = self.params.get('verbose')
            params = {
                'test': True,
                'quiet': self.params.get('quiet') or not verbose,
                'verbose': verbose,
                'noprogress': not verbose,
                'nopart': True,
                'skip_unavailable_fragments': False,
                'keep_fragments': False,
                'overwrites': True,
                '_no_ytdl_file': True,
            }
        else:
            params = self.params
        fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
        if not test:
            for ph in self._progress_hooks:
                fd.add_progress_hook(ph)
            urls = '", "'.join(
                (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
                for f in info.get('requested_formats', []) or [info])
            self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')

        # Note: Ideally info should be deep-copied so that hooks cannot modify it.
        # But it may contain objects that are not deep-copyable
        new_info = self._copy_infodict(info)
        if new_info.get('http_headers') is None:
            new_info['http_headers'] = self._calc_headers(new_info)
        return fd.download(name, new_info, subtitle)
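
    # Internal usage sketch: callers unpack the downloader's return value as a
    # (success, real_download) pair, e.g.
    #   success, real_download = self.dl(temp_filename, info_dict)
    # as done in process_info below; test=True swaps in the reduced param set above.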

    def existing_file(self, filepaths, *, default_overwrite=True):
        existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
        if existing_files and not self.params.get('overwrites', default_overwrite):
            return existing_files[0]

        for file in existing_files:
            self.report_file_delete(file)
            os.remove(file)
        return None

    def process_info(self, info_dict):
        """Process a single resolved IE result. (Modifies it in-place)"""

        assert info_dict.get('_type', 'video') == 'video'
        original_infodict = info_dict

        if 'format' not in info_dict and 'ext' in info_dict:
            info_dict['format'] = info_dict['ext']

        if self._match_entry(info_dict) is not None:
            info_dict['__write_download_archive'] = 'ignore'
            return

        # Does nothing under normal operation - for backward compatibility of process_info
        self.post_extract(info_dict)

        def replace_info_dict(new_info):
            nonlocal info_dict
            if new_info == info_dict:
                return
            info_dict.clear()
            info_dict.update(new_info)

        new_info, _ = self.pre_process(info_dict, 'video')
        replace_info_dict(new_info)
        self._num_downloads += 1

        # info_dict['_filename'] needs to be set for backward compatibility
        info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
        temp_filename = self.prepare_filename(info_dict, 'temp')
        files_to_move = {}

        self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))

        def check_max_downloads():
            if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
                raise MaxDownloadsReached()

        if self.params.get('simulate'):
            info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
            check_max_downloads()
            return

        if full_filename is None:
            return
        if not self._ensure_dir_exists(encodeFilename(full_filename)):
            return
        if not self._ensure_dir_exists(encodeFilename(temp_filename)):
            return

        if self._write_description('video', info_dict,
                                   self.prepare_filename(info_dict, 'description')) is None:
            return

        sub_files = self._write_subtitles(info_dict, temp_filename)
        if sub_files is None:
            return
        files_to_move.update(dict(sub_files))

        thumb_files = self._write_thumbnails(
            'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
        if thumb_files is None:
            return
        files_to_move.update(dict(thumb_files))

        infofn = self.prepare_filename(info_dict, 'infojson')
        _infojson_written = self._write_info_json('video', info_dict, infofn)
        if _infojson_written:
            info_dict['infojson_filename'] = infofn
            # For backward compatibility, even though it was a private field
            info_dict['__infojson_filename'] = infofn
        elif _infojson_written is None:
            return

        # Note: Annotations are deprecated
        annofn = None
        if self.params.get('writeannotations', False):
            annofn = self.prepare_filename(info_dict, 'annotation')
        if annofn:
            if not self._ensure_dir_exists(encodeFilename(annofn)):
                return
            if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
                self.to_screen('[info] Video annotations are already present')
            elif not info_dict.get('annotations'):
                self.report_warning('There are no annotations to write.')
            else:
                try:
                    self.to_screen('[info] Writing video annotations to: ' + annofn)
                    with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                        annofile.write(info_dict['annotations'])
                except (KeyError, TypeError):
                    self.report_warning('There are no annotations to write.')
                except OSError:
                    self.report_error('Cannot write annotations file: ' + annofn)
                    return

        # Write internet shortcut files
        def _write_link_file(link_type):
            url = try_get(info_dict['webpage_url'], iri_to_uri)
            if not url:
                self.report_warning(
                    f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
                return True
            linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
            if not self._ensure_dir_exists(encodeFilename(linkfn)):
                return False
            if self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
                self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
                return True
            try:
                self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
                with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
                          newline='\r\n' if link_type == 'url' else '\n') as linkfile:
                    template_vars = {'url': url}
                    if link_type == 'desktop':
                        template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
                    linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
            except OSError:
                self.report_error(f'Cannot write internet shortcut {linkfn}')
                return False
            return True

        write_links = {
            'url': self.params.get('writeurllink'),
            'webloc': self.params.get('writewebloclink'),
            'desktop': self.params.get('writedesktoplink'),
        }
        if self.params.get('writelink'):
            link_type = ('webloc' if sys.platform == 'darwin'
                         else 'desktop' if sys.platform.startswith('linux')
                         else 'url')
            write_links[link_type] = True

        if any(should_write and not _write_link_file(link_type)
               for link_type, should_write in write_links.items()):
            return

        new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
        replace_info_dict(new_info)

        if self.params.get('skip_download'):
            info_dict['filepath'] = temp_filename
            info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
            info_dict['__files_to_move'] = files_to_move
            replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
            info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
        else:
            # Download
            info_dict.setdefault('__postprocessors', [])
            try:

                def existing_video_file(*filepaths):
                    ext = info_dict.get('ext')
                    converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
                    file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
                                              default_overwrite=False)
                    if file:
                        info_dict['ext'] = os.path.splitext(file)[1][1:]
                    return file

                fd, success = None, True
                if info_dict.get('protocol') or info_dict.get('url'):
                    fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
                    if fd != FFmpegFD and 'no-direct-merge' not in self.params['compat_opts'] and (
                            info_dict.get('section_start') or info_dict.get('section_end')):
                        msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
                               else 'You have requested downloading the video partially, but ffmpeg is not installed')
                        self.report_error(f'{msg}. Aborting')
                        return

                if info_dict.get('requested_formats') is not None:
                    old_ext = info_dict['ext']
                    if self.params.get('merge_output_format') is None:
                        if (info_dict['ext'] == 'webm'
                                and info_dict.get('thumbnails')
                                # check with type instead of pp_key, __name__, or isinstance
                                # since we don't want any custom PPs to trigger this
                                and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])):  # noqa: E721
                            info_dict['ext'] = 'mkv'
                            self.report_warning(
                                'webm doesn\'t support embedding a thumbnail, mkv will be used')
                    new_ext = info_dict['ext']

                    def correct_ext(filename, ext=new_ext):
                        if filename == '-':
                            return filename
                        filename_real_ext = os.path.splitext(filename)[1][1:]
                        filename_wo_ext = (
                            os.path.splitext(filename)[0]
                            if filename_real_ext in (old_ext, new_ext)
                            else filename)
                        return f'{filename_wo_ext}.{ext}'

                    # Ensure filename always has a correct extension for successful merge
                    full_filename = correct_ext(full_filename)
                    temp_filename = correct_ext(temp_filename)
                    dl_filename = existing_video_file(full_filename, temp_filename)

                    info_dict['__real_download'] = False
                    # NOTE: Copy so that original format dicts are not modified
                    info_dict['requested_formats'] = list(map(dict, info_dict['requested_formats']))

                    merger = FFmpegMergerPP(self)
                    downloaded = []
                    if dl_filename is not None:
                        self.report_file_already_downloaded(dl_filename)
                    elif fd:
                        for f in info_dict['requested_formats'] if fd != FFmpegFD else []:
                            f['filepath'] = fname = prepend_extension(
                                correct_ext(temp_filename, info_dict['ext']),
                                'f%s' % f['format_id'], info_dict['ext'])
                            downloaded.append(fname)
                        info_dict['url'] = '\n'.join(f['url'] for f in info_dict['requested_formats'])
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        if self.params.get('allow_unplayable_formats'):
                            self.report_warning(
                                'You have requested merging of multiple formats '
                                'while also allowing unplayable formats to be downloaded. '
                                'The formats won\'t be merged to prevent data corruption.')
                        elif not merger.available:
                            msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
                            if not self.params.get('ignoreerrors'):
                                self.report_error(f'{msg}. Aborting due to --abort-on-error')
                                return
                            self.report_warning(f'{msg}. The formats won\'t be merged')

                        if temp_filename == '-':
                            reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
                                      else 'but the formats are incompatible for simultaneous download' if merger.available
                                      else 'but ffmpeg is not installed')
                            self.report_warning(
                                f'You have requested downloading multiple formats to stdout {reason}. '
                                'The formats will be streamed one after the other')
                            fname = temp_filename
                        for f in info_dict['requested_formats']:
                            new_info = dict(info_dict)
                            del new_info['requested_formats']
                            new_info.update(f)
                            if temp_filename != '-':
                                fname = prepend_extension(
                                    correct_ext(temp_filename, new_info['ext']),
                                    'f%s' % f['format_id'], new_info['ext'])
                                if not self._ensure_dir_exists(fname):
                                    return
                                f['filepath'] = fname
                                downloaded.append(fname)
                            partial_success, real_download = self.dl(fname, new_info)
                            info_dict['__real_download'] = info_dict['__real_download'] or real_download
                            success = success and partial_success

                    if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
                        info_dict['__postprocessors'].append(merger)
                        info_dict['__files_to_merge'] = downloaded
                        # Even if there were no downloads, it is being merged only now
                        info_dict['__real_download'] = True
                    else:
                        for file in downloaded:
                            files_to_move[file] = None
                else:
                    # Just a single file
                    dl_filename = existing_video_file(full_filename, temp_filename)
                    if dl_filename is None or dl_filename == temp_filename:
                        # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
                        # So we should try to resume the download
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        self.report_file_already_downloaded(dl_filename)

                dl_filename = dl_filename or temp_filename
                info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))

            except network_exceptions as err:
                self.report_error('unable to download video data: %s' % error_to_compat_str(err))
                return
            except OSError as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
                return

            self._raise_pending_errors(info_dict)
            if success and full_filename != '-':

                def fixup():
                    do_fixup = True
                    fixup_policy = self.params.get('fixup')
                    vid = info_dict['id']

                    if fixup_policy in ('ignore', 'never'):
                        do_fixup = False
                    elif fixup_policy == 'warn':
                        do_fixup = 'warn'
                    elif fixup_policy != 'force':
                        assert fixup_policy in ('detect_or_warn', None)
                        if not info_dict.get('__real_download'):
                            do_fixup = False

                    def ffmpeg_fixup(cndn, msg, cls):
                        if not (do_fixup and cndn):
                            return
                        elif do_fixup == 'warn':
                            self.report_warning(f'{vid}: {msg}')
                            return
                        pp = cls(self)
                        if pp.available:
                            info_dict['__postprocessors'].append(pp)
                        else:
                            self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')

                    stretched_ratio = info_dict.get('stretched_ratio')
                    ffmpeg_fixup(stretched_ratio not in (1, None),
                                 f'Non-uniform pixel ratio {stretched_ratio}',
                                 FFmpegFixupStretchedPP)

                    downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
                    downloader = downloader.FD_NAME if downloader else None

                    ext = info_dict.get('ext')
                    postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
                        isinstance(pp, FFmpegVideoConvertorPP)
                        and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
                    ) for pp in self._pps['post_process'])

                    if not postprocessed_by_ffmpeg:
                        ffmpeg_fixup(fd != FFmpegFD and ext == 'm4a'
                                     and info_dict.get('container') == 'm4a_dash',
                                     'writing DASH m4a. Only some players support this container',
                                     FFmpegFixupM4aPP)
                        ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
                                     or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
                                     'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
                                     FFmpegFixupM3u8PP)
                        ffmpeg_fixup(downloader == 'dashsegments'
                                     and (info_dict.get('is_live') or info_dict.get('is_dash_periods')),
                                     'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)

                    ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
                    ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)

                fixup()

                try:
                    replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
                except PostProcessingError as err:
                    self.report_error('Postprocessing: %s' % str(err))
                    return
                try:
                    for ph in self._post_hooks:
                        ph(info_dict['filepath'])
                except Exception as err:
                    self.report_error('post hooks: %s' % str(err))
                    return
                info_dict['__write_download_archive'] = True

        assert info_dict is original_infodict  # Make sure the info_dict was modified in-place
        if self.params.get('force_write_download_archive'):
            info_dict['__write_download_archive'] = True
        check_max_downloads()

    def __download_wrapper(self, func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                res = func(*args, **kwargs)
            except UnavailableVideoError as e:
                self.report_error(e)
            except DownloadCancelled as e:
                self.to_screen(f'[info] {e}')
                if not self.params.get('break_per_url'):
                    raise
                self._num_downloads = 0
            else:
                if self.params.get('dump_single_json', False):
                    self.post_extract(res)
                    self.to_stdout(json.dumps(self.sanitize_info(res)))
        return wrapper

    def download(self, url_list):
        """Download a given list of URLs."""
        url_list = variadic(url_list)  # Passing a single URL is a common mistake
        outtmpl = self.params['outtmpl']['default']
        if (len(url_list) > 1
                and outtmpl != '-'
                and '%' not in outtmpl
                and self.params.get('max_downloads') != 1):
            raise SameFileError(outtmpl)

        for url in url_list:
            self.__download_wrapper(self.extract_info)(
                url, force_generic_extractor=self.params.get('force_generic_extractor', False))

        return self._download_retcode
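
    # Embedding sketch (minimal example; the output template is an assumption):
    #
    #   from yt_dlp import YoutubeDL
    #
    #   with YoutubeDL({'outtmpl': '%(title)s [%(id)s].%(ext)s'}) as ydl:
    #       retcode = ydl.download(['https://example.com/some/video/page'])
    #
    # A single URL is also accepted (normalized by variadic() above); SameFileError
    # is only raised when several URLs would collide on one non-templated filename.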

    def download_with_info_file(self, info_filename):
        with contextlib.closing(fileinput.FileInput(
                [info_filename], mode='r',
                openhook=fileinput.hook_encoded('utf-8'))) as f:
            # FileInput doesn't have a read method, we can't call json.load
            infos = [self.sanitize_info(info, self.params.get('clean_infojson', True))
                     for info in variadic(json.loads('\n'.join(f)))]
        for info in infos:
            try:
                self.__download_wrapper(self.process_ie_result)(info, download=True)
            except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
                if not isinstance(e, EntryNotInPlaylist):
                    self.to_stderr('\r')
                webpage_url = info.get('webpage_url')
                if webpage_url is None:
                    raise
                self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
                self.download([webpage_url])
            except ExtractorError as e:
                self.report_error(e)
        return self._download_retcode
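
    # Usage sketch: re-run a download from a previously written info JSON (e.g. one
    # produced with the 'writeinfojson' option; the filename here is hypothetical):
    #
    #   retcode = ydl.download_with_info_file('video.info.json')
    #
    # If the stored media URLs have gone stale, the handler above falls back to
    # re-extracting from 'webpage_url'.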

    @staticmethod
    def sanitize_info(info_dict, remove_private_keys=False):
        ''' Sanitize the infodict for converting to json '''
        if info_dict is None:
            return info_dict
        info_dict.setdefault('epoch', int(time.time()))
        info_dict.setdefault('_type', 'video')
        info_dict.setdefault('_version', {
            'version': __version__,
            'current_git_head': current_git_head(),
            'release_git_head': RELEASE_GIT_HEAD,
            'repository': ORIGIN,
        })

        if remove_private_keys:
            reject = lambda k, v: v is None or k.startswith('__') or k in {
                'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
                'entries', 'filepath', '_filename', 'filename', 'infojson_filename', 'original_url',
                'playlist_autonumber',
            }
        else:
            reject = lambda k, v: False

        def filter_fn(obj):
            if isinstance(obj, dict):
                return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
            elif isinstance(obj, (list, tuple, set, LazyList)):
                return list(map(filter_fn, obj))
            elif obj is None or isinstance(obj, (str, int, float, bool)):
                return obj
            else:
                return repr(obj)

        return filter_fn(info_dict)
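
    # Usage sketch (the standard pattern for JSON-safe dumps of extracted info):
    #
    #   with YoutubeDL() as ydl:
    #       info = ydl.extract_info(url, download=False)
    #       print(json.dumps(ydl.sanitize_info(info)))
    #
    # Pass remove_private_keys=True to also strip the private/derived fields listed
    # in 'reject' above.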

    @staticmethod
    def filter_requested_info(info_dict, actually_filter=True):
        ''' Alias of sanitize_info for backward compatibility '''
        return YoutubeDL.sanitize_info(info_dict, actually_filter)

    def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
        for filename in set(filter(None, files_to_delete)):
            if msg:
                self.to_screen(msg % filename)
            try:
                os.remove(filename)
            except OSError:
                self.report_warning(f'Unable to delete file {filename}')
            if filename in info.get('__files_to_move', []):  # NB: Delete even if None
                del info['__files_to_move'][filename]

    @staticmethod
    def post_extract(info_dict):
        def actual_post_extract(info_dict):
            if info_dict.get('_type') in ('playlist', 'multi_video'):
                for video_dict in info_dict.get('entries', {}):
                    actual_post_extract(video_dict or {})
                return

            post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
            info_dict.update(post_extractor())

        actual_post_extract(info_dict or {})

    def run_pp(self, pp, infodict):
        files_to_delete = []
        if '__files_to_move' not in infodict:
            infodict['__files_to_move'] = {}
        try:
            files_to_delete, infodict = pp.run(infodict)
        except PostProcessingError as e:
            # Must be True and not 'only_download'
            if self.params.get('ignoreerrors') is True:
                self.report_error(e)
                return infodict
            raise

        if not files_to_delete:
            return infodict
        if self.params.get('keepvideo', False):
            for f in files_to_delete:
                infodict['__files_to_move'].setdefault(f, '')
        else:
            self._delete_downloaded_files(
                *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
        return infodict

    def run_all_pps(self, key, info, *, additional_pps=None):
        if key != 'video':
            self._forceprint(key, info)
        for pp in (additional_pps or []) + self._pps[key]:
            info = self.run_pp(pp, info)
        return info

    def pre_process(self, ie_info, key='pre_process', files_to_move=None):
        info = dict(ie_info)
        info['__files_to_move'] = files_to_move or {}
        try:
            info = self.run_all_pps(key, info)
        except PostProcessingError as err:
            msg = f'Preprocessing: {err}'
            info.setdefault('__pending_error', msg)
            self.report_error(msg, is_error=False)
        return info, info.pop('__files_to_move', None)

    def post_process(self, filename, info, files_to_move=None):
        """Run all the postprocessors on the given file."""
        info['filepath'] = filename
        info['__files_to_move'] = files_to_move or {}
        info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
        info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
        del info['__files_to_move']
        return self.run_all_pps('after_move', info)
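
    # Hook-order sketch (keys taken from the calls in this file): 'post_process'
    # PPs run on the downloaded file, MoveFilesAfterDownloadPP then relocates it,
    # and 'after_move' PPs see the final 'filepath'. The related entry points are:
    #
    #   info, files = ydl.pre_process(ie_result)                  # 'pre_process'
    #   info, files = ydl.pre_process(info, 'before_dl', files)   # just before download
    #   info = ydl.post_process(filename, info, files)            # post_process -> move -> after_move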

    def _make_archive_id(self, info_dict):
        video_id = info_dict.get('id')
        if video_id is None:
            return
        # Future-proof against any change in case
        # and backwards compatibility with prior versions
        extractor = info_dict.get('extractor_key') or info_dict.get('ie_key')  # key in a playlist
        if extractor is None:
            url = str_or_none(info_dict.get('url'))
            if not url:
                return
            # Try to find matching extractor for the URL and take its ie_key
            for ie_key, ie in self._ies.items():
                if ie.suitable(url):
                    extractor = ie_key
                    break
            else:
                return
        return make_archive_id(extractor, video_id)

    def in_download_archive(self, info_dict):
        if not self.archive:
            return False

        vid_ids = [self._make_archive_id(info_dict)]
        vid_ids.extend(info_dict.get('_old_archive_ids') or [])
        return any(id_ in self.archive for id_ in vid_ids)

    def record_download_archive(self, info_dict):
        fn = self.params.get('download_archive')
        if fn is None:
            return
        vid_id = self._make_archive_id(info_dict)
        assert vid_id

        self.write_debug(f'Adding to archive: {vid_id}')
        if is_path_like(fn):
            with locked_file(fn, 'a', encoding='utf-8') as archive_file:
                archive_file.write(vid_id + '\n')
        self.archive.add(vid_id)
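
    # Archive-format sketch: each archive line is the id built by make_archive_id(),
    # i.e. '<extractor key> <video id>', so a file entry looks like (hypothetical):
    #
    #   youtube dQw4w9WgXcQ
    #
    # in_download_archive() above also honors '_old_archive_ids' so renamed
    # extractors keep matching old entries.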

    @staticmethod
    def format_resolution(format, default='unknown'):
        if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
            return 'audio only'
        if format.get('resolution') is not None:
            return format['resolution']
        if format.get('width') and format.get('height'):
            return '%dx%d' % (format['width'], format['height'])
        elif format.get('height'):
            return '%sp' % format['height']
        elif format.get('width'):
            return '%dx?' % format['width']
        return default
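
    # Behavior sketch (follows directly from the branches above):
    #   {'width': 1920, 'height': 1080}      -> '1920x1080'
    #   {'height': 720}                      -> '720p'
    #   {'width': 640}                       -> '640x?'
    #   {'vcodec': 'none', 'acodec': 'mp4a'} -> 'audio only'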

    def _list_format_headers(self, *headers):
        if self.params.get('listformats_table', True) is not False:
            return [self._format_out(header, self.Styles.HEADERS) for header in headers]
        return headers

    def _format_note(self, fdict):
        res = ''
        if fdict.get('ext') in ['f4f', 'f4m']:
            res += '(unsupported)'
        if fdict.get('language'):
            if res:
                res += ' '
            res += '[%s]' % fdict['language']
        if fdict.get('format_note') is not None:
            if res:
                res += ' '
            res += fdict['format_note']
        if fdict.get('tbr') is not None:
            if res:
                res += ', '
            res += '%4dk' % fdict['tbr']
        if fdict.get('container') is not None:
            if res:
                res += ', '
            res += '%s container' % fdict['container']
        if (fdict.get('vcodec') is not None
                and fdict.get('vcodec') != 'none'):
            if res:
                res += ', '
            res += fdict['vcodec']
            if fdict.get('vbr') is not None:
                res += '@'
        elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
            res += 'video@'
        if fdict.get('vbr') is not None:
            res += '%4dk' % fdict['vbr']
        if fdict.get('fps') is not None:
            if res:
                res += ', '
            res += '%sfps' % fdict['fps']
        if fdict.get('acodec') is not None:
            if res:
                res += ', '
            if fdict['acodec'] == 'none':
                res += 'video only'
            else:
                res += '%-5s' % fdict['acodec']
        elif fdict.get('abr') is not None:
            if res:
                res += ', '
            res += 'audio'
        if fdict.get('abr') is not None:
            res += '@%3dk' % fdict['abr']
        if fdict.get('asr') is not None:
            res += ' (%5dHz)' % fdict['asr']
        if fdict.get('filesize') is not None:
            if res:
                res += ', '
            res += format_bytes(fdict['filesize'])
        elif fdict.get('filesize_approx') is not None:
            if res:
                res += ', '
            res += '~' + format_bytes(fdict['filesize_approx'])
        return res

    def _get_formats(self, info_dict):
        if info_dict.get('formats') is None:
            if info_dict.get('url') and info_dict.get('_type', 'video') == 'video':
                return [info_dict]
            return []
        return info_dict['formats']

    def render_formats_table(self, info_dict):
        formats = self._get_formats(info_dict)
        if not formats:
            return None
        if not self.params.get('listformats_table', True) is not False:
            table = [
                [
                    format_field(f, 'format_id'),
                    format_field(f, 'ext'),
                    self.format_resolution(f),
                    self._format_note(f),
                ] for f in formats if (f.get('preference') or 0) >= -1000]
            return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)

        def simplified_codec(f, field):
            assert field in ('acodec', 'vcodec')
            codec = f.get(field)
            if not codec:
                return 'unknown'
            elif codec != 'none':
                return '.'.join(codec.split('.')[:4])

            if field == 'vcodec' and f.get('acodec') == 'none':
                return 'images'
            elif field == 'acodec' and f.get('vcodec') == 'none':
                return ''
            return self._format_out('audio only' if field == 'vcodec' else 'video only',
                                    self.Styles.SUPPRESS)

        delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
        table = [
            [
                self._format_out(format_field(f, 'format_id'), self.Styles.ID),
                format_field(f, 'ext'),
                format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
                format_field(f, 'fps', '\t%d', func=round),
                format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
                format_field(f, 'audio_channels', '\t%s'),
                delim, (
                    format_field(f, 'filesize', ' \t%s', func=format_bytes)
                    or format_field(f, 'filesize_approx', '≈\t%s', func=format_bytes)
                    or format_field(try_call(lambda: format_bytes(int(info_dict['duration'] * f['tbr'] * (1024 / 8)))),
                                    None, self._format_out('~\t%s', self.Styles.SUPPRESS))),
                format_field(f, 'tbr', '\t%dk', func=round),
                shorten_protocol_name(f.get('protocol', '')),
                delim,
                simplified_codec(f, 'vcodec'),
                format_field(f, 'vbr', '\t%dk', func=round),
                simplified_codec(f, 'acodec'),
                format_field(f, 'abr', '\t%dk', func=round),
                format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
                join_nonempty(format_field(f, 'language', '[%s]'), join_nonempty(
                    self._format_out('UNSUPPORTED', self.Styles.BAD_FORMAT) if f.get('ext') in ('f4f', 'f4m') else None,
                    (self._format_out('Maybe DRM', self.Styles.WARNING) if f.get('has_drm') == 'maybe'
                     else self._format_out('DRM', self.Styles.BAD_FORMAT) if f.get('has_drm') else None),
                    format_field(f, 'format_note'),
                    format_field(f, 'container', ignore=(None, f.get('ext'))),
                    delim=', '), delim=' '),
            ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
        header_line = self._list_format_headers(
            'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', 'CH', delim, '\tFILESIZE', '\tTBR', 'PROTO',
            delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')

        return render_table(
            header_line, table, hide_empty=True,
            delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))

    def render_thumbnails_table(self, info_dict):
        thumbnails = list(info_dict.get('thumbnails') or [])
        if not thumbnails:
            return None
        return render_table(
            self._list_format_headers('ID', 'Width', 'Height', 'URL'),
            [[t.get('id'), t.get('width') or 'unknown', t.get('height') or 'unknown', t['url']] for t in thumbnails])

    def render_subtitles_table(self, video_id, subtitles):
        def _row(lang, formats):
            exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
            if len(set(names)) == 1:
                names = [] if names[0] == 'unknown' else names[:1]
            return [lang, ', '.join(names), ', '.join(exts)]

        if not subtitles:
            return None
        return render_table(
            self._list_format_headers('Language', 'Name', 'Formats'),
            [_row(lang, formats) for lang, formats in subtitles.items()],
            hide_empty=True)

    def __list_table(self, video_id, name, func, *args):
        table = func(*args)
        if not table:
            self.to_screen(f'{video_id} has no {name}')
            return
        self.to_screen(f'[info] Available {name} for {video_id}:')
        self.to_stdout(table)

    def list_formats(self, info_dict):
        self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)

    def list_thumbnails(self, info_dict):
        self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)

    def list_subtitles(self, video_id, subtitles, name='subtitles'):
        self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)

    def print_debug_header(self):
        if not self.params.get('verbose'):
            return

        from . import _IN_CLI  # Must be delayed import

        # These imports can be slow. So import them only as needed
        from .extractor.extractors import _LAZY_LOADER
        from .extractor.extractors import (
            _PLUGIN_CLASSES as plugin_ies,
            _PLUGIN_OVERRIDES as plugin_ie_overrides,
        )

        def get_encoding(stream):
            ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
            additional_info = []
            if os.environ.get('TERM', '').lower() == 'dumb':
                additional_info.append('dumb')
            if not supports_terminal_sequences(stream):
                from .utils import WINDOWS_VT_MODE  # Must be imported locally
                additional_info.append('No VT' if WINDOWS_VT_MODE is False else 'No ANSI')
            if additional_info:
                ret = f'{ret} ({",".join(additional_info)})'
            return ret

        encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            self.get_encoding(),
            ', '.join(
                f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
                if stream is not None and key != 'console'))

        logger = self.params.get('logger')
        if logger:
            write_debug = lambda msg: logger.debug(f'[debug] {msg}')
            write_debug(encoding_str)
        else:
            write_string(f'[debug] {encoding_str}\n', encoding=None)
            write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')

        source = detect_variant()
        if VARIANT not in (None, 'pip'):
            source += '*'
        klass = type(self)
        write_debug(join_nonempty(
            f'{REPOSITORY.rpartition("/")[2]} version',
            _make_label(ORIGIN, CHANNEL.partition('@')[2] or __version__, __version__),
            f'[{RELEASE_GIT_HEAD[:9]}]' if RELEASE_GIT_HEAD else '',
            '' if source == 'unknown' else f'({source})',
            '' if _IN_CLI else 'API' if klass == YoutubeDL else f'API:{self.__module__}.{klass.__qualname__}',
            delim=' '))

        if not _IN_CLI:
            write_debug(f'params: {self.params}')

        if not _LAZY_LOADER:
            if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
                write_debug('Lazy loading extractors is forcibly disabled')
            else:
                write_debug('Lazy loading extractors is disabled')
        if self.params['compat_opts']:
            write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))

        if current_git_head():
            write_debug(f'Git HEAD: {current_git_head()}')
        write_debug(system_identifier())

        exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
        ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
        if ffmpeg_features:
            exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))

        exe_versions['rtmpdump'] = rtmpdump_version()
        exe_versions['phantomjs'] = PhantomJSwrapper._version()
        exe_str = ', '.join(
            f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
        ) or 'none'
        write_debug('exe versions: %s' % exe_str)

        from .compat.compat_utils import get_package_info
        from .dependencies import available_dependencies

        write_debug('Optional libraries: %s' % (', '.join(sorted({
            join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
        })) or 'none'))

        write_debug(f'Proxy map: {self.proxies}')
        write_debug(f'Request Handlers: {", ".join(rh.RH_NAME for rh in self._request_director.handlers.values())}')

        for plugin_type, plugins in {'Extractor': plugin_ies, 'Post-Processor': plugin_pps}.items():
            display_list = ['%s%s' % (
                klass.__name__, '' if klass.__name__ == name else f' as {name}')
                for name, klass in plugins.items()]
            if plugin_type == 'Extractor':
                display_list.extend(f'{plugins[-1].IE_NAME.partition("+")[2]} ({parent.__name__})'
                                    for parent, plugins in plugin_ie_overrides.items())
            if not display_list:
                continue
            write_debug(f'{plugin_type} Plugins: {", ".join(sorted(display_list))}')

        plugin_dirs = plugin_directories()
        if plugin_dirs:
            write_debug(f'Plugin directories: {plugin_dirs}')

        # Not implemented
        if False and self.params.get('call_home'):
            ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
            write_debug('Public IP address: %s' % ipaddr)
            latest_version = self.urlopen(
                'https://yt-dl.org/latest/version').read().decode()
            if version_tuple(latest_version) > version_tuple(__version__):
                self.report_warning(
                    'You are using an outdated version (newest version: %s)! '
                    'See https://yt-dl.org/update if you need help updating.' %
                    latest_version)

    @functools.cached_property
    def proxies(self):
        """Global proxy configuration"""
        opts_proxy = self.params.get('proxy')
        if opts_proxy is not None:
            if opts_proxy == '':
                opts_proxy = '__noproxy__'
            proxies = {'all': opts_proxy}
        else:
            proxies = urllib.request.getproxies()
            # compat. Set HTTPS_PROXY to __noproxy__ to revert
            if 'http' in proxies and 'https' not in proxies:
                proxies['https'] = proxies['http']

        return proxies

    @functools.cached_property
    def cookiejar(self):
        """Global cookiejar instance"""
        return load_cookies(
            self.params.get('cookiefile'), self.params.get('cookiesfrombrowser'), self)

    @property
    def _opener(self):
        """
        Get a urllib OpenerDirector from the Urllib handler (deprecated).
        """
        self.deprecation_warning('YoutubeDL._opener is deprecated, use YoutubeDL.urlopen()')
        handler = self._request_director.handlers['Urllib']
        return handler._get_instance(cookiejar=self.cookiejar, proxies=self.proxies)

    def _get_available_impersonate_targets(self):
        # todo(future): make available as public API
        return [
            (target, rh.RH_NAME)
            for rh in self._request_director.handlers.values()
            if isinstance(rh, ImpersonateRequestHandler)
            for target in rh.supported_targets
        ]

    def _impersonate_target_available(self, target):
        # todo(future): make available as public API
        return any(
            rh.is_supported_target(target)
            for rh in self._request_director.handlers.values()
            if isinstance(rh, ImpersonateRequestHandler))

    def urlopen(self, req):
        """ Start an HTTP download """
        if isinstance(req, str):
            req = Request(req)
        elif isinstance(req, urllib.request.Request):
            self.deprecation_warning(
                'Passing a urllib.request.Request object to YoutubeDL.urlopen() is deprecated. '
                'Use yt_dlp.networking.common.Request instead.')
            req = urllib_req_to_req(req)
        assert isinstance(req, Request)

        # compat: Assume user:pass url params are basic auth
        url, basic_auth_header = extract_basic_auth(req.url)
        if basic_auth_header:
            req.headers['Authorization'] = basic_auth_header
        req.url = sanitize_url(url)

        clean_proxies(proxies=req.proxies, headers=req.headers)
        clean_headers(req.headers)

        try:
            return self._request_director.send(req)
        except NoSupportingHandlers as e:
            for ue in e.unsupported_errors:
                # FIXME: This depends on the order of errors.
                if not (ue.handler and ue.msg):
                    continue
                if ue.handler.RH_KEY == 'Urllib' and 'unsupported url scheme: "file"' in ue.msg.lower():
                    raise RequestError(
                        'file:// URLs are disabled by default in yt-dlp for security reasons. '
                        'Use --enable-file-urls to enable at your own risk.', cause=ue) from ue
                if (
                    'unsupported proxy type: "https"' in ue.msg.lower()
                    and 'requests' not in self._request_director.handlers
                    and 'curl_cffi' not in self._request_director.handlers
                ):
                    raise RequestError(
                        'To use an HTTPS proxy for this request, one of the following dependencies needs to be installed: requests, curl_cffi')

                elif (
                    re.match(r'unsupported url scheme: "wss?"', ue.msg.lower())
                    and 'websockets' not in self._request_director.handlers
                ):
                    raise RequestError(
                        'This request requires WebSocket support. '
                        'Ensure one of the following dependencies are installed: websockets',
                        cause=ue) from ue

                elif re.match(r'unsupported (?:extensions: impersonate|impersonate target)', ue.msg.lower()):
                    raise RequestError(
                        f'Impersonate target "{req.extensions["impersonate"]}" is not available.'
                        f' See --list-impersonate-targets for available targets.'
                        f' This request requires browser impersonation, however you may be missing dependencies'
                        f' required to support this target.', cause=ue) from ue
            raise
        except SSLError as e:
            if 'UNSAFE_LEGACY_RENEGOTIATION_DISABLED' in str(e):
                raise RequestError('UNSAFE_LEGACY_RENEGOTIATION_DISABLED: Try using --legacy-server-connect', cause=e) from e
            elif 'SSLV3_ALERT_HANDSHAKE_FAILURE' in str(e):
                raise RequestError(
                    'SSLV3_ALERT_HANDSHAKE_FAILURE: The server may not support the current cipher list. '
                    'Try using --legacy-server-connect', cause=e) from e
            raise
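
    # Usage sketch (Request is the yt_dlp.networking.Request imported at the top):
    #
    #   response = ydl.urlopen(Request('https://example.com', headers={'Accept': '*/*'}))
    #   data = response.read()
    #
    # Plain URL strings are also accepted and wrapped into a Request above.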

    def build_request_director(self, handlers, preferences=None):
        logger = _YDLLogger(self)
        headers = self.params['http_headers'].copy()
        proxies = self.proxies.copy()
        clean_headers(headers)
        clean_proxies(proxies, headers)

        director = RequestDirector(logger=logger, verbose=self.params.get('debug_printtraffic'))
        for handler in handlers:
            director.add_handler(handler(
                logger=logger,
                headers=headers,
                cookiejar=self.cookiejar,
                proxies=proxies,
                prefer_system_certs='no-certifi' in self.params['compat_opts'],
                verify=not self.params.get('nocheckcertificate'),
                **traverse_obj(self.params, {
                    'verbose': 'debug_printtraffic',
                    'source_address': 'source_address',
                    'timeout': 'socket_timeout',
                    'legacy_ssl_support': 'legacyserverconnect',
                    'enable_file_urls': 'enable_file_urls',
                    'impersonate': 'impersonate',
                    'client_cert': {
                        'client_certificate': 'client_certificate',
                        'client_certificate_key': 'client_certificate_key',
                        'client_certificate_password': 'client_certificate_password',
                    },
                }),
            ))
        director.preferences.update(preferences or [])
        if 'prefer-legacy-http-handler' in self.params['compat_opts']:
            director.preferences.add(lambda rh, _: 500 if rh.RH_KEY == 'Urllib' else 0)
        return director
4207 def _request_director(self
):
4208 return self
.build_request_director(_REQUEST_HANDLERS
.values(), _RH_PREFERENCES
)
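
    # Preference sketch (an assumption about RequestDirector semantics, inferred
    # from the compat option above): entries in director.preferences are callables
    # (rh, request) -> int whose results bias handler ordering, e.g. the
    # 'prefer-legacy-http-handler' lambda boosts the Urllib handler by 500.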

    def encode(self, s):
        if isinstance(s, bytes):
            return s  # Already encoded

        try:
            return s.encode(self.get_encoding())
        except UnicodeEncodeError as err:
            err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
            raise

    def get_encoding(self):
        encoding = self.params.get('encoding')
        if encoding is None:
            encoding = preferredencoding()
        return encoding

    def _write_info_json(self, label, ie_result, infofn, overwrite=None):
        ''' Write infojson and returns True = written, 'exists' = Already exists, False = skip, None = error '''
        if overwrite is None:
            overwrite = self.params.get('overwrites', True)
        if not self.params.get('writeinfojson'):
            return False
        elif not infofn:
            self.write_debug(f'Skipping writing {label} infojson')
            return False
        elif not self._ensure_dir_exists(infofn):
            return None
        elif not overwrite and os.path.exists(infofn):
            self.to_screen(f'[info] {label.title()} metadata is already present')
            return 'exists'

        self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
        try:
            write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
            return True
        except OSError:
            self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
            return None

    def _write_description(self, label, ie_result, descfn):
        ''' Write description and returns True = written, False = skip, None = error '''
        if not self.params.get('writedescription'):
            return False
        elif not descfn:
            self.write_debug(f'Skipping writing {label} description')
            return False
        elif not self._ensure_dir_exists(descfn):
            return None
        elif not self.params.get('overwrites', True) and os.path.exists(descfn):
            self.to_screen(f'[info] {label.title()} description is already present')
        elif ie_result.get('description') is None:
            self.to_screen(f'[info] There\'s no {label} description to write')
            return False
        else:
            try:
                self.to_screen(f'[info] Writing {label} description to: {descfn}')
                with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                    descfile.write(ie_result['description'])
            except OSError:
                self.report_error(f'Cannot write {label} description file {descfn}')
                return None
        return True

    def _write_subtitles(self, info_dict, filename):
        ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
        ret = []
        subtitles = info_dict.get('requested_subtitles')
        if not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
            # Subtitle download errors are already handled as troubles in the relevant IE,
            # so this silently continues when used with an IE that lacks subtitle support
            return ret
        elif not subtitles:
            self.to_screen('[info] There are no subtitles for the requested languages')
            return ret
        sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
        if not sub_filename_base:
            self.to_screen('[info] Skipping writing video subtitles')
            return ret

        for sub_lang, sub_info in subtitles.items():
            sub_format = sub_info['ext']
            sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
            sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
            existing_sub = self.existing_file((sub_filename_final, sub_filename))
            if existing_sub:
                self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
                sub_info['filepath'] = existing_sub
                ret.append((existing_sub, sub_filename_final))
                continue

            self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
            if sub_info.get('data') is not None:
                try:
                    # Use newline='' to prevent conversion of newline characters
                    # See https://github.com/ytdl-org/youtube-dl/issues/10268
                    with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
                        subfile.write(sub_info['data'])
                    sub_info['filepath'] = sub_filename
                    ret.append((sub_filename, sub_filename_final))
                    continue
                except OSError:
                    self.report_error(f'Cannot write video subtitles file {sub_filename}')
                    return None

            try:
                sub_copy = sub_info.copy()
                sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
                self.dl(sub_filename, sub_copy, subtitle=True)
                sub_info['filepath'] = sub_filename
                ret.append((sub_filename, sub_filename_final))
            except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
                msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
                if self.params.get('ignoreerrors') is not True:  # False or 'only_download'
                    if not self.params.get('ignoreerrors'):
                        self.report_error(msg)
                    raise DownloadError(msg)
                self.report_warning(msg)
        return ret

    def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
        ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename); or None if error '''
        write_all = self.params.get('write_all_thumbnails', False)
        thumbnails, ret = [], []
        if write_all or self.params.get('writethumbnail', False):
            thumbnails = info_dict.get('thumbnails') or []
            if not thumbnails:
                self.to_screen(f'[info] There are no {label} thumbnails to download')
                return ret
        multiple = write_all and len(thumbnails) > 1

        if thumb_filename_base is None:
            thumb_filename_base = filename
        if thumbnails and not thumb_filename_base:
            self.write_debug(f'Skipping writing {label} thumbnail')
            return ret

        if thumbnails and not self._ensure_dir_exists(filename):
            return None

        for idx, t in list(enumerate(thumbnails))[::-1]:
            thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
            thumb_display_id = f'{label} thumbnail {t["id"]}'
            thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
            thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))

            existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
            if existing_thumb:
                self.to_screen('[info] %s is already present' % (
                    thumb_display_id if multiple else f'{label} thumbnail').capitalize())
                t['filepath'] = existing_thumb
                ret.append((existing_thumb, thumb_filename_final))
            else:
                self.to_screen(f'[info] Downloading {thumb_display_id} ...')
                try:
                    uf = self.urlopen(Request(t['url'], headers=t.get('http_headers', {})))
                    self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
                    with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    ret.append((thumb_filename, thumb_filename_final))
                    t['filepath'] = thumb_filename
                except network_exceptions as err:
                    if isinstance(err, HTTPError) and err.status == 404:
                        self.to_screen(f'[info] {thumb_display_id.title()} does not exist')
                    else:
                        self.report_warning(f'Unable to download {thumb_display_id}: {err}')
                    thumbnails.pop(idx)
            if ret and not write_all:
                break
        return ret