from .cache import Cache
from .compat import functools, urllib  # isort: split
from .compat import compat_os_name, compat_shlex_quote, urllib_req_to_req
from .cookies import LenientSimpleCookie, load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.common import UnsupportedURLIE
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .networking import HEADRequest, Request, RequestDirector
from .networking.common import _REQUEST_HANDLERS, _RH_PREFERENCES
from .networking.exceptions import (
    HTTPError,
    NoSupportingHandlers,
    RequestError,
    SSLError,
    network_exceptions,
)
from .plugins import directories as plugin_directories
from .postprocessor import _PLUGIN_CLASSES as plugin_pps
from .postprocessor import (
    EmbedThumbnailPP,
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    FFmpegVideoConvertorPP,
    MoveFilesAfterDownloadPP,
    get_postprocessor,
)
from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
from .update import _get_system_deprecation
from .utils import (
    UnavailableVideoError,
    format_decimal_suffix,
    orderedSet_from_options,
    remove_terminal_sequences,
    supports_terminal_sequences,
    windows_enable_vt_mode,
)
from .utils._utils import _YDLLogger
from .utils.networking import (
    HTTPHeaderDict,
    clean_headers,
    clean_proxies,
    std_headers,
)
from .version import CHANNEL, ORIGIN, RELEASE_GIT_HEAD, VARIANT, __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL:
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Since, given a video URL, the downloader doesn't know how
    to extract all the needed information (that is the task of the
    InfoExtractors), it has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor
    extracts all the information about the video or videos the URL
    refers to, and YoutubeDL processes the extracted information,
    possibly using a File Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to
    saturate the object constructor with arguments, it receives a
    dictionary of options instead. These options are available through
    the params attribute for the InfoExtractors to use. The YoutubeDL
    also registers itself as the downloader in charge of the
    InfoExtractors that are added to it, so this is a "mutual
    registration".
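
    A minimal usage sketch (the option values are illustrative, not
    defaults):

        import yt_dlp

        ydl_opts = {
            'format': 'bestaudio',
            'outtmpl': '%(title)s.%(ext)s',
        }
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])

    Available options: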
    username: Username for authentication purposes.
    password: Password for authentication purposes.
    videopassword: Password for accessing a video.
    ap_mso: Adobe Pass multiple-system operator identifier.
    ap_username: Multiple-system operator account username.
    ap_password: Multiple-system operator account password.
    usenetrc: Use netrc for authentication instead.
    netrc_location: Location of the netrc file. Defaults to ~/.netrc.
    netrc_cmd: Use a shell command to get credentials.
    verbose: Print additional info to stdout.
    quiet: Do not print messages to stdout.
    no_warnings: Do not print out anything for warnings.
    forceprint: A dict with keys WHEN mapped to a list of templates to
        print to stdout. The allowed keys are video or any of the
        items in utils.POSTPROCESS_WHEN.
        For compatibility, a single list is also accepted.
    print_to_file: A dict with keys WHEN (same as forceprint) mapped to
        a list of tuples with (template, filename).
    forcejson: Force printing info_dict as JSON.
    dump_single_json: Force printing the info_dict of the whole playlist
        (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
        of 'skip_download' or 'simulate'.
    simulate: Do not download the video files. If unset (or None),
        simulate only if listsubtitles, listformats or list_thumbnails
        is used.
    format: Video format code. See "FORMAT SELECTION" for more details.
        You can also pass a function. The function takes 'ctx' as
        argument and returns the formats to download.
        See "build_format_selector" for an implementation.
    allow_unplayable_formats: Allow unplayable formats to be extracted
        and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
        extracting metadata even if the video is not actually
        available for download (experimental).
    format_sort: A list of fields by which to sort the video formats.
        See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
        for more details.
    prefer_free_formats: Whether to prefer video formats with free containers
        over non-free ones of the same quality.
    allow_multiple_video_streams: Allow multiple video streams to be merged
        into a single file.
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
        into a single file.
    check_formats: Whether to test if the formats are downloadable.
        Can be True (check all), False (check none),
        'selected' (check selected formats),
        or None (check only if requested by extractor).
    paths: Dictionary of output paths. The allowed keys are 'home',
        'temp', and the keys of OUTTMPL_TYPES (in utils/_utils.py).
    outtmpl: Dictionary of templates for output names. Allowed keys
        are 'default' and the keys of OUTTMPL_TYPES (in utils/_utils.py).
        For compatibility with youtube-dl, a single string can also be used.
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names.
    trim_file_name: Limit length of filename (extension excluded).
    windowsfilenames: Force the filenames to be Windows-compatible.
    ignoreerrors: Do not stop on download/postprocessing errors.
        Can be 'only_download' to ignore only download errors.
        Default is 'only_download' for CLI, but False for API.
    skip_playlist_after_errors: Number of allowed failures until the rest of
        the playlist is skipped.
    allowed_extractors: List of regexes to match against extractor names
        that are allowed.
    overwrites: Overwrite all video and metadata files if True,
        overwrite only non-video files if None,
        and don't overwrite any file if False.
    playlist_items: Specific indices of playlist to download.
    playlistrandom: Download playlist items in random order.
    lazy_playlist: Process playlist entries as they are received.
    matchtitle: Download only matching titles.
    rejecttitle: Reject downloads for matching titles.
    logger: Log messages to a logging.Logger instance.
    logtostderr: Print everything to stderr instead of stdout.
    consoletitle: Display progress in the console window's titlebar.
    writedescription: Write the video description to a .description file.
    writeinfojson: Write the video metadata to a .info.json file.
    clean_infojson: Remove internal metadata from the infojson.
    getcomments: Extract video comments. These will not be written to disk
        unless writeinfojson is also given.
    writeannotations: Write the video annotations to a .annotations.xml file.
    writethumbnail: Write the thumbnail image to a file.
    allow_playlist_files: Whether to also write playlists' description,
        infojson, etc. to disk when using the 'write*' options.
    write_all_thumbnails: Write all thumbnail formats to files.
    writelink: Write an internet shortcut file, depending on the
        current platform (.url/.webloc/.desktop).
    writeurllink: Write a Windows internet shortcut file (.url).
    writewebloclink: Write a macOS internet shortcut file (.webloc).
    writedesktoplink: Write a Linux internet shortcut file (.desktop).
    writesubtitles: Write the video subtitles to a file.
    writeautomaticsub: Write the automatically generated subtitles to a file.
    listsubtitles: Lists all available subtitles for the video.
    subtitlesformat: The format code for subtitles.
    subtitleslangs: List of languages of the subtitles to download (can be regex).
        The list may contain "all" to refer to all the available
        subtitles. A language can be prefixed with a "-" to
        exclude it from the requested languages, e.g. ['all', '-live_chat'].
    keepvideo: Keep the video file after post-processing.
    daterange: A utils.DateRange object; download only if the upload_date
        is in the range.
    skip_download: Skip the actual download of the video file.
    cachedir: Location of the cache files in the filesystem.
        False to disable filesystem cache.
    noplaylist: Download single video instead of a playlist if in doubt.
    age_limit: An integer representing the user's age in years.
        Videos unsuitable for the given age are skipped.
    min_views: An integer representing the minimum view count the video
        must have in order to not be skipped.
        Videos without view count information are always
        downloaded. None for no limit.
    max_views: An integer representing the maximum view count.
        Videos that are more popular than that are not
        downloaded.
        Videos without view count information are always
        downloaded. None for no limit.
    download_archive: A set, or the name of a file where all downloads are
        recorded. Videos already present in the file are not downloaded again.
    break_on_existing: Stop the download process after attempting to download
        a file that is in the archive.
    break_per_url: Whether break_on_reject and break_on_existing
        should act on each input URL as opposed to for the entire queue.
    cookiefile: File name or text stream from where cookies should be
        read and dumped to.
    cookiesfrombrowser: A tuple containing the name of the browser, the
        profile name/path from where cookies are loaded, the name of the
        keyring, and the container name, e.g. ('chrome', ) or
        ('vivaldi', 'default', 'BASICTEXT') or ('firefox', 'default', None, 'Meta').
    legacyserverconnect: Explicitly allow HTTPS connection to servers that
        do not support RFC 5746 secure renegotiation.
    nocheckcertificate: Do not verify SSL certificates.
    client_certificate: Path to client certificate file in PEM format.
        May include the private key.
    client_certificate_key: Path to private key file for client certificate.
    client_certificate_password: Password for client certificate private key,
        if encrypted. If not provided and the key is encrypted, yt-dlp
        will ask interactively.
    prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
        (Only supported by some extractors.)
    enable_file_urls: Enable file:// URLs. This is disabled by default
        for security reasons.
    http_headers: A dictionary of custom headers to be used for all requests.
    proxy: URL of the proxy server to use.
    geo_verification_proxy: URL of the proxy to use for IP address verification
        on geo-restricted sites.
    socket_timeout: Time to wait for unresponsive hosts, in seconds.
    bidi_workaround: Work around buggy terminals without bidirectional text
        support, using fribidi.
    debug_printtraffic: Print out sent and received HTTP traffic.
    default_search: Prepend this string if an input URL is not valid.
        'auto' for elaborate guessing.
    encoding: Use this encoding instead of the system-specified one.
    extract_flat: Whether to resolve and process url_results further
        * False: Always process. Default for API
        * True: Never process
        * 'in_playlist': Do not process inside playlist/multi_video
        * 'discard': Always process, but don't return the result
          from inside playlist/multi_video
        * 'discard_in_playlist': Same as "discard", but only for
          playlists (not multi_video). Default for CLI
    wait_for_video: If given, wait for scheduled streams to become available.
        The value should be a tuple containing the range
        (min_secs, max_secs) to wait between retries.
    postprocessors: A list of dictionaries, each with an entry
        * key: The name of the postprocessor. See
          yt_dlp/postprocessor/__init__.py for a list.
        * when: When to run the postprocessor. Allowed values are
          the entries of utils.POSTPROCESS_WHEN.
          Assumed to be 'post_process' if not given.
    progress_hooks: A list of functions that get called on download
        progress, with a dictionary with the entries
        * status: One of "downloading", "error", or "finished".
          Check this first and ignore unknown values.
        * info_dict: The extracted info_dict

        If status is "downloading" or "finished", the
        following properties may also be present:
        * filename: The final filename (always present)
        * tmpfilename: The filename we're currently writing to
        * downloaded_bytes: Bytes on disk
        * total_bytes: Size of the whole file, None if unknown
        * total_bytes_estimate: Guess of the eventual file size,
          None if unavailable.
        * elapsed: The number of seconds since download started.
        * eta: The estimated time in seconds, None if unknown
        * speed: The download speed in bytes/second, None if
          unknown
        * fragment_index: The counter of the currently
          downloaded video fragment.
        * fragment_count: The number of fragments (= individual
          files that will be merged)

        Progress hooks are guaranteed to be called at least once
        (with status "finished") if the download is successful.
        A sketch of a hook appears at the end of this docstring.
    postprocessor_hooks: A list of functions that get called on postprocessing
        progress, with a dictionary with the entries
        * status: One of "started", "processing", or "finished".
          Check this first and ignore unknown values.
        * postprocessor: Name of the postprocessor
        * info_dict: The extracted info_dict

        Progress hooks are guaranteed to be called at least twice
        (with status "started" and "finished") if the processing is successful.
    merge_output_format: "/" separated list of extensions to use when merging
        formats.
    final_ext: Expected final extension; used to detect when the file was
        already downloaded and converted.
    fixup: Automatically correct known faults of the file. One of:
        - "never": do nothing
        - "warn": only emit a warning
        - "detect_or_warn": check whether we can do anything
          about it, warn otherwise (default)
    source_address: Client-side IP address to bind to.
    sleep_interval_requests: Number of seconds to sleep between requests
        during extraction.
    sleep_interval: Number of seconds to sleep before each download when
        used alone, or a lower bound of a range for randomized
        sleep before each download (minimum possible number
        of seconds to sleep) when used along with
        max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before
        each download (maximum possible number of seconds to sleep).
        Must only be used along with sleep_interval.
        Actual sleep time will be a random float from range
        [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each
        subtitle download.
    listformats: Print an overview of available video formats and exit.
    list_thumbnails: Print a table of all thumbnails and exit.
    match_filter: A function that gets called for every video with the
        signature (info_dict, *, incomplete: bool) -> Optional[str]
        For backward compatibility with youtube-dl, the signature
        (info_dict) -> Optional[str] is also allowed.
        - If it returns a message, the video is ignored.
        - If it returns None, the video is downloaded.
        - If it returns utils.NO_DEFAULT, the user is interactively
          asked whether to download the video.
        - Raise utils.DownloadCancelled(msg) to abort remaining
          downloads when a video is rejected.
        match_filter_func in utils/_utils.py is one example for this;
        another sketch appears at the end of this docstring.
    color: A Dictionary with output stream names as keys
        and their respective color policy as values.
        Can also just be a single color policy,
        in which case it applies to all outputs.
        Valid stream names are 'stdout' and 'stderr'.
        Valid color policies are one of 'always', 'auto', 'no_color' or 'never'.
    geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
        HTTP header.
    geo_bypass_country: Two-letter ISO 3166-2 country code that will be used
        for explicit geographic restriction bypassing via faking the
        X-Forwarded-For HTTP header.
    geo_bypass_ip_block: IP range in CIDR notation that will be used
        similarly to geo_bypass_country.
    external_downloader: A dictionary of protocol keys and the executable of
        the external downloader to use for it. The allowed protocols
        are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
        Set the value to 'native' to use the native downloader.
    compat_opts: Compatibility options. See "Differences in default behavior".
        The following options do not work when used through the API:
        filename, abort-on-error, multistreams, no-live-chat, format-sort,
        no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
        Refer to __init__.py for their implementation.
    progress_template: Dictionary of templates for progress outputs.
        Allowed keys are 'download', 'postprocess',
        'download-title' (console title) and 'postprocess-title'.
        The template is mapped on a dictionary with keys 'progress' and 'info'.
    retry_sleep_functions: Dictionary of functions that take the number of
        attempts as argument and return the time to sleep in seconds.
        Allowed keys are 'http', 'fragment', 'file_access'.
    download_ranges: A callback function that gets called for every video with
        the signature (info_dict, ydl) -> Iterable[Section].
        Only the returned sections will be downloaded.
        Each Section is a dict with the following keys:
        * start_time: Start time of the section in seconds
        * end_time: End time of the section in seconds
        * title: Section title (Optional)
        * index: Section number (Optional)
    force_keyframes_at_cuts: Re-encode the video when downloading ranges to
        get precise cuts.
    noprogress: Do not print the progress bar.
    live_from_start: Whether to download livestreams from the start.
    The following parameters are not used by YoutubeDL itself; they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
        to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in
        lower case) and a list of additional command-line arguments for the
        postprocessor/executable. The dict can also have "PP+EXE" keys
        which are used when the given exe is used by the given PP.
        Use 'default' as the name for arguments to be passed to all PP.
        For compatibility with youtube-dl, a single list of args
        can also be used.

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors (default: 3)
    dynamic_mpd: Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists to different formats at
        discontinuities such as ad breaks (default: False)
    extractor_args: A dictionary of arguments to be passed to the extractors.
        See "EXTRACTOR ARGUMENTS" for details.
        E.g. {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched: Mark videos watched (even with --simulate). Only for YouTube.
    The following options are deprecated and may be removed in the future:

    break_on_reject: Stop the download process when encountering a video that
        has been filtered out.
        - `raise DownloadCancelled(msg)` in match_filter instead
    force_generic_extractor: Force downloader to use the generic extractor.
        - Use allowed_extractors = ['generic', 'default']
    playliststart: - Use playlist_items
        Playlist item to start at.
    playlistend: - Use playlist_items
        Playlist item to end at.
    playlistreverse: - Use playlist_items
        Download playlist items in reverse order.
    forceurl: - Use forceprint
        Force printing final URL.
    forcetitle: - Use forceprint
        Force printing title.
    forceid: - Use forceprint
        Force printing ID.
    forcethumbnail: - Use forceprint
        Force printing thumbnail URL.
    forcedescription: - Use forceprint
        Force printing description.
    forcefilename: - Use forceprint
        Force printing final filename.
    forceduration: - Use forceprint
        Force printing duration.
    allsubtitles: - Use subtitleslangs = ['all']
        Downloads all the subtitles of the video
        (requires writesubtitles or writeautomaticsub)
    include_ads: - Doesn't work
        Download ads as well.
    call_home: - Not implemented
        Boolean, true iff we are allowed to contact the
        yt-dlp servers for debugging.
    post_hooks: - Register a custom postprocessor
        A list of functions that get called as the final step
        for each video file, after all postprocessors have been
        called. The filename will be passed as the only argument.
    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
        Use the native HLS downloader instead of ffmpeg/avconv
        if True, otherwise use ffmpeg/avconv if False, otherwise
        use downloader suggested by extractor if None.
    prefer_ffmpeg: - avconv support is deprecated
        If False, use avconv instead of ffmpeg if both are available,
        otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
        If True (default), DASH manifests and related
        data will be downloaded and processed by extractor.
        You can reduce network I/O by disabling it if you don't
        care about DASH. (only for youtube)
    youtube_include_hls_manifest: - Use extractor_args
        If True (default), HLS manifests and related
        data will be downloaded and processed by extractor.
        You can reduce network I/O by disabling it if you don't
        care about HLS. (only for youtube)
    no_color: Same as `color='no_color'`
    no_overwrites: Same as `overwrites=False`
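
    Sketches of the callback options above (hypothetical function names;
    the shapes follow the descriptions of match_filter and progress_hooks):

        def my_match_filter(info_dict, *, incomplete):
            if (info_dict.get('duration') or 0) > 3600:
                return 'Skipping video longer than an hour'
            return None  # None means: download the video

        def my_progress_hook(progress):
            if progress['status'] == 'finished':
                print('Downloaded', progress['filename'])

        params = {'match_filter': my_match_filter,
                  'progress_hooks': [my_progress_hook]}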
    """

    _NUMERIC_FIELDS = {
        'width', 'height', 'asr', 'audio_channels', 'fps',
        'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
    }

    _format_fields = {
        # NB: Keep in sync with the docstring of extractor/common.py
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
        'preference', 'language', 'language_preference', 'quality', 'source_preference', 'cookies',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'hls_aes', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time',
    }
    _format_selection_exts = {
        'audio': set(MEDIA_EXTENSIONS.common_audio),
        'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
        'storyboards': set(MEDIA_EXTENSIONS.storyboards),
    }

    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        if params is None:
            params = {}
        self.params = params
        self._ies = {}
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._num_videos = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)
        self.__header_cookies = []

        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            out=stdout,
            error=sys.stderr,
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None),
        )

        try:
            windows_enable_vt_mode()
        except Exception as e:
            self.write_debug(f'Failed to enable VT mode: {e}')

        if self.params.get('no_color'):
            if self.params.get('color') is not None:
                self.params.setdefault('_warnings', []).append(
                    'Overwriting params from "color" with "no_color"')
            self.params['color'] = 'no_color'

        term_allow_color = os.getenv('TERM', '').lower() != 'dumb'
        no_color = bool(os.getenv('NO_COLOR'))

        def process_color_policy(stream):
            stream_name = {sys.stdout: 'stdout', sys.stderr: 'stderr'}[stream]
            policy = traverse_obj(self.params, ('color', (stream_name, None), {str}), get_all=False)
            if policy in ('auto', None):
                if term_allow_color and supports_terminal_sequences(stream):
                    return 'no_color' if no_color else True
                return False
            assert policy in ('always', 'never', 'no_color'), policy
            return {'always': True, 'never': False}.get(policy, policy)

        self._allow_colors = Namespace(**{
            name: process_color_policy(stream)
            for name, stream in self._out_files.items_ if name != 'console'
        })

        system_deprecation = _get_system_deprecation()
        if system_deprecation:
            self.deprecated_feature(system_deprecation.replace('\n', '\n                    '))

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                '         If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        if self.params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = shutil.get_terminal_size().columns
                width_args = [] if width is None else ['-w', str(width)]
                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        self.params['http_headers'] = HTTPHeaderDict(std_headers, self.params.get('http_headers'))
        self._load_cookies(self.params['http_headers'].get('Cookie'))  # compat
        self.params['http_headers'].pop('Cookie', None)
        self._request_director = self.build_request_director(_REQUEST_HANDLERS.values(), _RH_PREFERENCES)

        if auto_init and auto_init != 'no_verbose_header':
            self.print_debug_header()

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecated_feature(msg)

        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        if self.params.get('simulate') is None and any((
            self.params.get('list_thumbnails'),
            self.params.get('listformats'),
            self.params.get('listsubtitles'),
        )):
            self.params['simulate'] = 'list_only'

        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}

        if auto_init:
            self.add_default_info_extractors()

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))

        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                when=when)

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            archive = set()
            if fn is None:
                return archive
            elif not is_path_like(fn):
                return fn

            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
            return archive

        self.archive = preload_download_archive(self.params.get('download_archive'))

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key. It will first try to get
        one from the _ies list; if there is no instance, it will create a
        new one and add it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        all_ies = {ie.IE_NAME.lower(): ie for ie in gen_extractor_classes()}
        all_ies['end'] = UnsupportedURLIE()
        try:
            ie_names = orderedSet_from_options(
                self.params.get('allowed_extractors', ['default']), {
                    'all': list(all_ies),
                    'default': [name for name, ie in all_ies.items() if ie._ENABLED],
                }, use_regex=True)
        except re.error as e:
            raise ValueError(f'Wrong regex for allowed_extractors: {e.pattern}')
        for name in ie_names:
            self.add_info_extractor(all_ies[name])
        self.write_debug(f'Loaded {len(ie_names)} extractors')

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
            for pp in pps:
                pp.add_progress_hook(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. '
                                     'Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. '
                                     'Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)

    def to_screen(self, message, skip_eol=False, quiet=None, only_once=False):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            return
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            return
        self._write_string(
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen, only_once=only_once)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)

    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
            return
        self._write_string(code, self._out_files.console)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        else:
            self._send_console_code(f'\033]0;{message}\007')

    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[22;0t')  # Save the title on stack

    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[23;0t')  # Restore the title from stack

    def __enter__(self):
        self.save_console_title()
        return self

    def save_cookies(self):
        if self.params.get('cookiefile') is not None:
            self.cookiejar.save()

    def __exit__(self, *args):
        self.restore_console_title()
        self.close()

    def close(self):
        self.save_cookies()
        self._request_director.close()

    def trouble(self, message=None, tb=None, is_error=True):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        @param tb          If given, is additional traceback information
        @param is_error    Whether to raise error according to ignoreerrors
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not is_error:
            return
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    Styles = Namespace(
        HEADERS='yellow',
        EMPHASIS='light blue',
        FILENAME='green',
        ID='green',
        DELIM='blue',
        ERROR='red',
        BAD_FORMAT='light red',
        WARNING='yellow',
        SUPPRESS='light black',
    )

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        text = str(text)
        if test_encoding:
            original_text = text
            # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
                text = fallback
        return format_text(text, f) if allow_colors is True else text if fallback is None else fallback

    def _format_out(self, *args, **kwargs):
        return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)

    def _format_screen(self, *args, **kwargs):
        return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)

    def _format_err(self, *args, **kwargs):
        return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message, *, stacklevel=0):
        deprecation_warning(
            message, stacklevel=stacklevel + 1, printer=self.report_error, is_error=False)

    def deprecated_feature(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'Deprecated Feature: {message}')
        self.to_stderr(f'{self._format_err("Deprecated Feature:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, *args, **kwargs):
        '''
        Do the same as trouble, but prefixing the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

    def write_debug(self, message, only_once=False):
        '''Log the debug message, or print it to stderr'''
        if not self.params.get('verbose', False):
            return
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or (has_drm and 'This video is DRM protected') or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']

    def _parse_outtmpl(self):
        sanitize = IDENTITY
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict), '"paths" parameter must be a dictionary'
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$';
        # correspondingly, that is not what we want since we need to keep
        # '%%' intact for the template dict substitution step. Working around
        # with a boundary-alike separator hack.
        sep = ''.join(random.choices(string.ascii_letters, k=32))
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. E.g. for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err

    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)
        return info_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed
        """

        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int, slice or "{field, ...}"
        FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}
        FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {
            'inner': FIELD_INNER_RE,
            'field': rf'\w*(?:\.{FIELD_INNER_RE})*',
        }
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?xs)
            (?P<negate>-)?
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<remaining>
                (?P<alternate>(?<!\\),[^|&)]+)?
                (?:&(?P<replacement>.*?))?
                (?:\|(?P<default>.*?))?
            )$''')
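
        # A rough illustration of the grammar matched above (values are
        # hypothetical): '%(title)s' is plain field traversal,
        # '%(epoch-3600>%H-%M-%S)s' applies maths and then strftime,
        # '%(id,title)s' falls back through alternate fields, and
        # '%(chapters&has chapters|no chapters)s' uses replacement and default.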

        def _from_user_input(field):
            if field == ':':
                return ...
            elif ':' in field:
                return slice(*map(int_or_none, field.split(':')))
            elif int_or_none(field) is not None:
                return int(field)
            return field

        def _traverse_infodict(fields):
            fields = [f for x in re.split(r'\.({.+?})\.?', fields)
                      for f in ([x] if x.startswith('{') else x.split('.'))]
            for i in (0, -1):
                if fields and not fields[i]:
                    fields.pop(i)

            for i, f in enumerate(fields):
                if not f.startswith('{'):
                    fields[i] = _from_user_input(f)
                    continue
                assert f.endswith('}'), f'No closing brace for {f} in {fields}'
                fields[i] = {k: list(map(_from_user_input, k.split('.'))) for k in f[1:-1].split(',')}

            return traverse_obj(info_dict, fields, traverse_string=True)

        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
            if sanitize and value == '':
                value = None
            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']
                else NO_DEFAULT))

        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            return repr(obj)

        class _ReplacementFormatter(string.Formatter):
            def get_field(self, field_name, args, kwargs):
                if field_name.isdigit():
                    return args[0], -1
                raise ValueError('Unsupported field')

        replacement_formatter = _ReplacementFormatter()

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            value, replacement, default, last_field = None, None, na, ''
            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                last_field, replacement = mobj['fields'], mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
                else:
                    break

            if None not in (value, replacement):
                try:
                    value = replacement_formatter.format(replacement, value)
                except ValueError:
                    value, default = None, na

            fmt = outer_mobj.group('format')
            if fmt == 's' and last_field in field_size_compat_map.keys() and isinstance(value, int):
                fmt = f'0{field_size_compat_map[last_field]:d}d'

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
            if value is None:
                value, fmt = default, 's'
            elif fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(
                    value, default=_dumpjson_default,
                    indent=4 if '#' in flags else None, ensure_ascii='+' not in flags), str_fmt
            elif fmt[-1] == 'h':  # html
                value, fmt = escapeHTML(str(value)), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode() % str(value).encode()
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                    value), str_fmt
            elif fmt[-1] == 'D':  # decimal suffix
                num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
                value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
                                              factor=1024 if '#' in flags else 1000)
            elif fmt[-1] == 'S':  # filename sanitization
                value, fmt = filename_sanitizer(last_field, value, restricted='#' in flags), str_fmt
            elif fmt[-1] == 'c':
                if value:
                    value = str(value)[0]
                else:
                    fmt = str_fmt
            elif fmt[-1] not in 'rsa':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'

            if sanitize:
                # If value is an object, sanitize might convert it to a string
                # So we convert it to repr first
                if fmt[-1] == 'r':
                    value, fmt = repr(value), str_fmt
                elif fmt[-1] == 'a':
                    value, fmt = ascii(value), str_fmt
            if fmt[-1] in 'csra':
                value = sanitizer(last_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT

    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict
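
    # How the pieces above compose (a sketch; template and values are
    # hypothetical): prepare_outtmpl() rewrites every %(...)X field into a
    # NUL-escaped key stored in TMPL_DICT, escape_outtmpl() escapes any stray
    # '%', and the final %-substitution produces the text. Roughly,
    # evaluate_outtmpl('%(title)s-%(id)s.%(ext)s', info) behaves like
    # '%(title)s-%(id)s.%(ext)s' % {'title': info['title'], ...}.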

    def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
        assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
        if outtmpl is None:
            outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
        try:
            outtmpl = self._outtmpl_expandpath(outtmpl)
            filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
            if not filename:
                return None

            if tmpl_type in ('', 'temp'):
                final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
                if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
                    filename = replace_extension(filename, ext, final_ext)
            elif tmpl_type:
                force_ext = OUTTMPL_TYPES[tmpl_type]
                if force_ext:
                    filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                no_ext, *ext = filename.rsplit('.', 2)
                filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

    def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
        """Generate the output filename"""
        if outtmpl:
            assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
            dir_type = None
        filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
        if not filename and dir_type not in ('', 'temp'):
            return ''

        if warn:
            if not self.params.get('paths'):
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
        if filename == '-' or not filename:
            return filename

        return self.get_output_path(dir_type, filename)
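
    # A hypothetical illustration of the two methods above: with
    # params = {'paths': {'home': '/media'}, 'outtmpl': {'default': '%(title)s.%(ext)s'}},
    # prepare_filename() for an entry titled 'abc' with ext 'mp4' would
    # resolve to '/media/abc.mp4'.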

    def _match_entry(self, info_dict, incomplete=False, silent=False):
        """Returns None if the file should be downloaded"""
        _type = 'video' if 'playlist-match-filter' in self.params['compat_opts'] else info_dict.get('_type', 'video')
        assert incomplete or _type == 'video', 'Only video result can be considered complete'

        video_title = info_dict.get('title', info_dict.get('id', 'entry'))

        def check_filter():
            if _type in ('playlist', 'multi_video'):
                return
            elif _type in ('url', 'url_transparent') and not try_call(
                    lambda: self.get_info_extractor(info_dict['ie_key']).is_single_video(info_dict['url'])):
                return

            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'

            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title

            match_filter = self.params.get('match_filter')
            if match_filter is None:
                return None

            cancelled = None
            try:
                try:
                    ret = match_filter(info_dict, incomplete=incomplete)
                except TypeError:
                    # For backward compatibility
                    ret = None if incomplete else match_filter(info_dict)
            except DownloadCancelled as err:
                if err.msg is not NO_DEFAULT:
                    raise
                ret, cancelled = err.msg, err

            if ret is NO_DEFAULT:
                while True:
                    filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
                    reply = input(self._format_screen(
                        f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
                    if reply in {'y', ''}:
                        return None
                    elif reply == 'n':
                        if cancelled:
                            raise type(cancelled)(f'Skipping {video_title}')
                        return f'Skipping {video_title}'
            return ret

        if self.in_download_archive(info_dict):
            reason = ''.join((
                format_field(info_dict, 'id', f'{self._format_screen("%s", self.Styles.ID)}: '),
                format_field(info_dict, 'title', f'{self._format_screen("%s", self.Styles.EMPHASIS)} '),
                'has already been recorded in the archive'))
            break_opt, break_err = 'break_on_existing', ExistingVideoReached
        else:
            try:
                reason = check_filter()
            except DownloadCancelled as e:
                reason, break_opt, break_err = e.msg, 'match_filter', type(e)
            else:
                break_opt, break_err = 'break_on_reject', RejectedVideoReached
        if reason is not None:
            if not silent:
                self.to_screen('[download] ' + reason)
            if self.params.get(break_opt, False):
                raise break_err()
        return reason
    @staticmethod
    def add_extra_info(info_dict, extra_info):
        '''Set the keys from extra_info in info dict if they are missing'''
        for key, value in extra_info.items():
            info_dict.setdefault(key, value)
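
    # E.g. (sketch): add_extra_info only fills in *missing* keys:
    #   info = {'id': 'x', 'extractor': 'Youtube'}
    #   YoutubeDL.add_extra_info(info, {'extractor': 'Generic', 'playlist': None})
    #   # info['extractor'] stays 'Youtube'; info['playlist'] becomes None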
    def extract_info(self, url, download=True, ie_key=None, extra_info=None,
                     process=True, force_generic_extractor=False):
        """
        Extract and return the information dictionary of the URL

        Arguments:
        @param url          URL to extract

        Keyword arguments:
        @param download     Whether to download videos
        @param process      Whether to resolve all unresolved references (URLs, playlist items).
                            Must be True for download to work
        @param ie_key       Use only the extractor with this key

        @param extra_info   Dictionary containing the extra values to add to the info (For internal use only)
        @force_generic_extractor  Force using the generic extractor (Deprecated; use ie_key='Generic')
        """

        if extra_info is None:
            extra_info = {}

        if not ie_key and force_generic_extractor:
            ie_key = 'Generic'

        if ie_key:
            ies = {ie_key: self._ies[ie_key]} if ie_key in self._ies else {}
        else:
            ies = self._ies

        for key, ie in ies.items():
            if not ie.suitable(url):
                continue

            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')

            temp_id = ie.get_temp_id(url)
            if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': key}):
                self.to_screen(f'[download] {self._format_screen(temp_id, self.Styles.ID)}: '
                               'has already been recorded in the archive')
                if self.params.get('break_on_existing', False):
                    raise ExistingVideoReached()
                break
            return self.__extract_info(url, self.get_info_extractor(key), download, extra_info, process)
        else:
            extractors_restricted = self.params.get('allowed_extractors') not in (None, ['default'])
            self.report_error(f'No suitable extractor{format_field(ie_key, None, " (%s)")} found for URL {url}',
                              tb=False if extractors_restricted else None)
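
    # Illustrative usage (sketch; `ydl` is a configured instance and the URL is
    # hypothetical):
    #   info = ydl.extract_info('https://example.com/watch?v=abc', download=False)
    #   # With process=False, 'url'/'playlist' references are returned unresolved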
    def _handle_extraction_exceptions(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            while True:
                try:
                    return func(self, *args, **kwargs)
                except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
                    raise
                except ReExtractInfo as e:
                    if e.expected:
                        self.to_screen(f'{e}; Re-extracting data')
                    else:
                        self.to_stderr('\r')
                        self.report_warning(f'{e}; Re-extracting data')
                    continue
                except GeoRestrictedError as e:
                    msg = e.msg
                    if e.countries:
                        msg += '\nThis video is available in %s.' % ', '.join(
                            map(ISO3166Utils.short2full, e.countries))
                    msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
                    self.report_error(msg)
                except ExtractorError as e:  # An error we somewhat expected
                    self.report_error(str(e), e.format_traceback())
                except Exception as e:
                    if self.params.get('ignoreerrors'):
                        self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
                    else:
                        raise
                break
        return wrapper
    def _wait_for_video(self, ie_result={}):
        if (not self.params.get('wait_for_video')
                or ie_result.get('_type', 'video') != 'video'
                or ie_result.get('formats') or ie_result.get('url')):
            return

        format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
        last_msg = ''

        def progress(msg):
            nonlocal last_msg
            full_msg = f'{msg}\n'
            if not self.params.get('noprogress'):
                full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
            elif self.params.get('quiet'):
                return
            self.to_screen(full_msg, skip_eol=True)
            last_msg = msg

        min_wait, max_wait = self.params.get('wait_for_video')
        diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
        if diff is None and ie_result.get('live_status') == 'is_upcoming':
            diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
            self.report_warning('Release time of video is not known')
        elif ie_result and (diff or 0) <= 0:
            self.report_warning('Video should already be available according to extracted info')
        diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
        self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')

        wait_till = time.time() + diff
        try:
            while True:
                diff = wait_till - time.time()
                if diff <= 0:
                    progress('')
                    raise ReExtractInfo('[wait] Wait period ended', expected=True)
                progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
                time.sleep(1)
        except KeyboardInterrupt:
            progress('')
            raise ReExtractInfo('[wait] Interrupted by user', expected=True)
        except BaseException as e:
            if not isinstance(e, ReExtractInfo):
                self.to_screen('')
            raise
    def _load_cookies(self, data, *, autoscope=True):
        """Loads cookies from a `Cookie` header

        This tries to work around the security vulnerability of passing cookies to every domain.
        See: https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-v8mc-9377-rwjj

        @param data         The Cookie header as string to load the cookies from
        @param autoscope    If `False`, scope cookies using Set-Cookie syntax and error for cookie without domains
                            If `True`, save cookies for later to be stored in the jar with a limited scope
                            If a URL, save cookies in the jar with the domain of the URL
        """
        for cookie in LenientSimpleCookie(data).values():
            if autoscope and any(cookie.values()):
                raise ValueError('Invalid syntax in Cookie Header')

            domain = cookie.get('domain') or ''
            expiry = cookie.get('expires')
            if expiry == '':  # 0 is valid
                expiry = None
            prepared_cookie = http.cookiejar.Cookie(
                cookie.get('version') or 0, cookie.key, cookie.value, None, False,
                domain, True, True, cookie.get('path') or '', bool(cookie.get('path')),
                cookie.get('secure') or False, expiry, False, None, None, {})

            if domain:
                self.cookiejar.set_cookie(prepared_cookie)
            elif autoscope is True:
                self.deprecated_feature(
                    'Passing cookies as a header is a potential security risk; '
                    'they will be scoped to the domain of the downloaded urls. '
                    'Please consider loading cookies from a file or browser instead.')
                self.__header_cookies.append(prepared_cookie)
            elif autoscope:
                self.report_warning(
                    'The extractor result contains an unscoped cookie as an HTTP header. '
                    f'If you are using yt-dlp with an input URL{bug_reports_message(before=",")}',
                    only_once=True)
                self._apply_header_cookies(autoscope, [prepared_cookie])
            else:
                self.report_error('Unscoped cookies are not allowed; please specify some sort of scoping',
                                  tb=False, is_error=False)
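
    # Example of the scoping modes (sketch, values are hypothetical):
    #   self._load_cookies('id=abc', autoscope='https://example.com')
    #       -> scoped to the URL's domain via _apply_header_cookies
    #   self._load_cookies('id=abc; Domain=.example.com; Path=/', autoscope=False)
    #       -> Set-Cookie syntax; omitting Domain here is an error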
    def _apply_header_cookies(self, url, cookies=None):
        """Applies stray header cookies to the provided url

        This loads header cookies and scopes them to the domain provided in `url`.
        While this is not ideal, it helps reduce the risk of them being sent
        to an unintended destination while mostly maintaining compatibility.
        """
        parsed = urllib.parse.urlparse(url)
        if not parsed.hostname:
            return

        for cookie in map(copy.copy, cookies or self.__header_cookies):
            cookie.domain = f'.{parsed.hostname}'
            self.cookiejar.set_cookie(cookie)
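
    # E.g. (sketch): for url='https://media.example.com/v/1', each stray header
    # cookie is copied into the jar with domain '.media.example.com', so it is
    # only sent back to that host and its subdomains instead of to every request.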
    @_handle_extraction_exceptions
    def __extract_info(self, url, ie, download, extra_info, process):
        self._apply_header_cookies(url)

        try:
            ie_result = ie.extract(url)
        except UserNotLive as e:
            if process:
                if self.params.get('wait_for_video'):
                    self.report_warning(e)
                self._wait_for_video()
            raise
        if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
            self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
            return
        if isinstance(ie_result, list):
            # Backwards compatibility: old IE result format
            ie_result = {
                '_type': 'compat_list',
                'entries': ie_result,
            }
        if extra_info.get('original_url'):
            ie_result.setdefault('original_url', extra_info['original_url'])
        self.add_default_extra_info(ie_result, ie, url)
        if process:
            self._wait_for_video(ie_result)
            return self.process_ie_result(ie_result, download, extra_info)
        else:
            return ie_result
    def add_default_extra_info(self, ie_result, ie, url):
        if url is not None:
            self.add_extra_info(ie_result, {
                'webpage_url': url,
                'original_url': url,
            })
        webpage_url = ie_result.get('webpage_url')
        if webpage_url:
            self.add_extra_info(ie_result, {
                'webpage_url_basename': url_basename(webpage_url),
                'webpage_url_domain': get_domain(webpage_url),
            })
        if ie is not None:
            self.add_extra_info(ie_result, {
                'extractor': ie.IE_NAME,
                'extractor_key': ie.ie_key(),
            })
    def process_ie_result(self, ie_result, download=True, extra_info=None):
        """
        Take the result of the ie (may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.
        """
        if extra_info is None:
            extra_info = {}
        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            ie_result['url'] = sanitize_url(
                ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
            if ie_result.get('original_url') and not extra_info.get('original_url'):
                extra_info = {'original_url': ie_result['original_url'], **extra_info}

            extract_flat = self.params.get('extract_flat', False)
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                    or extract_flat is True):
                info_copy = ie_result.copy()
                ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
                if ie and not ie_result.get('id'):
                    info_copy['id'] = ie.get_temp_id(ie_result['url'])
                self.add_default_extra_info(info_copy, ie, ie_result['url'])
                self.add_extra_info(info_copy, extra_info)
                info_copy, _ = self.pre_process(info_copy)
                self._fill_common_fields(info_copy, False)
                self.__forced_printings(info_copy)
                self._raise_pending_errors(info_copy)
                if self.params.get('force_write_download_archive', False):
                    self.record_download_archive(info_copy)
                return ie_result

        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            ie_result = self.process_video_result(ie_result, download=download)
            self._raise_pending_errors(ie_result)
            additional_urls = (ie_result or {}).get('additional_urls')
            if additional_urls:
                # TODO: Improve MetadataParserPP to allow setting a list
                if isinstance(additional_urls, str):
                    additional_urls = [additional_urls]
                self.to_screen(
                    '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
                self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
                ie_result['additional_entries'] = [
                    self.extract_info(
                        url, download, extra_info=extra_info,
                        force_generic_extractor=self.params.get('force_generic_extractor'))
                    for url in additional_urls
                ]
            return ie_result
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(
                ie_result['url'], download,
                ie_key=ie_result.get('ie_key'),
                extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # extract_info may return None when ignoreerrors is enabled and
            # extraction failed with an error; don't crash and return early
            # in this case
            if not info:
                return info

            exempted_fields = {'_type', 'url', 'ie_key'}
            if not ie_result.get('section_end') and ie_result.get('section_start') is None:
                # For video clips, the id etc of the clip extractor should be used
                exempted_fields |= {'id', 'extractor', 'extractor_key'}

            new_result = info.copy()
            new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))

            # Extracted info may not be a video result (i.e.
            # info.get('_type', 'video') != video) but rather an url or
            # url_transparent. In such cases outer metadata (from ie_result)
            # should be propagated to inner one (info). For this to happen
            # _type of info should be overridden with url_transparent. This
            # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
            if new_result.get('_type') == 'url':
                new_result['_type'] = 'url_transparent'

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type in ('playlist', 'multi_video'):
            # Protect from infinite recursion due to recursively nested playlists
            # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
            webpage_url = ie_result.get('webpage_url')  # Playlists may not have webpage_url
            if webpage_url and webpage_url in self._playlist_urls:
                self.to_screen(
                    '[download] Skipping already downloaded playlist: %s'
                    % (ie_result.get('title') or ie_result.get('id')))
                return

            self._playlist_level += 1
            self._playlist_urls.add(webpage_url)
            self._fill_common_fields(ie_result, False)
            self._sanitize_thumbnails(ie_result)
            try:
                return self.__process_playlist(ie_result, download)
            finally:
                self._playlist_level -= 1
                if not self._playlist_level:
                    self._playlist_urls.clear()
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                self.add_extra_info(r, {
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'webpage_url_domain': get_domain(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                })
                return r
            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)
    def _ensure_dir_exists(self, path):
        return make_dir(path, self.report_error)
    @staticmethod
    def _playlist_infodict(ie_result, strict=False, **kwargs):
        info = {
            'playlist_count': ie_result.get('playlist_count'),
            'playlist': ie_result.get('title') or ie_result.get('id'),
            'playlist_id': ie_result.get('id'),
            'playlist_title': ie_result.get('title'),
            'playlist_uploader': ie_result.get('uploader'),
            'playlist_uploader_id': ie_result.get('uploader_id'),
            **kwargs,
        }
        if strict:
            return info
        if ie_result.get('webpage_url'):
            info.update({
                'webpage_url': ie_result['webpage_url'],
                'webpage_url_basename': url_basename(ie_result['webpage_url']),
                'webpage_url_domain': get_domain(ie_result['webpage_url']),
            })
        return {
            **info,
            'playlist_index': 0,
            '__last_playlist_index': max(ie_result.get('requested_entries') or (0, 0)),
            'extractor': ie_result['extractor'],
            'extractor_key': ie_result['extractor_key'],
        }
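
    # Example of the strict result (sketch, assuming a minimal ie_result):
    #   YoutubeDL._playlist_infodict({'id': 'PL1', 'title': 'T'}, strict=True)
    #   -> {'playlist_count': None, 'playlist': 'T', 'playlist_id': 'PL1',
    #       'playlist_title': 'T', 'playlist_uploader': None, 'playlist_uploader_id': None}
    # Non-strict mode additionally requires 'extractor'/'extractor_key' and adds
    # 'playlist_index', '__last_playlist_index' and the webpage_url fields.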
    def __process_playlist(self, ie_result, download):
        """Process each entry in the playlist"""
        assert ie_result['_type'] in ('playlist', 'multi_video')

        common_info = self._playlist_infodict(ie_result, strict=True)
        title = common_info.get('playlist') or '<Untitled>'
        if self._match_entry(common_info, incomplete=True) is not None:
            return
        self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')

        all_entries = PlaylistEntries(self, ie_result)
        entries = orderedSet(all_entries.get_requested_items(), lazy=True)

        lazy = self.params.get('lazy_playlist')
        if lazy:
            resolved_entries, n_entries = [], 'N/A'
            ie_result['requested_entries'], ie_result['entries'] = None, None
        else:
            entries = resolved_entries = list(entries)
            n_entries = len(resolved_entries)
            ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
        if not ie_result.get('playlist_count'):
            # Better to do this after potentially exhausting entries
            ie_result['playlist_count'] = all_entries.get_full_count()

        extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
        ie_copy = collections.ChainMap(ie_result, extra)

        _infojson_written = False
        write_playlist_files = self.params.get('allow_playlist_files', True)
        if write_playlist_files and self.params.get('list_thumbnails'):
            self.list_thumbnails(ie_result)
        if write_playlist_files and not self.params.get('simulate'):
            _infojson_written = self._write_info_json(
                'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
            if _infojson_written is None:
                return
            if self._write_description('playlist', ie_result,
                                       self.prepare_filename(ie_copy, 'pl_description')) is None:
                return
            # TODO: This should be passed to ThumbnailsConvertor if necessary
            self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))

        if lazy:
            if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
                self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
        elif self.params.get('playlistreverse'):
            entries.reverse()
        elif self.params.get('playlistrandom'):
            random.shuffle(entries)

        self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} items'
                       f'{format_field(ie_result, "playlist_count", " of %s")}')

        keep_resolved_entries = self.params.get('extract_flat') != 'discard'
        if self.params.get('extract_flat') == 'discard_in_playlist':
            keep_resolved_entries = ie_result['_type'] != 'playlist'
        if keep_resolved_entries:
            self.write_debug('The information of all playlist entries will be held in memory')

        failures = 0
        max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
        for i, (playlist_index, entry) in enumerate(entries):
            if lazy:
                resolved_entries.append((playlist_index, entry))
            if not entry:
                continue

            entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
            if not lazy and 'playlist-index' in self.params['compat_opts']:
                playlist_index = ie_result['requested_entries'][i]

            entry_copy = collections.ChainMap(entry, {
                **common_info,
                'n_entries': int_or_none(n_entries),
                'playlist_index': playlist_index,
                'playlist_autonumber': i + 1,
            })

            if self._match_entry(entry_copy, incomplete=True) is not None:
                # For compatibility with youtube-dl. See https://github.com/yt-dlp/yt-dlp/issues/4369
                resolved_entries[i] = (playlist_index, NO_DEFAULT)
                continue

            self.to_screen('[download] Downloading item %s of %s' % (
                self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))

            entry_result = self.__process_iterable_entry(entry, download, collections.ChainMap({
                'playlist_index': playlist_index,
                'playlist_autonumber': i + 1,
            }, extra))
            if not entry_result:
                failures += 1
            if failures >= max_failures:
                self.report_error(
                    f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
                break
            if keep_resolved_entries:
                resolved_entries[i] = (playlist_index, entry_result)

        # Update with processed data
        ie_result['entries'] = [e for _, e in resolved_entries if e is not NO_DEFAULT]
        ie_result['requested_entries'] = [i for i, e in resolved_entries if e is not NO_DEFAULT]
        if ie_result['requested_entries'] == try_call(lambda: list(range(1, ie_result['playlist_count'] + 1))):
            # Do not set for full playlist
            ie_result.pop('requested_entries')

        # Write the updated info to json
        if _infojson_written is True and self._write_info_json(
                'updated playlist', ie_result,
                self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
            return

        ie_result = self.run_all_pps('playlist', ie_result)
        self.to_screen(f'[download] Finished downloading playlist: {title}')
        return ie_result
    @_handle_extraction_exceptions
    def __process_iterable_entry(self, entry, download, extra_info):
        return self.process_ie_result(
            entry, download=download, extra_info=extra_info)
    def _build_format_filter(self, filter_spec):
        " Returns a function to filter the formats according to the filter_spec "

        OPERATORS = {
            '<': operator.lt,
            '<=': operator.le,
            '>': operator.gt,
            '>=': operator.ge,
            '=': operator.eq,
            '!=': operator.ne,
        }
        operator_rex = re.compile(r'''(?x)\s*
            (?P<key>[\w.-]+)\s*
            (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
            ''' % '|'.join(map(re.escape, OPERATORS.keys())))
        m = operator_rex.fullmatch(filter_spec)
        if m:
            try:
                comparison_value = int(m.group('value'))
            except ValueError:
                comparison_value = parse_filesize(m.group('value'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('value') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid value %r in format specification %r' % (
                            m.group('value'), filter_spec))
            op = OPERATORS[m.group('op')]

        if not m:
            STR_OPERATORS = {
                '=': operator.eq,
                '^=': lambda attr, value: attr.startswith(value),
                '$=': lambda attr, value: attr.endswith(value),
                '*=': lambda attr, value: value in attr,
                '~=': lambda attr, value: value.search(attr) is not None,
            }
            str_operator_rex = re.compile(r'''(?x)\s*
                (?P<key>[a-zA-Z0-9._-]+)\s*
                (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
                (?P<quote>["'])?
                (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
                (?(quote)(?P=quote))\s*
                ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
            m = str_operator_rex.fullmatch(filter_spec)
            if m:
                if m.group('op') == '~=':
                    comparison_value = re.compile(m.group('value'))
                else:
                    comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
                str_op = STR_OPERATORS[m.group('op')]
                if m.group('negation'):
                    op = lambda attr, value: not str_op(attr, value)
                else:
                    op = str_op

        if not m:
            raise SyntaxError('Invalid filter specification %r' % filter_spec)

        def _filter(f):
            actual_value = f.get(m.group('key'))
            if actual_value is None:
                return m.group('none_inclusive')
            return op(actual_value, comparison_value)
        return _filter
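
    # Example filter specs accepted above (sketch):
    #   'filesize<100M'        - numeric comparison; k/M/G... suffixes are parsed
    #   'height>=720?'         - '?' also keeps formats where the field is missing
    #   'vcodec^=avc1'         - string prefix match; '!' before the op negates
    #   'format_note~=premium' - '~=' treats the value as a regular expression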
    def _check_formats(self, formats):
        for f in formats:
            self.to_screen('[info] Testing format %s' % f['format_id'])
            path = self.get_output_path('temp')
            if not self._ensure_dir_exists(f'{path}/'):
                continue
            temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
            temp_file.close()
            try:
                success, _ = self.dl(temp_file.name, f, test=True)
            except (DownloadError, OSError, ValueError) + network_exceptions:
                success = False
            finally:
                if os.path.exists(temp_file.name):
                    try:
                        os.remove(temp_file.name)
                    except OSError:
                        self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
            if success:
                yield f
            else:
                self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
    def _default_format_spec(self, info_dict, download=True):
        def can_merge():
            merger = FFmpegMergerPP(self)
            return merger.available and merger.can_merge()

        prefer_best = (
            not self.params.get('simulate')
            and download
            and (
                not can_merge()
                or info_dict.get('is_live') and not self.params.get('live_from_start')
                or self.params['outtmpl']['default'] == '-'))
        compat = (
            prefer_best
            or self.params.get('allow_multiple_audio_streams', False)
            or 'format-spec' in self.params['compat_opts'])

        return (
            'best/bestvideo+bestaudio' if prefer_best
            else 'bestvideo*+bestaudio/best' if not compat
            else 'bestvideo+bestaudio/best')
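
    # E.g. (sketch): downloading to stdout ('-'), or a live stream without
    # --live-from-start, yields 'best/bestvideo+bestaudio'; a normal download
    # with a working ffmpeg (and no compat options) yields 'bestvideo*+bestaudio/best'.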
    def build_format_selector(self, format_spec):
        def syntax_error(note, start):
            message = (
                'Invalid format specification: '
                '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
            return SyntaxError(message)

        PICKFIRST = 'PICKFIRST'
        MERGE = 'MERGE'
        SINGLE = 'SINGLE'
        GROUP = 'GROUP'
        FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])

        allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
                                  'video': self.params.get('allow_multiple_video_streams', False)}

        def _parse_filter(tokens):
            filter_parts = []
            for type, string_, start, _, _ in tokens:
                if type == tokenize.OP and string_ == ']':
                    return ''.join(filter_parts)
                else:
                    filter_parts.append(string_)

        def _remove_unused_ops(tokens):
            # Remove operators that we don't use and join them with the surrounding strings.
            # E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
            ALLOWED_OPS = ('/', '+', ',', '(', ')')
            last_string, last_start, last_end, last_line = None, None, None, None
            for type, string_, start, end, line in tokens:
                if type == tokenize.OP and string_ == '[':
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string_, start, end, line
                    # everything inside brackets will be handled by _parse_filter
                    for type, string_, start, end, line in tokens:
                        yield type, string_, start, end, line
                        if type == tokenize.OP and string_ == ']':
                            break
                elif type == tokenize.OP and string_ in ALLOWED_OPS:
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string_, start, end, line
                elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
                    if not last_string:
                        last_string = string_
                        last_start = start
                        last_end = end
                    else:
                        last_string += string_
            if last_string:
                yield tokenize.NAME, last_string, last_start, last_end, last_line

        def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
            selectors = []
            current_selector = None
            for type, string_, start, _, _ in tokens:
                # ENCODING is only defined in python 3.x
                if type == getattr(tokenize, 'ENCODING', None):
                    continue
                elif type in [tokenize.NAME, tokenize.NUMBER]:
                    current_selector = FormatSelector(SINGLE, string_, [])
                elif type == tokenize.OP:
                    if string_ == ')':
                        if not inside_group:
                            # ')' will be handled by the parentheses group
                            tokens.restore_last_token()
                        break
                    elif inside_merge and string_ in ['/', ',']:
                        tokens.restore_last_token()
                        break
                    elif inside_choice and string_ == ',':
                        tokens.restore_last_token()
                        break
                    elif string_ == ',':
                        if not current_selector:
                            raise syntax_error('"," must follow a format selector', start)
                        selectors.append(current_selector)
                        current_selector = None
                    elif string_ == '/':
                        if not current_selector:
                            raise syntax_error('"/" must follow a format selector', start)
                        first_choice = current_selector
                        second_choice = _parse_format_selection(tokens, inside_choice=True)
                        current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
                    elif string_ == '[':
                        if not current_selector:
                            current_selector = FormatSelector(SINGLE, 'best', [])
                        format_filter = _parse_filter(tokens)
                        current_selector.filters.append(format_filter)
                    elif string_ == '(':
                        if current_selector:
                            raise syntax_error('Unexpected "("', start)
                        group = _parse_format_selection(tokens, inside_group=True)
                        current_selector = FormatSelector(GROUP, group, [])
                    elif string_ == '+':
                        if not current_selector:
                            raise syntax_error('Unexpected "+"', start)
                        selector_1 = current_selector
                        selector_2 = _parse_format_selection(tokens, inside_merge=True)
                        if not selector_2:
                            raise syntax_error('Expected a selector', start)
                        current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
                    else:
                        raise syntax_error(f'Operator not recognized: "{string_}"', start)
                elif type == tokenize.ENDMARKER:
                    break
            if current_selector:
                selectors.append(current_selector)
            return selectors

        def _merge(formats_pair):
            format_1, format_2 = formats_pair

            formats_info = []
            formats_info.extend(format_1.get('requested_formats', (format_1,)))
            formats_info.extend(format_2.get('requested_formats', (format_2,)))

            if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
                get_no_more = {'video': False, 'audio': False}
                for (i, fmt_info) in enumerate(formats_info):
                    if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
                        formats_info.pop(i)
                        continue
                    for aud_vid in ['audio', 'video']:
                        if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
                            if get_no_more[aud_vid]:
                                formats_info.pop(i)
                                break
                            get_no_more[aud_vid] = True

            if len(formats_info) == 1:
                return formats_info[0]

            video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
            audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']

            the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
            the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None

            output_ext = get_compatible_ext(
                vcodecs=[f.get('vcodec') for f in video_fmts],
                acodecs=[f.get('acodec') for f in audio_fmts],
                vexts=[f['ext'] for f in video_fmts],
                aexts=[f['ext'] for f in audio_fmts],
                preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
                             or self.params.get('prefer_free_formats') and ('webm', 'mkv')))

            filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))

            new_dict = {
                'requested_formats': formats_info,
                'format': '+'.join(filtered('format')),
                'format_id': '+'.join(filtered('format_id')),
                'ext': output_ext,
                'protocol': '+'.join(map(determine_protocol, formats_info)),
                'language': '+'.join(orderedSet(filtered('language'))) or None,
                'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
                'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
                'tbr': sum(filtered('tbr', 'vbr', 'abr')),
            }

            if the_only_video:
                new_dict.update({
                    'width': the_only_video.get('width'),
                    'height': the_only_video.get('height'),
                    'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
                    'fps': the_only_video.get('fps'),
                    'dynamic_range': the_only_video.get('dynamic_range'),
                    'vcodec': the_only_video.get('vcodec'),
                    'vbr': the_only_video.get('vbr'),
                    'stretched_ratio': the_only_video.get('stretched_ratio'),
                    'aspect_ratio': the_only_video.get('aspect_ratio'),
                })

            if the_only_audio:
                new_dict.update({
                    'acodec': the_only_audio.get('acodec'),
                    'abr': the_only_audio.get('abr'),
                    'asr': the_only_audio.get('asr'),
                    'audio_channels': the_only_audio.get('audio_channels'),
                })

            return new_dict

        def _check_formats(formats):
            if self.params.get('check_formats') == 'selected':
                yield from self._check_formats(formats)
                return
            elif (self.params.get('check_formats') is not None
                    or self.params.get('allow_unplayable_formats')):
                yield from formats
                return

            for f in formats:
                if f.get('has_drm') or f.get('__needs_testing'):
                    yield from self._check_formats([f])
                else:
                    yield f

        def _build_selector_function(selector):
            if isinstance(selector, list):  # ,
                fs = [_build_selector_function(s) for s in selector]

                def selector_function(ctx):
                    for f in fs:
                        yield from f(ctx)
                return selector_function

            elif selector.type == GROUP:  # ()
                selector_function = _build_selector_function(selector.selector)

            elif selector.type == PICKFIRST:  # /
                fs = [_build_selector_function(s) for s in selector.selector]

                def selector_function(ctx):
                    for f in fs:
                        picked_formats = list(f(ctx))
                        if picked_formats:
                            return picked_formats
                    return []

            elif selector.type == MERGE:  # +
                selector_1, selector_2 = map(_build_selector_function, selector.selector)

                def selector_function(ctx):
                    for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
                        yield _merge(pair)

            elif selector.type == SINGLE:  # atom
                format_spec = selector.selector or 'best'

                # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
                if format_spec == 'all':
                    def selector_function(ctx):
                        yield from _check_formats(ctx['formats'][::-1])
                elif format_spec == 'mergeall':
                    def selector_function(ctx):
                        formats = list(_check_formats(
                            f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
                        if not formats:
                            return
                        merged_format = formats[-1]
                        for f in formats[-2::-1]:
                            merged_format = _merge((merged_format, f))
                        yield merged_format

                else:
                    format_fallback, separate_fallback, format_reverse, format_idx = False, None, True, 1
                    mobj = re.match(
                        r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
                        format_spec)
                    if mobj is not None:
                        format_idx = int_or_none(mobj.group('n'), default=1)
                        format_reverse = mobj.group('bw')[0] == 'b'
                        format_type = (mobj.group('type') or [None])[0]
                        not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
                        format_modified = mobj.group('mod') is not None

                        format_fallback = not format_type and not format_modified  # for b, w
                        _filter_f = (
                            (lambda f: f.get('%scodec' % format_type) != 'none')
                            if format_type and format_modified  # bv*, ba*, wv*, wa*
                            else (lambda f: f.get('%scodec' % not_format_type) == 'none')
                            if format_type  # bv, ba, wv, wa
                            else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
                            if not format_modified  # b, w
                            else lambda f: True)  # b*, w*
                        filter_f = lambda f: _filter_f(f) and (
                            f.get('vcodec') != 'none' or f.get('acodec') != 'none')
                    else:
                        if format_spec in self._format_selection_exts['audio']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
                        elif format_spec in self._format_selection_exts['video']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
                            separate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
                        elif format_spec in self._format_selection_exts['storyboards']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
                        else:
                            filter_f = lambda f: f.get('format_id') == format_spec  # id

                    def selector_function(ctx):
                        formats = list(ctx['formats'])
                        matches = list(filter(filter_f, formats)) if filter_f is not None else formats
                        if format_fallback and ctx['incomplete_formats']:
                            # for extractors with incomplete formats (audio only (soundcloud)
                            # or video only (imgur)) best/worst will fallback to
                            # best/worst {video,audio}-only format
                            matches = list(filter(lambda f: f.get('vcodec') != 'none' or f.get('acodec') != 'none', formats))
                        elif separate_fallback and not ctx['has_merged_format']:
                            # for compatibility with youtube-dl when there is no pre-merged format
                            matches = list(filter(separate_fallback, formats))
                        matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
                        try:
                            yield matches[format_idx - 1]
                        except LazyList.IndexError:
                            return

            filters = [self._build_format_filter(f) for f in selector.filters]

            def final_selector(ctx):
                ctx_copy = dict(ctx)
                for _filter in filters:
                    ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
                return selector_function(ctx_copy)
            return final_selector

        # HACK: Python 3.12 changed the underlying parser, rendering '7_a' invalid
        # Prefix numbers with random letters to avoid it being classified as a number
        # See: https://github.com/yt-dlp/yt-dlp/pulls/8797
        # TODO: Implement parser not reliant on tokenize.tokenize
        prefix = ''.join(random.choices(string.ascii_letters, k=32))
        stream = io.BytesIO(re.sub(r'\d[_\d]*', rf'{prefix}\g<0>', format_spec).encode())
        try:
            tokens = list(_remove_unused_ops(
                token._replace(string=token.string.replace(prefix, ''))
                for token in tokenize.tokenize(stream.readline)))
        except tokenize.TokenError:
            raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))

        class TokenIterator:
            def __init__(self, tokens):
                self.tokens = tokens
                self.counter = 0

            def __iter__(self):
                return self

            def __next__(self):
                if self.counter >= len(self.tokens):
                    raise StopIteration()
                value = self.tokens[self.counter]
                self.counter += 1
                return value

            next = __next__

            def restore_last_token(self):
                self.counter -= 1

        parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
        return _build_selector_function(parsed_selector)
    def _calc_headers(self, info_dict, load_cookies=False):
        res = HTTPHeaderDict(self.params['http_headers'], info_dict.get('http_headers'))
        clean_headers(res)

        if load_cookies:  # For --load-info-json
            self._load_cookies(res.get('Cookie'), autoscope=info_dict['url'])  # compat
            self._load_cookies(info_dict.get('cookies'), autoscope=False)
        # The `Cookie` header is removed to prevent leaks and unscoped cookies.
        # See: https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-v8mc-9377-rwjj
        res.pop('Cookie', None)
        cookies = self.cookiejar.get_cookies_for_url(info_dict['url'])
        if cookies:
            encoder = LenientSimpleCookie()
            values = []
            for cookie in cookies:
                _, value = encoder.value_encode(cookie.value)
                values.append(f'{cookie.name}={value}')
                if cookie.domain:
                    values.append(f'Domain={cookie.domain}')
                if cookie.path:
                    values.append(f'Path={cookie.path}')
                if cookie.secure:
                    values.append('Secure')
                if cookie.expires:
                    values.append(f'Expires={cookie.expires}')
                if cookie.version:
                    values.append(f'Version={cookie.version}')
            info_dict['cookies'] = '; '.join(values)

        if 'X-Forwarded-For' not in res:
            x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
            if x_forwarded_for_ip:
                res['X-Forwarded-For'] = x_forwarded_for_ip

        return res
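
    # The generated 'cookies' field uses Set-Cookie-style attributes, e.g. (sketch):
    #   'session=abc; Domain=.example.com; Path=/; Secure; Expires=1700000000'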
    def _calc_cookies(self, url):
        self.deprecation_warning('"YoutubeDL._calc_cookies" is deprecated and may be removed in a future version')
        return self.cookiejar.get_cookie_header(url)
    def _sort_thumbnails(self, thumbnails):
        thumbnails.sort(key=lambda t: (
            t.get('preference') if t.get('preference') is not None else -1,
            t.get('width') if t.get('width') is not None else -1,
            t.get('height') if t.get('height') is not None else -1,
            t.get('id') if t.get('id') is not None else '',
            t.get('url')))
    def _sanitize_thumbnails(self, info_dict):
        thumbnails = info_dict.get('thumbnails')
        if thumbnails is None:
            thumbnail = info_dict.get('thumbnail')
            if thumbnail:
                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
        if not thumbnails:
            return

        def check_thumbnails(thumbnails):
            for t in thumbnails:
                self.to_screen(f'[info] Testing thumbnail {t["id"]}')
                try:
                    self.urlopen(HEADRequest(t['url']))
                except network_exceptions as err:
                    self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
                    continue
                yield t

        self._sort_thumbnails(thumbnails)
        for i, t in enumerate(thumbnails):
            if t.get('id') is None:
                t['id'] = '%d' % i
            if t.get('width') and t.get('height'):
                t['resolution'] = '%dx%d' % (t['width'], t['height'])
            t['url'] = sanitize_url(t['url'])

        if self.params.get('check_formats') is True:
            info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
        else:
            info_dict['thumbnails'] = thumbnails
    def _fill_common_fields(self, info_dict, final=True):
        # TODO: move sanitization here
        if final:
            title = info_dict['fulltitle'] = info_dict.get('title')
            if not title:
                if title == '':
                    self.write_debug('Extractor gave empty title. Creating a generic title')
                else:
                    self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
                info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'

        if info_dict.get('duration') is not None:
            info_dict['duration_string'] = formatSeconds(info_dict['duration'])

        for ts_key, date_key in (
                ('timestamp', 'upload_date'),
                ('release_timestamp', 'release_date'),
                ('modified_timestamp', 'modified_date'),
        ):
            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
                # see http://bugs.python.org/issue1646728)
                with contextlib.suppress(ValueError, OverflowError, OSError):
                    upload_date = datetime.datetime.fromtimestamp(info_dict[ts_key], datetime.timezone.utc)
                    info_dict[date_key] = upload_date.strftime('%Y%m%d')

        if not info_dict.get('release_year'):
            info_dict['release_year'] = traverse_obj(info_dict, ('release_date', {lambda x: int(x[:4])}))

        live_keys = ('is_live', 'was_live')
        live_status = info_dict.get('live_status')
        if live_status is None:
            for key in live_keys:
                if info_dict.get(key) is False:
                    continue
                if info_dict.get(key):
                    live_status = key
                break
            if all(info_dict.get(key) is False for key in live_keys):
                live_status = 'not_live'
        if live_status:
            info_dict['live_status'] = live_status
            for key in live_keys:
                if info_dict.get(key) is None:
                    info_dict[key] = (live_status == key)
        if live_status == 'post_live':
            info_dict['was_live'] = True

        # Auto generate title fields corresponding to the *_number fields when missing
        # in order to always have clean titles. This is very common for TV series.
        for field in ('chapter', 'season', 'episode'):
            if final and info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
                info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
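
    # E.g. (sketch): {'timestamp': 1577836800} gains 'upload_date': '20200101',
    # and {'episode_number': 3} without an 'episode' gains 'episode': 'Episode 3'.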
    def _raise_pending_errors(self, info):
        err = info.pop('__pending_error', None)
        if err:
            self.report_error(err, tb=False)
    def sort_formats(self, info_dict):
        formats = self._get_formats(info_dict)
        formats.sort(key=FormatSorter(
            self, info_dict.get('_format_sort_fields') or []).calculate_preference)
    def process_video_result(self, info_dict, download=True):
        assert info_dict.get('_type', 'video') == 'video'
        self._num_videos += 1

        if 'id' not in info_dict:
            raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
        elif not info_dict.get('id'):
            raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])

        def report_force_conversion(field, field_not, conversion):
            self.report_warning(
                '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
                % (field, field_not, conversion))

        def sanitize_string_field(info, string_field):
            field = info.get(string_field)
            if field is None or isinstance(field, str):
                return
            report_force_conversion(string_field, 'a string', 'string')
            info[string_field] = str(field)

        def sanitize_numeric_fields(info):
            for numeric_field in self._NUMERIC_FIELDS:
                field = info.get(numeric_field)
                if field is None or isinstance(field, (int, float)):
                    continue
                report_force_conversion(numeric_field, 'numeric', 'int')
                info[numeric_field] = int_or_none(field)

        sanitize_string_field(info_dict, 'id')
        sanitize_numeric_fields(info_dict)
        if info_dict.get('section_end') and info_dict.get('section_start') is not None:
            info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
        if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
            self.report_warning('"duration" field is negative, there is an error in extractor')

        chapters = info_dict.get('chapters') or []
        if chapters and chapters[0].get('start_time'):
            chapters.insert(0, {'start_time': 0})

        dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
        for idx, (prev, current, next_) in enumerate(zip(
                (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
            if current.get('start_time') is None:
                current['start_time'] = prev.get('end_time')
            if not current.get('end_time'):
                current['end_time'] = next_.get('start_time')
            if not current.get('title'):
                current['title'] = f'<Untitled Chapter {idx}>'

        if 'playlist' not in info_dict:
            # It isn't part of a playlist
            info_dict['playlist'] = None
            info_dict['playlist_index'] = None

        self._sanitize_thumbnails(info_dict)

        thumbnail = info_dict.get('thumbnail')
        thumbnails = info_dict.get('thumbnails')
        if thumbnail:
            info_dict['thumbnail'] = sanitize_url(thumbnail)
        elif thumbnails:
            info_dict['thumbnail'] = thumbnails[-1]['url']

        if info_dict.get('display_id') is None and 'id' in info_dict:
            info_dict['display_id'] = info_dict['id']

        self._fill_common_fields(info_dict)

        for cc_kind in ('subtitles', 'automatic_captions'):
            cc = info_dict.get(cc_kind)
            if cc:
                for _, subtitle in cc.items():
                    for subtitle_format in subtitle:
                        if subtitle_format.get('url'):
                            subtitle_format['url'] = sanitize_url(subtitle_format['url'])
                        if subtitle_format.get('ext') is None:
                            subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()

        automatic_captions = info_dict.get('automatic_captions')
        subtitles = info_dict.get('subtitles')

        info_dict['requested_subtitles'] = self.process_subtitles(
            info_dict['id'], subtitles, automatic_captions)

        formats = self._get_formats(info_dict)

        # Backward compatibility with InfoExtractor._sort_formats
        field_preference = (formats or [{}])[0].pop('__sort_fields', None)
        if field_preference:
            info_dict['_format_sort_fields'] = field_preference

        info_dict['_has_drm'] = any(  # or None ensures --clean-infojson removes it
            f.get('has_drm') and f['has_drm'] != 'maybe' for f in formats) or None
        if not self.params.get('allow_unplayable_formats'):
            formats = [f for f in formats if not f.get('has_drm') or f['has_drm'] == 'maybe']

        if formats and all(f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
            self.report_warning(
                f'{"This video is DRM protected and " if info_dict["_has_drm"] else ""}'
                'only images are available for download. Use --list-formats to see them'.capitalize())

        get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
        if not get_from_start:
            info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
        if info_dict.get('is_live') and formats:
            formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
            if get_from_start and not formats:
                self.raise_no_formats(info_dict, msg=(
                    '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
                    'If you want to download from the current time, use --no-live-from-start'))

        def is_wellformed(f):
            url = f.get('url')
            if not url:
                self.report_warning(
                    '"url" field is missing or empty - skipping format, '
                    'there is an error in extractor')
                return False
            if isinstance(url, bytes):
                sanitize_string_field(f, 'url')
            return True

        # Filter out malformed formats for better extraction robustness
        formats = list(filter(is_wellformed, formats or []))

        if not formats:
            self.raise_no_formats(info_dict)

        for format in formats:
            sanitize_string_field(format, 'format_id')
            sanitize_numeric_fields(format)
            format['url'] = sanitize_url(format['url'])
            if format.get('ext') is None:
                format['ext'] = determine_ext(format['url']).lower()
            if format.get('protocol') is None:
                format['protocol'] = determine_protocol(format)
            if format.get('resolution') is None:
                format['resolution'] = self.format_resolution(format, default=None)
            if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
                format['dynamic_range'] = 'SDR'
            if format.get('aspect_ratio') is None:
                format['aspect_ratio'] = try_call(lambda: round(format['width'] / format['height'], 2))
            # For fragmented formats, "tbr" is often max bitrate and not average
            if (('manifest-filesize-approx' in self.params['compat_opts'] or not format.get('manifest_url'))
                    and info_dict.get('duration') and format.get('tbr')
                    and not format.get('filesize') and not format.get('filesize_approx')):
                format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
            format['http_headers'] = self._calc_headers(collections.ChainMap(format, info_dict), load_cookies=True)

        # Safeguard against old/insecure infojson when using --load-info-json
        if info_dict.get('http_headers'):
            info_dict['http_headers'] = HTTPHeaderDict(info_dict['http_headers'])
            info_dict['http_headers'].pop('Cookie', None)

        # This is copied to http_headers by the above _calc_headers and can now be removed
        if '__x_forwarded_for_ip' in info_dict:
            del info_dict['__x_forwarded_for_ip']

        self.sort_formats({
            'formats': formats,
            '_format_sort_fields': info_dict.get('_format_sort_fields'),
        })

        # Sanitize and group by format_id
        formats_dict = {}
        for i, format in enumerate(formats):
            if not format.get('format_id'):
                format['format_id'] = str(i)
            else:
                # Sanitize format_id from characters used in format selector expression
                format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
            formats_dict.setdefault(format['format_id'], []).append(format)

        # Make sure all formats have unique format_id
        common_exts = set(itertools.chain(*self._format_selection_exts.values()))
        for format_id, ambiguous_formats in formats_dict.items():
            ambiguous_id = len(ambiguous_formats) > 1
            for i, format in enumerate(ambiguous_formats):
                if ambiguous_id:
                    format['format_id'] = '%s-%d' % (format_id, i)
                # Ensure there is no conflict between id and ext in format selection
                # See https://github.com/yt-dlp/yt-dlp/issues/1282
                if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
                    format['format_id'] = 'f%s' % format['format_id']

                if format.get('format') is None:
                    format['format'] = '{id} - {res}{note}'.format(
                        id=format['format_id'],
                        res=self.format_resolution(format),
                        note=format_field(format, 'format_note', ' (%s)'),
                    )

        if self.params.get('check_formats') is True:
            formats = LazyList(self._check_formats(formats[::-1]), reverse=True)

        if not formats or formats[0] is not info_dict:
            # only set the 'formats' fields if the original info_dict list them
            # otherwise we end up with a circular reference, the first (and unique)
            # element in the 'formats' field in info_dict is info_dict itself,
            # which can't be exported to json
            info_dict['formats'] = formats

        info_dict, _ = self.pre_process(info_dict)

        if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
            return info_dict

        self.post_extract(info_dict)
        info_dict, _ = self.pre_process(info_dict, 'after_filter')

        # The pre-processors may have modified the formats
        formats = self._get_formats(info_dict)

        list_only = self.params.get('simulate') == 'list_only'
        interactive_format_selection = not list_only and self.format_selector == '-'
        if self.params.get('list_thumbnails'):
            self.list_thumbnails(info_dict)
        if self.params.get('listsubtitles'):
            if 'automatic_captions' in info_dict:
                self.list_subtitles(
                    info_dict['id'], automatic_captions, 'automatic captions')
            self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
        if self.params.get('listformats') or interactive_format_selection:
            self.list_formats(info_dict)
        if list_only:
            # Without this printing, -F --print-json will not work
            self.__forced_printings(info_dict)
            return info_dict

        format_selector = self.format_selector
        while True:
            if interactive_format_selection:
                req_format = input(self._format_screen('\nEnter format selector ', self.Styles.EMPHASIS)
                                   + '(Press ENTER for default, or Ctrl+C to quit)'
                                   + self._format_screen(': ', self.Styles.EMPHASIS))
                try:
                    format_selector = self.build_format_selector(req_format) if req_format else None
                except SyntaxError as err:
                    self.report_error(err, tb=False, is_error=False)
                    continue

            if format_selector is None:
                req_format = self._default_format_spec(info_dict, download=download)
                self.write_debug(f'Default format spec: {req_format}')
                format_selector = self.build_format_selector(req_format)

            formats_to_download = list(format_selector({
                'formats': formats,
                'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
                'incomplete_formats': (all(f.get('vcodec') == 'none' for f in formats)  # No formats with video
                                       or all(f.get('acodec') == 'none' for f in formats)),  # OR, No formats with audio
            }))
            if interactive_format_selection and not formats_to_download:
                self.report_error('Requested format is not available', tb=False, is_error=False)
                continue
            break

        if not formats_to_download:
            if not self.params.get('ignore_no_formats_error'):
                raise ExtractorError(
                    'Requested format is not available. Use --list-formats for a list of available formats',
                    expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
            self.report_warning('Requested format is not available')
            # Process what we can, even without any available formats.
            formats_to_download = [{}]

        requested_ranges = tuple(self.params.get('download_ranges', lambda *_: [{}])(info_dict, self))
        best_format, downloaded_formats = formats_to_download[-1], []
        if download:
            if best_format and requested_ranges:
                def to_screen(*msg):
                    self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')

                to_screen(f'Downloading {len(formats_to_download)} format(s):',
                          (f['format_id'] for f in formats_to_download))
                if requested_ranges != ({}, ):
                    to_screen(f'Downloading {len(requested_ranges)} time ranges:',
                              (f'{c["start_time"]:.1f}-{c["end_time"]:.1f}' for c in requested_ranges))
            max_downloads_reached = False

            for fmt, chapter in itertools.product(formats_to_download, requested_ranges):
                new_info = self._copy_infodict(info_dict)
                new_info.update(fmt)
                offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
                end_time = offset + min(chapter.get('end_time', duration), duration)
                # duration may not be accurate. So allow deviations <1sec
                if end_time == float('inf') or end_time > offset + duration + 1:
                    end_time = None
                if chapter or offset:
                    new_info.update({
                        'section_start': offset + chapter.get('start_time', 0),
                        'section_end': end_time,
                        'section_title': chapter.get('title'),
                        'section_number': chapter.get('index'),
                    })
                downloaded_formats.append(new_info)
                try:
                    self.process_info(new_info)
                except MaxDownloadsReached:
                    max_downloads_reached = True
                self._raise_pending_errors(new_info)
                # Remove copied info
                for key, val in tuple(new_info.items()):
                    if info_dict.get(key) == val:
                        new_info.pop(key)
                if max_downloads_reached:
                    break

            write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
            assert write_archive.issubset({True, False, 'ignore'})
            if True in write_archive and False not in write_archive:
                self.record_download_archive(info_dict)

            info_dict['requested_downloads'] = downloaded_formats
            info_dict = self.run_all_pps('after_video', info_dict)
            if max_downloads_reached:
                raise MaxDownloadsReached()

        # We update the info dict with the selected best quality format (backwards compatibility)
        info_dict.update(best_format)
        return info_dict

    def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
        """Select the requested subtitles and their format"""
        available_subs, normal_sub_langs = {}, []
        if normal_subtitles and self.params.get('writesubtitles'):
            available_subs.update(normal_subtitles)
            normal_sub_langs = tuple(normal_subtitles.keys())
        if automatic_captions and self.params.get('writeautomaticsub'):
            for lang, cap_info in automatic_captions.items():
                if lang not in available_subs:
                    available_subs[lang] = cap_info

        if not available_subs or (
                not self.params.get('writesubtitles')
                and not self.params.get('writeautomaticsub')):
            return None

        all_sub_langs = tuple(available_subs.keys())
        if self.params.get('allsubtitles', False):
            requested_langs = all_sub_langs
        elif self.params.get('subtitleslangs', False):
            try:
                requested_langs = orderedSet_from_options(
                    self.params.get('subtitleslangs'), {'all': all_sub_langs}, use_regex=True)
            except re.error as e:
                raise ValueError(f'Wrong regex for subtitlelangs: {e.pattern}')
        else:
            requested_langs = LazyList(itertools.chain(
                ['en'] if 'en' in normal_sub_langs else [],
                filter(lambda f: f.startswith('en'), normal_sub_langs),
                ['en'] if 'en' in all_sub_langs else [],
                filter(lambda f: f.startswith('en'), all_sub_langs),
                normal_sub_langs, all_sub_langs,
            ))[:1]
        if requested_langs:
            self.to_screen(f'[info] {video_id}: Downloading subtitles: {", ".join(requested_langs)}')

        formats_query = self.params.get('subtitlesformat', 'best')
        formats_preference = formats_query.split('/') if formats_query else []
        subs = {}
        for lang in requested_langs:
            formats = available_subs.get(lang)
            if formats is None:
                self.report_warning(f'{lang} subtitles not available for {video_id}')
                continue
            for ext in formats_preference:
                if ext == 'best':
                    f = formats[-1]
                    break
                matches = list(filter(lambda f: f['ext'] == ext, formats))
                if matches:
                    f = matches[-1]
                    break
            else:
                f = formats[-1]
                self.report_warning(
                    'No subtitle format found matching "%s" for language %s, '
                    'using %s' % (formats_query, lang, f['ext']))
            subs[lang] = f
        return subs
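
    # Illustrative parameter combinations consumed above (a sketch, not an
    # exhaustive list):
    #   {'writesubtitles': True, 'subtitleslangs': ['en.*', 'ja']}  # regexes allowed
    #   {'writeautomaticsub': True, 'allsubtitles': True}
    # When 'subtitleslangs' is absent, an English variant is preferred, falling
    # back to the first available language (see the LazyList chain above).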

    def _forceprint(self, key, info_dict):
        if info_dict is None:
            return
        info_copy = info_dict.copy()
        info_copy.setdefault('filename', self.prepare_filename(info_dict))
        if info_dict.get('requested_formats') is not None:
            # For RTMP URLs, also include the playpath
            info_copy['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
        elif info_dict.get('url'):
            info_copy['urls'] = info_dict['url'] + info_dict.get('play_path', '')
        info_copy['formats_table'] = self.render_formats_table(info_dict)
        info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
        info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
        info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))

        def format_tmpl(tmpl):
            mobj = re.fullmatch(r'([\w.:,]|-\d|(?P<dict>{([\w.:,]|-\d)+}))+=?', tmpl)
            if not mobj:
                return tmpl

            fmt = '%({})s'
            if tmpl.startswith('{'):
                tmpl, fmt = f'.{tmpl}', '%({})j'
            if tmpl.endswith('='):
                tmpl, fmt = tmpl[:-1], '{0} = %({0})#j'
            return '\n'.join(map(fmt.format, [tmpl] if mobj.group('dict') else tmpl.split(',')))

        for tmpl in self.params['forceprint'].get(key, []):
            self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))

        for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
            filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
            tmpl = format_tmpl(tmpl)
            self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
            if self._ensure_dir_exists(filename):
                with open(filename, 'a', encoding='utf-8', newline='') as f:
                    f.write(self.evaluate_outtmpl(tmpl, info_copy) + os.linesep)

        return info_copy
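
    # Template forms accepted by format_tmpl() above, shown as a hedged sketch:
    #   'title'       -> '%(title)s'
    #   'title,id'    -> '%(title)s' and '%(id)s' on separate lines
    #   'title='      -> 'title = %(title)#j'   (labelled JSON value)
    #   '{title,id}'  -> '%(.{title,id})j'      (JSON dict of both fields)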

    def __forced_printings(self, info_dict, filename=None, incomplete=True):
        if (self.params.get('forcejson')
                or self.params['forceprint'].get('video')
                or self.params['print_to_file'].get('video')):
            self.post_extract(info_dict)
        if filename:
            info_dict['filename'] = filename
        info_copy = self._forceprint('video', info_dict)

        def print_field(field, actual_field=None, optional=False):
            if actual_field is None:
                actual_field = field
            if self.params.get(f'force{field}') and (
                    info_copy.get(field) is not None or (not optional and not incomplete)):
                self.to_stdout(info_copy[actual_field])

        print_field('title')
        print_field('id')
        print_field('url', 'urls')
        print_field('thumbnail', optional=True)
        print_field('description', optional=True)
        print_field('filename')
        if self.params.get('forceduration') and info_copy.get('duration') is not None:
            self.to_stdout(formatSeconds(info_copy['duration']))
        print_field('format')

        if self.params.get('forcejson'):
            self.to_stdout(json.dumps(self.sanitize_info(info_dict)))

    def dl(self, name, info, subtitle=False, test=False):
        if not info.get('url'):
            self.raise_no_formats(info, True)

        if test:
            verbose = self.params.get('verbose')
            params = {
                'test': True,
                'quiet': self.params.get('quiet') or not verbose,
                'verbose': verbose,
                'noprogress': not verbose,
                'nopart': True,
                'skip_unavailable_fragments': False,
                'keep_fragments': False,
                'overwrites': True,
                '_no_ytdl_file': True,
            }
        else:
            params = self.params

        fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
        if not test:
            for ph in self._progress_hooks:
                fd.add_progress_hook(ph)
            urls = '", "'.join(
                (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
                for f in info.get('requested_formats', []) or [info])
            self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')

        # Note: Ideally info should be a deep-copied so that hooks cannot modify it.
        # But it may contain objects that are not deep-copyable
        new_info = self._copy_infodict(info)
        if new_info.get('http_headers') is None:
            new_info['http_headers'] = self._calc_headers(new_info)
        return fd.download(name, new_info, subtitle)
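
    # Note on test mode (sketch): when test=True, dl() swaps in the trimmed
    # params dict built above (quiet, no .ytdl resume file, no kept fragments)
    # so that probing a format leaves no user-visible state behind.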

    def existing_file(self, filepaths, *, default_overwrite=True):
        existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
        if existing_files and not self.params.get('overwrites', default_overwrite):
            return existing_files[0]

        for file in existing_files:
            self.report_file_delete(file)
            os.remove(file)
        return None

    def process_info(self, info_dict):
        """Process a single resolved IE result. (Modifies it in-place)"""

        assert info_dict.get('_type', 'video') == 'video'
        original_infodict = info_dict

        if 'format' not in info_dict and 'ext' in info_dict:
            info_dict['format'] = info_dict['ext']

        if self._match_entry(info_dict) is not None:
            info_dict['__write_download_archive'] = 'ignore'
            return

        # Does nothing under normal operation - for backward compatibility of process_info
        self.post_extract(info_dict)

        def replace_info_dict(new_info):
            nonlocal info_dict
            if new_info == info_dict:
                return
            info_dict.clear()
            info_dict.update(new_info)

        new_info, _ = self.pre_process(info_dict, 'video')
        replace_info_dict(new_info)
        self._num_downloads += 1

        # info_dict['_filename'] needs to be set for backward compatibility
        info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
        temp_filename = self.prepare_filename(info_dict, 'temp')
        files_to_move = {}

        # Forced printings
        self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))

        def check_max_downloads():
            if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
                raise MaxDownloadsReached()

        if self.params.get('simulate'):
            info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
            check_max_downloads()
            return

        if full_filename is None:
            return
        if not self._ensure_dir_exists(encodeFilename(full_filename)):
            return
        if not self._ensure_dir_exists(encodeFilename(temp_filename)):
            return

        if self._write_description('video', info_dict,
                                   self.prepare_filename(info_dict, 'description')) is None:
            return

        sub_files = self._write_subtitles(info_dict, temp_filename)
        if sub_files is None:
            return
        files_to_move.update(dict(sub_files))

        thumb_files = self._write_thumbnails(
            'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
        if thumb_files is None:
            return
        files_to_move.update(dict(thumb_files))

        infofn = self.prepare_filename(info_dict, 'infojson')
        _infojson_written = self._write_info_json('video', info_dict, infofn)
        if _infojson_written:
            info_dict['infojson_filename'] = infofn
            # For backward compatibility, even though it was a private field
            info_dict['__infojson_filename'] = infofn
        elif _infojson_written is None:
            return

        # Note: Annotations are deprecated
        annofn = None
        if self.params.get('writeannotations', False):
            annofn = self.prepare_filename(info_dict, 'annotation')
        if annofn:
            if not self._ensure_dir_exists(encodeFilename(annofn)):
                return
            if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
                self.to_screen('[info] Video annotations are already present')
            elif not info_dict.get('annotations'):
                self.report_warning('There are no annotations to write.')
            else:
                try:
                    self.to_screen('[info] Writing video annotations to: ' + annofn)
                    with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                        annofile.write(info_dict['annotations'])
                except (KeyError, TypeError):
                    self.report_warning('There are no annotations to write.')
                except OSError:
                    self.report_error('Cannot write annotations file: ' + annofn)
                    return

        # Write internet shortcut files
        def _write_link_file(link_type):
            url = try_get(info_dict['webpage_url'], iri_to_uri)
            if not url:
                self.report_warning(
                    f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
                return True
            linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
            if not self._ensure_dir_exists(encodeFilename(linkfn)):
                return False
            if self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
                self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
                return True
            try:
                self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
                with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
                          newline='\r\n' if link_type == 'url' else '\n') as linkfile:
                    template_vars = {'url': url}
                    if link_type == 'desktop':
                        template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
                    linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
            except OSError:
                self.report_error(f'Cannot write internet shortcut {linkfn}')
                return False
            return True

        write_links = {
            'url': self.params.get('writeurllink'),
            'webloc': self.params.get('writewebloclink'),
            'desktop': self.params.get('writedesktoplink'),
        }
        if self.params.get('writelink'):
            link_type = ('webloc' if sys.platform == 'darwin'
                         else 'desktop' if sys.platform.startswith('linux')
                         else 'url')
            write_links[link_type] = True

        if any(should_write and not _write_link_file(link_type)
               for link_type, should_write in write_links.items()):
            return

        new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
        replace_info_dict(new_info)

        if self.params.get('skip_download'):
            info_dict['filepath'] = temp_filename
            info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
            info_dict['__files_to_move'] = files_to_move
            replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
            info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
        else:
            # Download
            info_dict.setdefault('__postprocessors', [])
            try:

                def existing_video_file(*filepaths):
                    ext = info_dict.get('ext')
                    converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
                    file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
                                              default_overwrite=False)
                    if file:
                        info_dict['ext'] = os.path.splitext(file)[1][1:]
                    return file

                fd, success = None, True
                if info_dict.get('protocol') or info_dict.get('url'):
                    fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
                    if fd != FFmpegFD and 'no-direct-merge' not in self.params['compat_opts'] and (
                            info_dict.get('section_start') or info_dict.get('section_end')):
                        msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
                               else 'You have requested downloading the video partially, but ffmpeg is not installed')
                        self.report_error(f'{msg}. Aborting')
                        return

                if info_dict.get('requested_formats') is not None:
                    old_ext = info_dict['ext']
                    if self.params.get('merge_output_format') is None:
                        if (info_dict['ext'] == 'webm'
                                and info_dict.get('thumbnails')
                                # check with type instead of pp_key, __name__, or isinstance
                                # since we dont want any custom PPs to trigger this
                                and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])):  # noqa: E721
                            info_dict['ext'] = 'mkv'
                            self.report_warning(
                                'webm doesn\'t support embedding a thumbnail, mkv will be used')
                    new_ext = info_dict['ext']

                    def correct_ext(filename, ext=new_ext):
                        if filename == '-':
                            return filename
                        filename_real_ext = os.path.splitext(filename)[1][1:]
                        filename_wo_ext = (
                            os.path.splitext(filename)[0]
                            if filename_real_ext in (old_ext, new_ext)
                            else filename)
                        return f'{filename_wo_ext}.{ext}'

                    # Ensure filename always has a correct extension for successful merge
                    full_filename = correct_ext(full_filename)
                    temp_filename = correct_ext(temp_filename)
                    dl_filename = existing_video_file(full_filename, temp_filename)

                    info_dict['__real_download'] = False
                    # NOTE: Copy so that original format dicts are not modified
                    info_dict['requested_formats'] = list(map(dict, info_dict['requested_formats']))

                    merger = FFmpegMergerPP(self)
                    downloaded = []
                    if dl_filename is not None:
                        self.report_file_already_downloaded(dl_filename)
                    elif fd:
                        for f in info_dict['requested_formats'] if fd != FFmpegFD else []:
                            f['filepath'] = fname = prepend_extension(
                                correct_ext(temp_filename, info_dict['ext']),
                                'f%s' % f['format_id'], info_dict['ext'])
                            downloaded.append(fname)
                        info_dict['url'] = '\n'.join(f['url'] for f in info_dict['requested_formats'])
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        if self.params.get('allow_unplayable_formats'):
                            self.report_warning(
                                'You have requested merging of multiple formats '
                                'while also allowing unplayable formats to be downloaded. '
                                'The formats won\'t be merged to prevent data corruption.')
                        elif not merger.available:
                            msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
                            if not self.params.get('ignoreerrors'):
                                self.report_error(f'{msg}. Aborting due to --abort-on-error')
                                return
                            self.report_warning(f'{msg}. The formats won\'t be merged')

                        if temp_filename == '-':
                            reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
                                      else 'but the formats are incompatible for simultaneous download' if merger.available
                                      else 'but ffmpeg is not installed')
                            self.report_warning(
                                f'You have requested downloading multiple formats to stdout {reason}. '
                                'The formats will be streamed one after the other')
                            fname = temp_filename
                        for f in info_dict['requested_formats']:
                            new_info = dict(info_dict)
                            del new_info['requested_formats']
                            new_info.update(f)
                            if temp_filename != '-':
                                fname = prepend_extension(
                                    correct_ext(temp_filename, new_info['ext']),
                                    'f%s' % f['format_id'], new_info['ext'])
                                if not self._ensure_dir_exists(fname):
                                    return
                                f['filepath'] = fname
                                downloaded.append(fname)
                            partial_success, real_download = self.dl(fname, new_info)
                            info_dict['__real_download'] = info_dict['__real_download'] or real_download
                            success = success and partial_success

                    if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
                        info_dict['__postprocessors'].append(merger)
                        info_dict['__files_to_merge'] = downloaded
                        # Even if there were no downloads, it is being merged only now
                        info_dict['__real_download'] = True
                    else:
                        for file in downloaded:
                            files_to_move[file] = None
                else:
                    # Just a single file
                    dl_filename = existing_video_file(full_filename, temp_filename)
                    if dl_filename is None or dl_filename == temp_filename:
                        # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
                        # So we should try to resume the download
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        self.report_file_already_downloaded(dl_filename)

                dl_filename = dl_filename or temp_filename
                info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))

            except network_exceptions as err:
                self.report_error('unable to download video data: %s' % error_to_compat_str(err))
                return
            except OSError as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
                return

            self._raise_pending_errors(info_dict)
            if success and full_filename != '-':

                def fixup():
                    do_fixup = True
                    fixup_policy = self.params.get('fixup')
                    vid = info_dict['id']

                    if fixup_policy in ('ignore', 'never'):
                        do_fixup = False
                    elif fixup_policy == 'warn':
                        do_fixup = 'warn'
                    elif fixup_policy != 'force':
                        assert fixup_policy in ('detect_or_warn', None)
                        if not info_dict.get('__real_download'):
                            do_fixup = False

                    def ffmpeg_fixup(cndn, msg, cls):
                        if not (do_fixup and cndn):
                            return
                        elif do_fixup == 'warn':
                            self.report_warning(f'{vid}: {msg}')
                            return
                        pp = cls(self)
                        if pp.available:
                            info_dict['__postprocessors'].append(pp)
                        else:
                            self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')

                    stretched_ratio = info_dict.get('stretched_ratio')
                    ffmpeg_fixup(stretched_ratio not in (1, None),
                                 f'Non-uniform pixel ratio {stretched_ratio}',
                                 FFmpegFixupStretchedPP)

                    downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
                    downloader = downloader.FD_NAME if downloader else None

                    ext = info_dict.get('ext')
                    postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
                        isinstance(pp, FFmpegVideoConvertorPP)
                        and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
                    ) for pp in self._pps['post_process'])

                    if not postprocessed_by_ffmpeg:
                        ffmpeg_fixup(fd != FFmpegFD and ext == 'm4a'
                                     and info_dict.get('container') == 'm4a_dash',
                                     'writing DASH m4a. Only some players support this container',
                                     FFmpegFixupM4aPP)
                        ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
                                     or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
                                     'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
                                     FFmpegFixupM3u8PP)
                        ffmpeg_fixup(info_dict.get('is_live') and downloader == 'dashsegments',
                                     'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)

                    ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
                    ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)

                fixup()
                try:
                    replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
                except PostProcessingError as err:
                    self.report_error('Postprocessing: %s' % str(err))
                    return
                try:
                    for ph in self._post_hooks:
                        ph(info_dict['filepath'])
                except Exception as err:
                    self.report_error('post hooks: %s' % str(err))
                    return
                info_dict['__write_download_archive'] = True

        assert info_dict is original_infodict  # Make sure the info_dict was modified in-place
        if self.params.get('force_write_download_archive'):
            info_dict['__write_download_archive'] = True
        check_max_downloads()
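
    # Lifecycle recap for one video, as implemented above (sketch):
    #   pre_process('video') -> forced printings -> sidecar files (description,
    #   subtitles, thumbnails, infojson) -> 'before_dl' PPs -> download (single
    #   file, or per-format files merged by FFmpegMergerPP) -> ffmpeg fixups ->
    #   post_process() -> download archive bookkeeping.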

    def __download_wrapper(self, func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                res = func(*args, **kwargs)
            except UnavailableVideoError as e:
                self.report_error(e)
            except DownloadCancelled as e:
                self.to_screen(f'[info] {e}')
                if not self.params.get('break_per_url'):
                    raise
                self._num_downloads = 0
            else:
                if self.params.get('dump_single_json', False):
                    self.post_extract(res)
                    self.to_stdout(json.dumps(self.sanitize_info(res)))
        return wrapper

    def download(self, url_list):
        """Download a given list of URLs."""
        url_list = variadic(url_list)  # Passing a single URL is a common mistake
        outtmpl = self.params['outtmpl']['default']
        if (len(url_list) > 1
                and outtmpl != '-'
                and '%' not in outtmpl
                and self.params.get('max_downloads') != 1):
            raise SameFileError(outtmpl)

        for url in url_list:
            self.__download_wrapper(self.extract_info)(
                url, force_generic_extractor=self.params.get('force_generic_extractor', False))

        return self._download_retcode
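
    # Illustrative usage (assumes the yt_dlp package is importable; this is a
    # sketch, not part of this module):
    #
    #   from yt_dlp import YoutubeDL
    #   with YoutubeDL({'outtmpl': '%(title)s.%(ext)s'}) as ydl:
    #       retcode = ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
    #
    # A bare string also works; variadic() above normalizes it to a list.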

    def download_with_info_file(self, info_filename):
        with contextlib.closing(fileinput.FileInput(
                [info_filename], mode='r',
                openhook=fileinput.hook_encoded('utf-8'))) as f:
            # FileInput doesn't have a read method, we can't call json.load
            infos = [self.sanitize_info(info, self.params.get('clean_infojson', True))
                     for info in variadic(json.loads('\n'.join(f)))]
        for info in infos:
            try:
                self.__download_wrapper(self.process_ie_result)(info, download=True)
            except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
                if not isinstance(e, EntryNotInPlaylist):
                    self.to_stderr('\r')
                webpage_url = info.get('webpage_url')
                if webpage_url is None:
                    raise
                self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
                self.download([webpage_url])
        return self._download_retcode

    @staticmethod
    def sanitize_info(info_dict, remove_private_keys=False):
        ''' Sanitize the infodict for converting to json '''
        if info_dict is None:
            return info_dict
        info_dict.setdefault('epoch', int(time.time()))
        info_dict.setdefault('_type', 'video')
        info_dict.setdefault('_version', {
            'version': __version__,
            'current_git_head': current_git_head(),
            'release_git_head': RELEASE_GIT_HEAD,
            'repository': ORIGIN,
        })

        if remove_private_keys:
            reject = lambda k, v: v is None or k.startswith('__') or k in {
                'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
                'entries', 'filepath', '_filename', 'filename', 'infojson_filename', 'original_url',
                'playlist_autonumber',
            }
        else:
            reject = lambda k, v: False

        def filter_fn(obj):
            if isinstance(obj, dict):
                return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
            elif isinstance(obj, (list, tuple, set, LazyList)):
                return list(map(filter_fn, obj))
            elif obj is None or isinstance(obj, (str, int, float, bool)):
                return obj
            else:
                return repr(obj)

        return filter_fn(info_dict)
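
    # Hedged sketch of the round-trip this enables (assumes an instance `ydl`):
    #
    #   info = ydl.extract_info(url, download=False)
    #   print(json.dumps(ydl.sanitize_info(info)))
    #
    # With remove_private_keys=True, '__'-prefixed and request-state keys are
    # dropped; objects that are not JSON-serializable are replaced by repr().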

    @staticmethod
    def filter_requested_info(info_dict, actually_filter=True):
        ''' Alias of sanitize_info for backward compatibility '''
        return YoutubeDL.sanitize_info(info_dict, actually_filter)

    def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
        for filename in set(filter(None, files_to_delete)):
            if msg:
                self.to_screen(msg % filename)
            try:
                os.remove(filename)
            except OSError:
                self.report_warning(f'Unable to delete file {filename}')
            if filename in info.get('__files_to_move', []):  # NB: Delete even if None
                del info['__files_to_move'][filename]

    @staticmethod
    def post_extract(info_dict):
        def actual_post_extract(info_dict):
            if info_dict.get('_type') in ('playlist', 'multi_video'):
                for video_dict in info_dict.get('entries', {}):
                    actual_post_extract(video_dict or {})
                return

            post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
            info_dict.update(post_extractor())

        actual_post_extract(info_dict or {})

    def run_pp(self, pp, infodict):
        files_to_delete = []
        if '__files_to_move' not in infodict:
            infodict['__files_to_move'] = {}
        try:
            files_to_delete, infodict = pp.run(infodict)
        except PostProcessingError as e:
            # Must be True and not 'only_download'
            if self.params.get('ignoreerrors') is True:
                self.report_error(e)
                return infodict
            raise

        if not files_to_delete:
            return infodict
        if self.params.get('keepvideo', False):
            for f in files_to_delete:
                infodict['__files_to_move'].setdefault(f, '')
        else:
            self._delete_downloaded_files(
                *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
        return infodict

    def run_all_pps(self, key, info, *, additional_pps=None):
        if key != 'video':
            self._forceprint(key, info)
        for pp in (additional_pps or []) + self._pps[key]:
            info = self.run_pp(pp, info)
        return info

    def pre_process(self, ie_info, key='pre_process', files_to_move=None):
        info = dict(ie_info)
        info['__files_to_move'] = files_to_move or {}
        try:
            info = self.run_all_pps(key, info)
        except PostProcessingError as err:
            msg = f'Preprocessing: {err}'
            info.setdefault('__pending_error', msg)
            self.report_error(msg, is_error=False)
        return info, info.pop('__files_to_move', None)

    def post_process(self, filename, info, files_to_move=None):
        """Run all the postprocessors on the given file."""
        info['filepath'] = filename
        info['__files_to_move'] = files_to_move or {}
        info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
        info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
        del info['__files_to_move']
        return self.run_all_pps('after_move', info)
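
    # Postprocessor stages in the order they run for a single video (per the
    # methods above): 'pre_process' -> 'before_dl' -> 'post_process' (plus
    # MoveFilesAfterDownloadPP) -> 'after_move'; 'after_video' then runs once
    # per URL after all requested formats are finished.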

    def _make_archive_id(self, info_dict):
        video_id = info_dict.get('id')
        if not video_id:
            return
        # Future-proof against any change in case
        # and backwards compatibility with prior versions
        extractor = info_dict.get('extractor_key') or info_dict.get('ie_key')  # key in a playlist
        if extractor is None:
            url = str_or_none(info_dict.get('url'))
            if not url:
                return
            # Try to find matching extractor for the URL and take its ie_key
            for ie_key, ie in self._ies.items():
                if ie.suitable(url):
                    extractor = ie_key
                    break
            else:
                return
        return make_archive_id(extractor, video_id)

    def in_download_archive(self, info_dict):
        if not self.archive:
            return False

        vid_ids = [self._make_archive_id(info_dict)]
        vid_ids.extend(info_dict.get('_old_archive_ids') or [])
        return any(id_ in self.archive for id_ in vid_ids)

    def record_download_archive(self, info_dict):
        fn = self.params.get('download_archive')
        if fn is None:
            return
        vid_id = self._make_archive_id(info_dict)
        assert vid_id

        self.write_debug(f'Adding to archive: {vid_id}')
        if is_path_like(fn):
            with locked_file(fn, 'a', encoding='utf-8') as archive_file:
                archive_file.write(vid_id + '\n')
        self.archive.add(vid_id)
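
    # Archive entries are one per line in the form '<extractor> <video_id>' as
    # produced by make_archive_id(), e.g. 'youtube BaW_jenozKc' (sketch; see
    # also the _old_archive_ids handled in in_download_archive above).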

    @staticmethod
    def format_resolution(format, default='unknown'):
        if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
            return 'audio only'
        if format.get('resolution') is not None:
            return format['resolution']
        if format.get('width') and format.get('height'):
            return '%dx%d' % (format['width'], format['height'])
        elif format.get('height'):
            return '%sp' % format['height']
        elif format.get('width'):
            return '%dx?' % format['width']
        return default
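
    # Examples of the mapping implemented above (sketch):
    #   {'width': 1920, 'height': 1080}           -> '1920x1080'
    #   {'height': 720}                           -> '720p'
    #   {'vcodec': 'none', 'acodec': 'mp4a.40.2'} -> 'audio only'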

    def _list_format_headers(self, *headers):
        if self.params.get('listformats_table', True) is not False:
            return [self._format_out(header, self.Styles.HEADERS) for header in headers]
        return headers

    def _format_note(self, fdict):
        res = ''
        if fdict.get('ext') in ['f4f', 'f4m']:
            res += '(unsupported)'
        if fdict.get('language'):
            if res:
                res += ' '
            res += '[%s]' % fdict['language']
        if fdict.get('format_note') is not None:
            if res:
                res += ' '
            res += fdict['format_note']
        if fdict.get('tbr') is not None:
            if res:
                res += ', '
            res += '%4dk' % fdict['tbr']
        if fdict.get('container') is not None:
            if res:
                res += ', '
            res += '%s container' % fdict['container']
        if (fdict.get('vcodec') is not None
                and fdict.get('vcodec') != 'none'):
            if res:
                res += ', '
            res += fdict['vcodec']
            if fdict.get('vbr') is not None:
                res += '@'
        elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
            res += 'video@'
        if fdict.get('vbr') is not None:
            res += '%4dk' % fdict['vbr']
        if fdict.get('fps') is not None:
            if res:
                res += ', '
            res += '%sfps' % fdict['fps']
        if fdict.get('acodec') is not None:
            if res:
                res += ', '
            if fdict['acodec'] == 'none':
                res += 'video only'
            else:
                res += '%-5s' % fdict['acodec']
        elif fdict.get('abr') is not None:
            if res:
                res += ', '
            res += 'audio'
        if fdict.get('abr') is not None:
            res += '@%3dk' % fdict['abr']
        if fdict.get('asr') is not None:
            res += ' (%5dHz)' % fdict['asr']
        if fdict.get('filesize') is not None:
            if res:
                res += ', '
            res += format_bytes(fdict['filesize'])
        elif fdict.get('filesize_approx') is not None:
            if res:
                res += ', '
            res += '~' + format_bytes(fdict['filesize_approx'])
        return res

    def _get_formats(self, info_dict):
        if info_dict.get('formats') is None:
            if info_dict.get('url') and info_dict.get('_type', 'video') == 'video':
                return [info_dict]
            return []
        return info_dict['formats']

    def render_formats_table(self, info_dict):
        formats = self._get_formats(info_dict)
        if not formats:
            return None
        if not self.params.get('listformats_table', True) is not False:
            table = [
                [
                    format_field(f, 'format_id'),
                    format_field(f, 'ext'),
                    self.format_resolution(f),
                    self._format_note(f),
                ] for f in formats if (f.get('preference') or 0) >= -1000]
            return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)

        def simplified_codec(f, field):
            assert field in ('acodec', 'vcodec')
            codec = f.get(field)
            if not codec:
                return 'unknown'
            elif codec != 'none':
                return '.'.join(codec.split('.')[:4])

            if field == 'vcodec' and f.get('acodec') == 'none':
                return 'images'
            elif field == 'acodec' and f.get('vcodec') == 'none':
                return ''
            return self._format_out('audio only' if field == 'vcodec' else 'video only',
                                    self.Styles.SUPPRESS)

        delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
        table = [
            [
                self._format_out(format_field(f, 'format_id'), self.Styles.ID),
                format_field(f, 'ext'),
                format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
                format_field(f, 'fps', '\t%d', func=round),
                format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
                format_field(f, 'audio_channels', '\t%s'),
                delim, (
                    format_field(f, 'filesize', ' \t%s', func=format_bytes)
                    or format_field(f, 'filesize_approx', '≈\t%s', func=format_bytes)
                    or format_field(try_call(lambda: format_bytes(int(info_dict['duration'] * f['tbr'] * (1024 / 8)))),
                                    None, self._format_out('~\t%s', self.Styles.SUPPRESS))),
                format_field(f, 'tbr', '\t%dk', func=round),
                shorten_protocol_name(f.get('protocol', '')),
                delim,
                simplified_codec(f, 'vcodec'),
                format_field(f, 'vbr', '\t%dk', func=round),
                simplified_codec(f, 'acodec'),
                format_field(f, 'abr', '\t%dk', func=round),
                format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
                join_nonempty(format_field(f, 'language', '[%s]'), join_nonempty(
                    self._format_out('UNSUPPORTED', self.Styles.BAD_FORMAT) if f.get('ext') in ('f4f', 'f4m') else None,
                    (self._format_out('Maybe DRM', self.Styles.WARNING) if f.get('has_drm') == 'maybe'
                     else self._format_out('DRM', self.Styles.BAD_FORMAT) if f.get('has_drm') else None),
                    format_field(f, 'format_note'),
                    format_field(f, 'container', ignore=(None, f.get('ext'))),
                    delim=', '), delim=' '),
            ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
        header_line = self._list_format_headers(
            'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', 'CH', delim, '\tFILESIZE', '\tTBR', 'PROTO',
            delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')

        return render_table(
            header_line, table, hide_empty=True,
            delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))

    def render_thumbnails_table(self, info_dict):
        thumbnails = list(info_dict.get('thumbnails') or [])
        if not thumbnails:
            return None
        return render_table(
            self._list_format_headers('ID', 'Width', 'Height', 'URL'),
            [[t.get('id'), t.get('width') or 'unknown', t.get('height') or 'unknown', t['url']] for t in thumbnails])

    def render_subtitles_table(self, video_id, subtitles):
        def _row(lang, formats):
            exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
            if len(set(names)) == 1:
                names = [] if names[0] == 'unknown' else names[:1]
            return [lang, ', '.join(names), ', '.join(exts)]

        if not subtitles:
            return None
        return render_table(
            self._list_format_headers('Language', 'Name', 'Formats'),
            [_row(lang, formats) for lang, formats in subtitles.items()],
            hide_empty=True)

    def __list_table(self, video_id, name, func, *args):
        table = func(*args)
        if not table:
            self.to_screen(f'{video_id} has no {name}')
            return
        self.to_screen(f'[info] Available {name} for {video_id}:')
        self.to_stdout(table)

    def list_formats(self, info_dict):
        self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)

    def list_thumbnails(self, info_dict):
        self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)

    def list_subtitles(self, video_id, subtitles, name='subtitles'):
        self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)

    def print_debug_header(self):
        if not self.params.get('verbose'):
            return

        from . import _IN_CLI  # Must be delayed import

        # These imports can be slow. So import them only as needed
        from .extractor.extractors import _LAZY_LOADER
        from .extractor.extractors import (
            _PLUGIN_CLASSES as plugin_ies,
            _PLUGIN_OVERRIDES as plugin_ie_overrides,
        )

        def get_encoding(stream):
            ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
            additional_info = []
            if os.environ.get('TERM', '').lower() == 'dumb':
                additional_info.append('dumb')
            if not supports_terminal_sequences(stream):
                from .utils import WINDOWS_VT_MODE  # Must be imported locally
                additional_info.append('No VT' if WINDOWS_VT_MODE is False else 'No ANSI')
            if additional_info:
                ret = f'{ret} ({",".join(additional_info)})'
            return ret

        encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            self.get_encoding(),
            ', '.join(
                f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
                if stream is not None and key != 'console'))

        logger = self.params.get('logger')
        if logger:
            write_debug = lambda msg: logger.debug(f'[debug] {msg}')
            write_debug(encoding_str)
        else:
            write_string(f'[debug] {encoding_str}\n', encoding=None)
            write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')

        source = detect_variant()
        if VARIANT not in (None, 'pip'):
            source += '*'

        klass = type(self)
        write_debug(join_nonempty(
            f'{REPOSITORY.rpartition("/")[2]} version',
            _make_label(ORIGIN, CHANNEL.partition('@')[2] or __version__, __version__),
            f'[{RELEASE_GIT_HEAD[:9]}]' if RELEASE_GIT_HEAD else '',
            '' if source == 'unknown' else f'({source})',
            '' if _IN_CLI else 'API' if klass == YoutubeDL else f'API:{self.__module__}.{klass.__qualname__}',
            delim=' '))

        if not _IN_CLI:
            write_debug(f'params: {self.params}')

        if not _LAZY_LOADER:
            if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
                write_debug('Lazy loading extractors is forcibly disabled')
            else:
                write_debug('Lazy loading extractors is disabled')
        if self.params['compat_opts']:
            write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))

        if current_git_head():
            write_debug(f'Git HEAD: {current_git_head()}')
        write_debug(system_identifier())

        exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
        ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
        if ffmpeg_features:
            exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))

        exe_versions['rtmpdump'] = rtmpdump_version()
        exe_versions['phantomjs'] = PhantomJSwrapper._version()
        exe_str = ', '.join(
            f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
        ) or 'none'
        write_debug('exe versions: %s' % exe_str)

        from .compat.compat_utils import get_package_info
        from .dependencies import available_dependencies

        write_debug('Optional libraries: %s' % (', '.join(sorted({
            join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
        })) or 'none'))

        write_debug(f'Proxy map: {self.proxies}')
        write_debug(f'Request Handlers: {", ".join(rh.RH_NAME for rh in self._request_director.handlers.values())}')
        for plugin_type, plugins in {'Extractor': plugin_ies, 'Post-Processor': plugin_pps}.items():
            display_list = ['%s%s' % (
                klass.__name__, '' if klass.__name__ == name else f' as {name}')
                for name, klass in plugins.items()]
            if plugin_type == 'Extractor':
                display_list.extend(f'{plugins[-1].IE_NAME.partition("+")[2]} ({parent.__name__})'
                                    for parent, plugins in plugin_ie_overrides.items())
            if not display_list:
                continue
            write_debug(f'{plugin_type} Plugins: {", ".join(sorted(display_list))}')

        plugin_dirs = plugin_directories()
        if plugin_dirs:
            write_debug(f'Plugin directories: {plugin_dirs}')

        # Not implemented
        if False and self.params.get('call_home'):
            ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
            write_debug('Public IP address: %s' % ipaddr)
            latest_version = self.urlopen(
                'https://yt-dl.org/latest/version').read().decode()
            if version_tuple(latest_version) > version_tuple(__version__):
                self.report_warning(
                    'You are using an outdated version (newest version: %s)! '
                    'See https://yt-dl.org/update if you need help updating.' %
                    latest_version)

    @functools.cached_property
    def proxies(self):
        """Global proxy configuration"""
        opts_proxy = self.params.get('proxy')
        if opts_proxy is not None:
            if opts_proxy == '':
                opts_proxy = '__noproxy__'
            proxies = {'all': opts_proxy}
        else:
            proxies = urllib.request.getproxies()
            # compat. Set HTTPS_PROXY to __noproxy__ to revert
            if 'http' in proxies and 'https' not in proxies:
                proxies['https'] = proxies['http']

        return proxies
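
    # Resolution order, per the code above: an explicit 'proxy' param wins (an
    # empty string becomes '__noproxy__', disabling proxies); otherwise the
    # standard environment variables are read via urllib.request.getproxies(),
    # e.g. HTTPS_PROXY=http://127.0.0.1:3128 (illustrative value).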

    @functools.cached_property
    def cookiejar(self):
        """Global cookiejar instance"""
        return load_cookies(
            self.params.get('cookiefile'), self.params.get('cookiesfrombrowser'), self)

    @property
    def _opener(self):
        """
        Get a urllib OpenerDirector from the Urllib handler (deprecated).
        """
        self.deprecation_warning('YoutubeDL._opener is deprecated, use YoutubeDL.urlopen()')
        handler = self._request_director.handlers['Urllib']
        return handler._get_instance(cookiejar=self.cookiejar, proxies=self.proxies)

    def urlopen(self, req):
        """ Start an HTTP download """
        if isinstance(req, str):
            req = Request(req)
        elif isinstance(req, urllib.request.Request):
            self.deprecation_warning(
                'Passing a urllib.request.Request object to YoutubeDL.urlopen() is deprecated. '
                'Use yt_dlp.networking.common.Request instead.')
            req = urllib_req_to_req(req)
        assert isinstance(req, Request)

        # compat: Assume user:pass url params are basic auth
        url, basic_auth_header = extract_basic_auth(req.url)
        if basic_auth_header:
            req.headers['Authorization'] = basic_auth_header
        req.url = sanitize_url(url)

        clean_proxies(proxies=req.proxies, headers=req.headers)
        clean_headers(req.headers)

        try:
            return self._request_director.send(req)
        except NoSupportingHandlers as e:
            for ue in e.unsupported_errors:
                # FIXME: This depends on the order of errors.
                if not (ue.handler and ue.msg):
                    continue
                if ue.handler.RH_KEY == 'Urllib' and 'unsupported url scheme: "file"' in ue.msg.lower():
                    raise RequestError(
                        'file:// URLs are disabled by default in yt-dlp for security reasons. '
                        'Use --enable-file-urls to enable at your own risk.', cause=ue) from ue
                if 'unsupported proxy type: "https"' in ue.msg.lower():
                    raise RequestError(
                        'To use an HTTPS proxy for this request, one of the following dependencies needs to be installed: requests')
                elif (
                    re.match(r'unsupported url scheme: "wss?"', ue.msg.lower())
                    and 'websockets' not in self._request_director.handlers
                ):
                    raise RequestError(
                        'This request requires WebSocket support. '
                        'Ensure one of the following dependencies are installed: websockets',
                        cause=ue) from ue
            raise
        except SSLError as e:
            if 'UNSAFE_LEGACY_RENEGOTIATION_DISABLED' in str(e):
                raise RequestError('UNSAFE_LEGACY_RENEGOTIATION_DISABLED: Try using --legacy-server-connect', cause=e) from e
            elif 'SSLV3_ALERT_HANDSHAKE_FAILURE' in str(e):
                raise RequestError(
                    'SSLV3_ALERT_HANDSHAKE_FAILURE: The server may not support the current cipher list. '
                    'Try using --legacy-server-connect', cause=e) from e
            raise
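
    # Illustrative usage (assumes a constructed instance `ydl`; sketch only):
    #
    #   from yt_dlp.networking import Request
    #   response = ydl.urlopen(Request('https://example.com', headers={'Accept': '*/*'}))
    #   data = response.read()
    #
    # A plain URL string is also accepted and wrapped in a Request above.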

    def build_request_director(self, handlers, preferences=None):
        logger = _YDLLogger(self)
        headers = self.params['http_headers'].copy()
        proxies = self.proxies.copy()
        clean_headers(headers)
        clean_proxies(proxies, headers)

        director = RequestDirector(logger=logger, verbose=self.params.get('debug_printtraffic'))
        for handler in handlers:
            director.add_handler(handler(
                logger=logger,
                headers=headers,
                cookiejar=self.cookiejar,
                proxies=proxies,
                prefer_system_certs='no-certifi' in self.params['compat_opts'],
                verify=not self.params.get('nocheckcertificate'),
                **traverse_obj(self.params, {
                    'verbose': 'debug_printtraffic',
                    'source_address': 'source_address',
                    'timeout': 'socket_timeout',
                    'legacy_ssl_support': 'legacyserverconnect',
                    'enable_file_urls': 'enable_file_urls',
                    'client_cert': {
                        'client_certificate': 'client_certificate',
                        'client_certificate_key': 'client_certificate_key',
                        'client_certificate_password': 'client_certificate_password',
                    },
                }),
            ))
        director.preferences.update(preferences or [])
        if 'prefer-legacy-http-handler' in self.params['compat_opts']:
            director.preferences.add(lambda rh, _: 500 if rh.RH_KEY == 'Urllib' else 0)
        return director

    def encode(self, s):
        if isinstance(s, bytes):
            return s  # Already encoded

        try:
            return s.encode(self.get_encoding())
        except UnicodeEncodeError as err:
            err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
            raise

    def get_encoding(self):
        encoding = self.params.get('encoding')
        if encoding is None:
            encoding = preferredencoding()
        return encoding

    def _write_info_json(self, label, ie_result, infofn, overwrite=None):
        ''' Write infojson and returns True = written, 'exists' = Already exists, False = skip, None = error '''
        if overwrite is None:
            overwrite = self.params.get('overwrites', True)
        if not self.params.get('writeinfojson'):
            return False
        elif not infofn:
            self.write_debug(f'Skipping writing {label} infojson')
            return False
        elif not self._ensure_dir_exists(infofn):
            return None
        elif not overwrite and os.path.exists(infofn):
            self.to_screen(f'[info] {label.title()} metadata is already present')
            return 'exists'

        self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
        try:
            write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
            return True
        except OSError:
            self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
            return None

    def _write_description(self, label, ie_result, descfn):
        ''' Write description and returns True = written, False = skip, None = error '''
        if not self.params.get('writedescription'):
            return False
        elif not descfn:
            self.write_debug(f'Skipping writing {label} description')
            return False
        elif not self._ensure_dir_exists(descfn):
            return None
        elif not self.params.get('overwrites', True) and os.path.exists(descfn):
            self.to_screen(f'[info] {label.title()} description is already present')
        elif ie_result.get('description') is None:
            self.to_screen(f'[info] There\'s no {label} description to write')
            return False
        else:
            try:
                self.to_screen(f'[info] Writing {label} description to: {descfn}')
                with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                    descfile.write(ie_result['description'])
            except OSError:
                self.report_error(f'Cannot write {label} description file {descfn}')
                return None
        return True

    def _write_subtitles(self, info_dict, filename):
        ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
        ret = []
        subtitles = info_dict.get('requested_subtitles')
        if not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
            # subtitles download errors are already managed as troubles in relevant IE
            # that way it will silently go on when used with unsupporting IE
            return ret
        elif not subtitles:
            self.to_screen('[info] There are no subtitles for the requested languages')
            return ret
        sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
        if not sub_filename_base:
            self.to_screen('[info] Skipping writing video subtitles')
            return ret

        for sub_lang, sub_info in subtitles.items():
            sub_format = sub_info['ext']
            sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
            sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
            existing_sub = self.existing_file((sub_filename_final, sub_filename))
            if existing_sub:
                self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
                sub_info['filepath'] = existing_sub
                ret.append((existing_sub, sub_filename_final))
                continue

            self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
            if sub_info.get('data') is not None:
                try:
                    # Use newline='' to prevent conversion of newline characters
                    # See https://github.com/ytdl-org/youtube-dl/issues/10268
                    with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
                        subfile.write(sub_info['data'])
                    sub_info['filepath'] = sub_filename
                    ret.append((sub_filename, sub_filename_final))
                    continue
                except OSError:
                    self.report_error(f'Cannot write video subtitles file {sub_filename}')
                    return None

            try:
                sub_copy = sub_info.copy()
                sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
                self.dl(sub_filename, sub_copy, subtitle=True)
                sub_info['filepath'] = sub_filename
                ret.append((sub_filename, sub_filename_final))
            except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
                msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
                if self.params.get('ignoreerrors') is not True:  # False or 'only_download'
                    if not self.params.get('ignoreerrors'):
                        self.report_error(msg)
                    raise DownloadError(msg)
                self.report_warning(msg)
        return ret

    def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
        ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename); or None if error '''
        write_all = self.params.get('write_all_thumbnails', False)
        thumbnails, ret = [], []
        if write_all or self.params.get('writethumbnail', False):
            thumbnails = info_dict.get('thumbnails') or []
            if not thumbnails:
                self.to_screen(f'[info] There are no {label} thumbnails to download')
                return ret
        multiple = write_all and len(thumbnails) > 1

        if thumb_filename_base is None:
            thumb_filename_base = filename
        if thumbnails and not thumb_filename_base:
            self.write_debug(f'Skipping writing {label} thumbnail')
            return ret

        if thumbnails and not self._ensure_dir_exists(filename):
            return None

        for idx, t in list(enumerate(thumbnails))[::-1]:
            thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
            thumb_display_id = f'{label} thumbnail {t["id"]}'
            thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
            thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))

            existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
            if existing_thumb:
                self.to_screen('[info] %s is already present' % (
                    thumb_display_id if multiple else f'{label} thumbnail').capitalize())
                t['filepath'] = existing_thumb
                ret.append((existing_thumb, thumb_filename_final))
            else:
                self.to_screen(f'[info] Downloading {thumb_display_id} ...')
                try:
                    uf = self.urlopen(Request(t['url'], headers=t.get('http_headers', {})))
                    self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
                    with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    ret.append((thumb_filename, thumb_filename_final))
                    t['filepath'] = thumb_filename
                except network_exceptions as err:
                    if isinstance(err, HTTPError) and err.status == 404:
                        self.to_screen(f'[info] {thumb_display_id.title()} does not exist')
                    else:
                        self.report_warning(f'Unable to download {thumb_display_id}: {err}')
                    thumbnails.pop(idx)
            if ret and not write_all:
                break
        return ret