from .cache import Cache
from .compat import functools, urllib  # isort: split
from .compat import compat_os_name, compat_shlex_quote, urllib_req_to_req
from .cookies import LenientSimpleCookie, load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.common import UnsupportedURLIE
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .networking import HEADRequest, Request, RequestDirector
from .networking.common import _REQUEST_HANDLERS, _RH_PREFERENCES
from .networking.exceptions import (
    HTTPError,
    NoSupportingHandlers,
    RequestError,
    SSLError,
    network_exceptions,
)
from .plugins import directories as plugin_directories
from .postprocessor import _PLUGIN_CLASSES as plugin_pps
from .postprocessor import (
    EmbedThumbnailPP,
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    FFmpegVideoConvertorPP,
    MoveFilesAfterDownloadPP,
    get_postprocessor,
)
from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
from .update import _get_system_deprecation
from .utils import (
    UnavailableVideoError,
    format_decimal_suffix,
    orderedSet_from_options,
    remove_terminal_sequences,
    supports_terminal_sequences,
    windows_enable_vt_mode,
)
from .utils._utils import _YDLLogger
from .utils.networking import (
    HTTPHeaderDict,
    std_headers,
)
from .version import CHANNEL, ORIGIN, RELEASE_GIT_HEAD, VARIANT, __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL:
    """YoutubeDL class.

    YoutubeDL objects are responsible for downloading the actual video file
    and writing it to disk if the user has requested it, among some other
    tasks. In most cases there should be one per program. Given a video URL,
    the downloader does not know how to extract all the needed information
    (that is the task of the InfoExtractors), so it has to pass the URL to
    one of them.

    For this, YoutubeDL objects have a method that allows InfoExtractors
    to be registered in a given order. When it is passed a URL, the
    YoutubeDL object hands it to the first InfoExtractor it finds that
    reports being able to handle it. The InfoExtractor extracts all the
    information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge of the InfoExtractors
    that are added to it, so this is a "mutual registration".
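
    For example, a minimal embedding sketch (the download() method is part
    of this class's public API, but is defined outside this excerpt):

        from yt_dlp import YoutubeDL

        with YoutubeDL({'format': 'best', 'outtmpl': '%(title)s.%(ext)s'}) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])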

    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    netrc_location:    Location of the netrc file. Defaults to ~/.netrc.
    netrc_cmd:         Use a shell command to get credentials.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A dict with keys WHEN mapped to a list of templates to
                       print to stdout. The allowed keys are video or any of the
                       items in utils.POSTPROCESS_WHEN.
                       For compatibility, a single list is also accepted
    print_to_file:     A dict with keys WHEN (same as forceprint) mapped to
                       a list of tuples with (template, filename)
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or list_thumbnails is used
    format:            Video format code. See "FORMAT SELECTION" for more details.
                       You can also pass a function. The function takes 'ctx' as
                       argument and returns the formats to download.
                       See "build_format_selector" for an implementation
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental)
    format_sort:       A list of fields by which to sort the video formats.
                       See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
    prefer_free_formats: Whether to prefer video formats with free containers
                       over non-free ones of the same quality.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file
    check_formats:     Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none),
                       'selected' (check selected formats),
                       or None (check only if requested by extractor)
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils/_utils.py)
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils/_utils.py).
                       For compatibility with youtube-dl, a single string can also be used
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be Windows-compatible
    ignoreerrors:      Do not stop on download/postprocessing errors.
                       Can be 'only_download' to ignore only download errors.
                       Default is 'only_download' for CLI, but False for API
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped
    allowed_extractors: List of regexes to match against extractor names that are allowed
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False
    playlist_items:    Specific indices of playlist to download.
    playlistrandom:    Download playlist items in random order.
    lazy_playlist:     Process playlist entries as they are received.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Print everything to stderr instead of stdout.
    consoletitle:      Display progress in the console window's titlebar.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video metadata to a .info.json file
    clean_infojson:    Remove internal metadata from the infojson
    getcomments:       Extract video comments. These will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc.
                       also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. A language can be prefixed with a "-" to
                       exclude it from the requested languages, e.g. ['all', '-live_chat']
    keepvideo:         Keep the video file after post-processing
    daterange:         A utils.DateRange object; download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Unsuitable videos for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  A set, or the name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_per_url:     Whether break_on_reject and break_on_existing
                       should act on each input URL as opposed to for the entire queue
    cookiefile:        File name or text stream from where cookies should be read and dumped to
    cookiesfrombrowser: A tuple containing the name of the browser, the profile
                       name/path from where cookies are loaded, the name of the keyring,
                       and the container name, e.g. ('chrome', ) or
                       ('vivaldi', 'default', 'BASICTEXT') or ('firefox', 'default', None, 'Meta')
    legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
                       support RFC 5746 secure renegotiation
    nocheckcertificate: Do not verify SSL certificates
    client_certificate: Path to client certificate file in PEM format. May include the private key
    client_certificate_key: Path to private key file for client certificate
    client_certificate_password: Password for client certificate private key, if encrypted.
                       If not provided and the key is encrypted, yt-dlp will ask interactively
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       (Only supported by some extractors)
    enable_file_urls:  Enable file:// URLs. This is disabled by default for security reasons.
    http_headers:      A dictionary of custom headers to be used for all requests
    proxy:             URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic:Print out sent and received HTTP traffic
    default_search:    Prepend this string if an input URL is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Whether to resolve and process url_results further
                       * False: Always process. Default for API
                       * True: Never process
                       * 'in_playlist': Do not process inside playlist/multi_video
                       * 'discard': Always process, but don't return the result
                         from inside playlist/multi_video
                       * 'discard_in_playlist': Same as "discard", but only for
                         playlists (not multi_video). Default for CLI
    wait_for_video:    If given, wait for scheduled streams to become available.
                       The value should be a tuple containing the range
                       (min_secs, max_secs) to wait between retries
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Allowed values are
                               the entries of utils.POSTPROCESS_WHEN
                               Assumed to be 'post_process' if not given
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading" or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
    postprocessor_hooks: A list of functions that get called on postprocessing
                       progress, with a dictionary with the entries
                       * status: One of "started", "processing", or "finished".
                                 Check this first and ignore unknown values.
                       * postprocessor: Name of the postprocessor
                       * info_dict: The extracted info_dict

                       Progress hooks are guaranteed to be called at least twice
                       (with status "started" and "finished") if the processing is successful.
    merge_output_format: "/" separated list of extensions to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                         about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval:Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       The actual sleep time will be a random float from the range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called for every video with the signature
                       (info_dict, *, incomplete: bool) -> Optional[str]
                       For backward compatibility with youtube-dl, the signature
                       (info_dict) -> Optional[str] is also allowed.
                       - If it returns a message, the video is ignored.
                       - If it returns None, the video is downloaded.
                       - If it returns utils.NO_DEFAULT, the user is interactively
                         asked whether to download the video.
                       - Raise utils.DownloadCancelled(msg) to abort the remaining
                         downloads when a video is rejected.
                       match_filter_func in utils/_utils.py is one example of this.
    color:             A Dictionary with output stream names as keys
                       and their respective color policies as values.
                       Can also just be a single color policy,
                       in which case it applies to all outputs.
                       Valid stream names are 'stdout' and 'stderr'.
                       Valid color policies are one of 'always', 'auto', 'no_color' or 'never'.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       the X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat, format-sort,
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
                       Refer to __init__.py for their implementation
    progress_template: Dictionary of templates for progress outputs.
                       Allowed keys are 'download', 'postprocess',
                       'download-title' (console title) and 'postprocess-title'.
                       The template is mapped on a dictionary with keys 'progress' and 'info'
    retry_sleep_functions: Dictionary of functions that take the number of attempts
                       as argument and return the time to sleep in seconds.
                       Allowed keys are 'http', 'fragment', 'file_access'
    download_ranges:   A callback function that gets called for every video with
                       the signature (info_dict, ydl) -> Iterable[Section].
                       Only the returned sections will be downloaded.
                       Each Section is a dict with the following keys:
                       * start_time: Start time of the section in seconds
                       * end_time: End time of the section in seconds
                       * title: Section title (Optional)
                       * index: Section number (Optional)
    force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
    noprogress:        Do not print the progress bar
    live_from_start:   Whether to download livestream videos from the start
    The following parameters are not used by YoutubeDL itself; they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP
                       For compatibility with youtube-dl, a single list of args
                       can also be used

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors (default: 3)
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists into different formats at
                       discontinuities such as ad breaks (default: False)
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       E.g. {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched:      Mark videos watched (even with --simulate). Only for YouTube
    The following options are deprecated and may be removed in the future:

    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
                       - `raise DownloadCancelled(msg)` in match_filter instead
    force_generic_extractor: Force downloader to use the generic extractor
                       - Use allowed_extractors = ['generic', 'default']
    playliststart:     - Use playlist_items
                       Playlist item to start at.
    playlistend:       - Use playlist_items
                       Playlist item to end at.
    playlistreverse:   - Use playlist_items
                       Download playlist items in reverse order.
    forceurl:          - Use forceprint
                       Force printing final URL.
    forcetitle:        - Use forceprint
                       Force printing title.
    forceid:           - Use forceprint
                       Force printing ID.
    forcethumbnail:    - Use forceprint
                       Force printing thumbnail URL.
    forcedescription:  - Use forceprint
                       Force printing description.
    forcefilename:     - Use forceprint
                       Force printing final filename.
    forceduration:     - Use forceprint
                       Force printing duration.
    allsubtitles:      - Use subtitleslangs = ['all']
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    include_ads:       - Doesn't work
                       Download ads as well
    call_home:         - Not implemented
                       Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging.
    post_hooks:        - Register a custom postprocessor instead
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use the downloader suggested by the extractor if None.
    prefer_ffmpeg:     - avconv support is deprecated
                       If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by the extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: - Use extractor_args
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by the extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
    no_color:          Same as `color='no_color'`
    no_overwrites:     Same as `overwrites=False`
    """

    _NUMERIC_FIELDS = {
        'width', 'height', 'asr', 'audio_channels', 'fps',
        'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
    }

    _format_fields = {
        # NB: Keep in sync with the docstring of extractor/common.py
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
        'preference', 'language', 'language_preference', 'quality', 'source_preference', 'cookies',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'hls_aes', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time',
    }

    _format_selection_exts = {
        'audio': set(MEDIA_EXTENSIONS.common_audio),
        'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
        'storyboards': set(MEDIA_EXTENSIONS.storyboards),
    }

    def __init__(self, params=None, auto_init=True):
        """Create a YoutubeDL object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        if params is None:
            params = {}
        self.params = params
        self._ies = {}
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._num_videos = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)
        self.__header_cookies = []

        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            out=stdout,
            error=sys.stderr,
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None),
        )

        try:
            windows_enable_vt_mode()
        except Exception as e:
            self.write_debug(f'Failed to enable VT mode: {e}')

        if self.params.get('no_color'):
            if self.params.get('color') is not None:
                self.params.setdefault('_warnings', []).append(
                    'Overwriting params from "color" with "no_color"')
            self.params['color'] = 'no_color'

        term_allow_color = os.getenv('TERM', '').lower() != 'dumb'
        no_color = bool(os.getenv('NO_COLOR'))

        def process_color_policy(stream):
            stream_name = {sys.stdout: 'stdout', sys.stderr: 'stderr'}[stream]
            policy = traverse_obj(self.params, ('color', (stream_name, None), {str}), get_all=False)
            if policy in ('auto', None):
                if term_allow_color and supports_terminal_sequences(stream):
                    return 'no_color' if no_color else True
                return False
            assert policy in ('always', 'never', 'no_color'), policy
            return {'always': True, 'never': False}.get(policy, policy)
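        # For instance, color={'stdout': 'always', 'stderr': 'auto'} resolves
        # per-stream, while a bare color='never' applies to every stream
        # (see the "color" entry in the class docstring).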

        self._allow_colors = Namespace(**{
            name: process_color_policy(stream)
            for name, stream in self._out_files.items_ if name != 'console'
        })

        system_deprecation = _get_system_deprecation()
        if system_deprecation:
            self.deprecated_feature(system_deprecation.replace('\n', '\n                    '))

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                '         If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        if self.params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = shutil.get_terminal_size().columns
                width_args = [] if width is None else ['-w', str(width)]
                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        self.params['http_headers'] = HTTPHeaderDict(std_headers, self.params.get('http_headers'))
        self._load_cookies(self.params['http_headers'].get('Cookie'))  # compat
        self.params['http_headers'].pop('Cookie', None)
        self._request_director = self.build_request_director(_REQUEST_HANDLERS.values(), _RH_PREFERENCES)

        if auto_init and auto_init != 'no_verbose_header':
            self.print_debug_header()

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecated_feature(msg)

        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        if self.params.get('simulate') is None and any((
            self.params.get('list_thumbnails'),
            self.params.get('listformats'),
            self.params.get('listsubtitles'),
        )):
            self.params['simulate'] = 'list_only'

        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}

        if auto_init:
            self.add_default_info_extractors()

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))
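        # At this point self.format_selector is None or '-' (passthrough),
        # a user-supplied callable, or a selector parsed from the format string.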

        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                when=when)

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            archive = set()
            if fn is None:
                return archive
            elif not is_path_like(fn):
                return fn

            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
            return archive

        self.archive = preload_download_archive(self.params.get('download_archive'))
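        # The archive is a plain-text file with one recorded download per line
        # (see the download_archive option in the class docstring).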

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                [sys.argv[0]]
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key; it will try to get one from
        the _ies list, and if there is no instance it will create a new one
        and add it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        all_ies = {ie.IE_NAME.lower(): ie for ie in gen_extractor_classes()}
        all_ies['end'] = UnsupportedURLIE()
        try:
            ie_names = orderedSet_from_options(
                self.params.get('allowed_extractors', ['default']), {
                    'all': list(all_ies),
                    'default': [name for name, ie in all_ies.items() if ie._ENABLED],
                }, use_regex=True)
        except re.error as e:
            raise ValueError(f'Wrong regex for allowed_extractors: {e.pattern}')
        for name in ie_names:
            self.add_info_extractor(all_ies[name])
        self.write_debug(f'Loaded {len(ie_names)} extractors')

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)
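    # e.g. (sketch, using one of the postprocessors imported above):
    #   ydl.add_post_processor(MoveFilesAfterDownloadPP(ydl), when='after_move')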

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)
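    # A hook is any callable accepting the progress dict described in the
    # class docstring, e.g. (sketch):
    #   def hook(d):
    #       if d['status'] == 'finished':
    #           print('Downloaded', d['filename'])
    #   ydl.add_progress_hook(hook)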

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
            for pp in pps:
                pp.add_progress_hook(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. '
                                     'Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. '
                                     'Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)

    def to_screen(self, message, skip_eol=False, quiet=None, only_once=False):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            return
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            return
        self._write_string(
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen, only_once=only_once)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)

    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
            return
        self._write_string(code, self._out_files.console)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        else:
            self._send_console_code(f'\033]0;{message}\007')

    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[22;0t')  # Save the title on stack

    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[23;0t')  # Restore the title from stack

    def __enter__(self):
        self.save_console_title()
        return self

    def save_cookies(self):
        if self.params.get('cookiefile') is not None:
            self.cookiejar.save()

    def __exit__(self, *args):
        self.restore_console_title()
        self.close()

    def close(self):
        self.save_cookies()
        self._request_director.close()

    def trouble(self, message=None, tb=None, is_error=True):
        """Determine the action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        @param tb          If given, is additional traceback information
        @param is_error    Whether to raise error according to ignoreerrors
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    Styles = Namespace(
        HEADERS='yellow',
        EMPHASIS='light blue',
        FILENAME='green',
        ID='green',
        DELIM='blue',
        ERROR='red',
        BAD_FORMAT='light red',
        WARNING='yellow',
        SUPPRESS='light black',
    )

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        text = str(text)
        if test_encoding:
            original_text = text
            # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
                text = fallback
        return format_text(text, f) if allow_colors is True else text if fallback is None else fallback

    def _format_out(self, *args, **kwargs):
        return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)

    def _format_screen(self, *args, **kwargs):
        return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)

    def _format_err(self, *args, **kwargs):
        return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message, *, stacklevel=0):
        deprecation_warning(
            message, stacklevel=stacklevel + 1, printer=self.report_error, is_error=False)

    def deprecated_feature(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'Deprecated Feature: {message}')
        self.to_stderr(f'{self._format_err("Deprecated Feature:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, *args, **kwargs):
        '''
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

    def write_debug(self, message, only_once=False):
        '''Log a debug message, or print it to stderr'''
        if not self.params.get('verbose', False):
            return
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']

    def _parse_outtmpl(self):
        sanitize = IDENTITY
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})
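        # e.g. (sketch) a compat-style string template like
        # {'outtmpl': '%(id)s.%(ext)s'} is normalized to
        # {'outtmpl': {'default': '%(id)s.%(ext)s'}} before defaults are merged in.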

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict), '"paths" parameter must be a dictionary'
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$'
        # correspondingly that is not what we want since we need to keep
        # '%%' intact for template dict substitution step. Working around
        # with boundary-alike separator hack.
        sep = ''.join(random.choices(string.ascii_letters, k=32))
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. E.g. for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err
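    # e.g. (sketch): YoutubeDL.validate_outtmpl('%(title)s.%(ext)s') returns None,
    # while a malformed template such as '%(title' returns the ValueError instance.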

    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)
        return info_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed
        """

        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int, slice or "{field, ...}"
        FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}
        FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {
            'inner': FIELD_INNER_RE,
            'field': rf'\w*(?:\.{FIELD_INNER_RE})*',
        }
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?xs)
            (?P<negate>-)?
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<remaining>
                (?P<alternate>(?<!\\),[^|&)]+)?
                (?:&(?P<replacement>.*?))?
                (?:\|(?P<default>.*?))?
            )$''')

        def _from_user_input(field):
            if field == ':':
                return ...
            elif ':' in field:
                return slice(*map(int_or_none, field.split(':')))
            elif int_or_none(field) is not None:
                return int(field)
            return field

        def _traverse_infodict(fields):
            fields = [f for x in re.split(r'\.({.+?})\.?', fields)
                      for f in ([x] if x.startswith('{') else x.split('.'))]
            for i in (0, -1):
                if fields and not fields[i]:
                    fields.pop(i)

            for i, f in enumerate(fields):
                if not f.startswith('{'):
                    fields[i] = _from_user_input(f)
                    continue
                assert f.endswith('}'), f'No closing brace for {f} in {fields}'
                fields[i] = {k: list(map(_from_user_input, k.split('.'))) for k in f[1:-1].split(',')}

            return traverse_obj(info_dict, fields, traverse_string=True)

        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
            if sanitize and value == '':
                value = None
            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']
                else NO_DEFAULT))

        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            return repr(obj)

        class _ReplacementFormatter(string.Formatter):
            def get_field(self, field_name, args, kwargs):
                if field_name.isdigit():
                    return args[0]
                raise ValueError('Unsupported field')

        replacement_formatter = _ReplacementFormatter()

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            value, replacement, default, last_field = None, None, na, ''
            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                last_field, replacement = mobj['fields'], mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
                else:
                    break

            if None not in (value, replacement):
                try:
                    value = replacement_formatter.format(replacement, value)
                except ValueError:
                    value, default = None, na

            fmt = outer_mobj.group('format')
            if fmt == 's' and last_field in field_size_compat_map.keys() and isinstance(value, int):
                fmt = f'0{field_size_compat_map[last_field]:d}d'

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
            if value is None:
                value, fmt = default, 's'
            elif fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(
                    value, default=_dumpjson_default,
                    indent=4 if '#' in flags else None, ensure_ascii='+' not in flags), str_fmt
            elif fmt[-1] == 'h':  # html
                value, fmt = escapeHTML(str(value)), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode() % str(value).encode()
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                    value), str_fmt
            elif fmt[-1] == 'D':  # decimal suffix
                num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
                value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
                                              factor=1024 if '#' in flags else 1000)
            elif fmt[-1] == 'S':  # filename sanitization
                value, fmt = filename_sanitizer(last_field, value, restricted='#' in flags), str_fmt
            elif fmt[-1] == 'c':
                if value:
                    value = str(value)[0]
                else:
                    fmt = str_fmt
            elif fmt[-1] not in 'rsa':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'

            if sanitize:
                # If value is an object, sanitize might convert it to a string
                # So we convert it to repr first
                if fmt[-1] == 'r':
                    value, fmt = repr(value), str_fmt
                elif fmt[-1] == 'a':
                    value, fmt = ascii(value), str_fmt
                if fmt[-1] in 'csra':
                    value = sanitizer(last_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT

    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict
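    # e.g. (sketch, with hypothetical values):
    #   ydl.evaluate_outtmpl('%(title)s [%(id)s].%(ext)s',
    #                        {'title': 'spam', 'id': '42', 'ext': 'mp4'})
    #   -> 'spam [42].mp4'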
1384 def _prepare_filename(self
, info_dict
, *, outtmpl
=None, tmpl_type
=None):
1385 assert None in (outtmpl
, tmpl_type
), 'outtmpl and tmpl_type are mutually exclusive'
1387 outtmpl
= self
.params
['outtmpl'].get(tmpl_type
or 'default', self
.params
['outtmpl']['default'])
1389 outtmpl
= self
._outtmpl
_expandpath
(outtmpl
)
1390 filename
= self
.evaluate_outtmpl(outtmpl
, info_dict
, True)
1394 if tmpl_type
in ('', 'temp'):
1395 final_ext
, ext
= self
.params
.get('final_ext'), info_dict
.get('ext')
1396 if final_ext
and ext
and final_ext
!= ext
and filename
.endswith(f
'.{final_ext}'):
1397 filename
= replace_extension(filename
, ext
, final_ext
)
1399 force_ext
= OUTTMPL_TYPES
[tmpl_type
]
1401 filename
= replace_extension(filename
, force_ext
, info_dict
.get('ext'))
1403 # https://github.com/blackjack4494/youtube-dlc/issues/85
1404 trim_file_name
= self
.params
.get('trim_file_name', False)
1406 no_ext
, *ext
= filename
.rsplit('.', 2)
1407 filename
= join_nonempty(no_ext
[:trim_file_name
], *ext
, delim
='.')
1410 except ValueError as err
:
1411 self
.report_error('Error in output template: ' + str(err
) + ' (encoding: ' + repr(preferredencoding()) + ')')
1414 def prepare_filename(self
, info_dict
, dir_type
='', *, outtmpl
=None, warn
=False):
1415 """Generate the output filename"""
1417 assert not dir_type
, 'outtmpl and dir_type are mutually exclusive'
1419 filename
= self
._prepare
_filename
(info_dict
, tmpl_type
=dir_type
, outtmpl
=outtmpl
)
1420 if not filename
and dir_type
not in ('', 'temp'):
1424 if not self
.params
.get('paths'):
1426 elif filename
== '-':
1427 self
.report_warning('--paths is ignored when an outputting to stdout', only_once
=True)
1428 elif os
.path
.isabs(filename
):
1429 self
.report_warning('--paths is ignored since an absolute path is given in output template', only_once
=True)
1430 if filename
== '-' or not filename
:
1433 return self
.get_output_path(dir_type
, filename
)
    def _match_entry(self, info_dict, incomplete=False, silent=False):
        """Returns None if the file should be downloaded"""
        _type = 'video' if 'playlist-match-filter' in self.params['compat_opts'] else info_dict.get('_type', 'video')
        assert incomplete or _type == 'video', 'Only video result can be considered complete'

        video_title = info_dict.get('title', info_dict.get('id', 'entry'))

        def check_filter():
            if _type in ('playlist', 'multi_video'):
                return None
            elif _type in ('url', 'url_transparent') and not try_call(
                    lambda: self.get_info_extractor(info_dict['ie_key']).is_single_video(info_dict['url'])):
                return None

            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'

            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title

            match_filter = self.params.get('match_filter')
            if match_filter is None:
                return None

            cancelled = None
            try:
                try:
                    ret = match_filter(info_dict, incomplete=incomplete)
                except TypeError:
                    # For backward compatibility
                    ret = None if incomplete else match_filter(info_dict)
            except DownloadCancelled as err:
                if err.msg is not NO_DEFAULT:
                    raise
                ret, cancelled = err.msg, err

            if ret is NO_DEFAULT:
                while True:
                    filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
                    reply = input(self._format_screen(
                        f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
                    if reply in {'y', ''}:
                        return None
                    elif reply == 'n':
                        if cancelled:
                            raise type(cancelled)(f'Skipping {video_title}')
                        return f'Skipping {video_title}'
            return ret

        if self.in_download_archive(info_dict):
            reason = ''.join((
                format_field(info_dict, 'id', f'{self._format_screen("%s", self.Styles.ID)}: '),
                format_field(info_dict, 'title', f'{self._format_screen("%s", self.Styles.EMPHASIS)} '),
                'has already been recorded in the archive'))
            break_opt, break_err = 'break_on_existing', ExistingVideoReached
        else:
            try:
                reason = check_filter()
            except DownloadCancelled as e:
                reason, break_opt, break_err = e.msg, 'match_filter', type(e)
            else:
                break_opt, break_err = 'break_on_reject', RejectedVideoReached
        if reason is not None:
            if not silent:
                self.to_screen('[download] ' + reason)
            if self.params.get(break_opt, False):
                raise break_err()
        return reason
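    # Illustrative sketch (not part of this module): a `match_filter` callable
    # built with the documented `yt_dlp.utils.match_filter_func` helper, which
    # skips live streams and videos longer than five minutes:
    #
    #   import yt_dlp
    #   ydl = yt_dlp.YoutubeDL({
    #       'match_filter': yt_dlp.utils.match_filter_func('!is_live & duration<300'),
    #   })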
    @staticmethod
    def add_extra_info(info_dict, extra_info):
        '''Set the keys from extra_info in info dict if they are missing'''
        for key, value in extra_info.items():
            info_dict.setdefault(key, value)
    def extract_info(self, url, download=True, ie_key=None, extra_info=None,
                     process=True, force_generic_extractor=False):
        """
        Extract and return the information dictionary of the URL

        @param url          URL to extract

        @param download     Whether to download videos
        @param process      Whether to resolve all unresolved references (URLs, playlist items).
                            Must be True for download to work
        @param ie_key       Use only the extractor with this key

        @param extra_info   Dictionary containing the extra values to add to the info (For internal use only)
        @force_generic_extractor  Force using the generic extractor (Deprecated; use ie_key='Generic')
        """

        if extra_info is None:
            extra_info = {}

        if not ie_key and force_generic_extractor:
            ie_key = 'Generic'

        if ie_key:
            ies = {ie_key: self._ies[ie_key]} if ie_key in self._ies else {}
        else:
            ies = self._ies

        for key, ie in ies.items():
            if not ie.suitable(url):
                continue

            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')

            temp_id = ie.get_temp_id(url)
            if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': key}):
                self.to_screen(f'[download] {self._format_screen(temp_id, self.Styles.ID)}: '
                               'has already been recorded in the archive')
                if self.params.get('break_on_existing', False):
                    raise ExistingVideoReached()
                break
            return self.__extract_info(url, self.get_info_extractor(key), download, extra_info, process)
        else:
            extractors_restricted = self.params.get('allowed_extractors') not in (None, ['default'])
            self.report_error(f'No suitable extractor{format_field(ie_key, None, " (%s)")} found for URL {url}',
                              tb=False if extractors_restricted else None)
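    # Illustrative sketch (not part of this module): metadata-only extraction,
    # pinned to a single extractor via `ie_key`; the URL is an example:
    #
    #   import yt_dlp
    #   with yt_dlp.YoutubeDL() as ydl:
    #       info = ydl.extract_info('https://example.com/video', download=False,
    #                               ie_key='Generic')
    #       print(info.get('title'))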
    def _handle_extraction_exceptions(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            while True:
                try:
                    return func(self, *args, **kwargs)
                except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
                    raise
                except ReExtractInfo as e:
                    if e.expected:
                        self.to_screen(f'{e}; Re-extracting data')
                    else:
                        self.to_stderr('\r')
                        self.report_warning(f'{e}; Re-extracting data')
                    continue
                except GeoRestrictedError as e:
                    msg = e.msg
                    if e.countries:
                        msg += '\nThis video is available in %s.' % ', '.join(
                            map(ISO3166Utils.short2full, e.countries))
                    msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around this.'
                    self.report_error(msg)
                except ExtractorError as e:  # An error we somewhat expected
                    self.report_error(str(e), e.format_traceback())
                except Exception as e:
                    if self.params.get('ignoreerrors'):
                        self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
                    else:
                        raise
                break
        return wrapper
    def _wait_for_video(self, ie_result={}):
        if (not self.params.get('wait_for_video')
                or ie_result.get('_type', 'video') != 'video'
                or ie_result.get('formats') or ie_result.get('url')):
            return

        format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
        last_msg = ''

        def progress(msg):
            nonlocal last_msg
            full_msg = f'{msg}\n'
            if not self.params.get('noprogress'):
                full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
            elif last_msg:
                return
            self.to_screen(full_msg, skip_eol=True)
            last_msg = msg

        min_wait, max_wait = self.params.get('wait_for_video')
        diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
        if diff is None and ie_result.get('live_status') == 'is_upcoming':
            diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
            self.report_warning('Release time of video is not known')
        elif ie_result and (diff or 0) <= 0:
            self.report_warning('Video should already be available according to extracted info')
        diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
        self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')

        wait_till = time.time() + diff
        try:
            while True:
                diff = wait_till - time.time()
                if diff <= 0:
                    progress('')
                    raise ReExtractInfo('[wait] Wait period ended', expected=True)
                progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
                time.sleep(1)
        except KeyboardInterrupt:
            progress('')
            raise ReExtractInfo('[wait] Interrupted by user', expected=True)
        except BaseException as e:
            if not isinstance(e, ReExtractInfo):
                self.to_screen('')
            raise
    def _load_cookies(self, data, *, autoscope=True):
        """Loads cookies from a `Cookie` header

        This tries to work around the security vulnerability of passing cookies to every domain.
        See: https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-v8mc-9377-rwjj

        @param data         The Cookie header as a string to load the cookies from
        @param autoscope    If `False`, scope cookies using Set-Cookie syntax and error for cookies without domains
                            If `True`, save cookies for later to be stored in the jar with a limited scope
                            If a URL, save cookies in the jar with the domain of the URL
        """
        for cookie in LenientSimpleCookie(data).values():
            if autoscope and any(cookie.values()):
                raise ValueError('Invalid syntax in Cookie Header')

            domain = cookie.get('domain') or ''
            expiry = cookie.get('expires')
            if expiry == '':  # 0 is valid
                expiry = None
            prepared_cookie = http.cookiejar.Cookie(
                cookie.get('version') or 0, cookie.key, cookie.value, None, False,
                domain, True, True, cookie.get('path') or '', bool(cookie.get('path')),
                cookie.get('secure') or False, expiry, False, None, None, {})

            if domain:
                self.cookiejar.set_cookie(prepared_cookie)
            elif autoscope is True:
                self.deprecated_feature(
                    'Passing cookies as a header is a potential security risk; '
                    'they will be scoped to the domain of the downloaded urls. '
                    'Please consider loading cookies from a file or browser instead.')
                self.__header_cookies.append(prepared_cookie)
            elif autoscope:
                self.report_warning(
                    'The extractor result contains an unscoped cookie as an HTTP header. '
                    f'If you are using yt-dlp with an input URL{bug_reports_message(before=",")}',
                    only_once=True)
                self._apply_header_cookies(autoscope, [prepared_cookie])
            else:
                self.report_error('Unscoped cookies are not allowed; please specify some sort of scoping',
                                  tb=False, is_error=False)
    def _apply_header_cookies(self, url, cookies=None):
        """Applies stray header cookies to the provided url

        This loads header cookies and scopes them to the domain provided in `url`.
        While this is not ideal, it helps reduce the risk of them being sent
        to an unintended destination while mostly maintaining compatibility.
        """
        parsed = urllib.parse.urlparse(url)
        if not parsed.hostname:
            return

        for cookie in map(copy.copy, cookies or self.__header_cookies):
            cookie.domain = f'.{parsed.hostname}'
            self.cookiejar.set_cookie(cookie)
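    # Illustrative sketch (not part of this module): `_load_cookies` is a
    # private helper, shown here for illustration only. With autoscope=False
    # it expects Set-Cookie syntax with an explicit domain; values are examples:
    #
    #   ydl._load_cookies('sessionid=abc123; Domain=.example.com; Path=/; Secure',
    #                     autoscope=False)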
    @_handle_extraction_exceptions
    def __extract_info(self, url, ie, download, extra_info, process):
        self._apply_header_cookies(url)

        try:
            ie_result = ie.extract(url)
        except UserNotLive as e:
            if process:
                if self.params.get('wait_for_video'):
                    self.report_warning(e)
                    self._wait_for_video()
            raise
        if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
            self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
            return
        if isinstance(ie_result, list):
            # Backwards compatibility: old IE result format
            ie_result = {
                '_type': 'compat_list',
                'entries': ie_result,
            }
        if extra_info.get('original_url'):
            ie_result.setdefault('original_url', extra_info['original_url'])
        self.add_default_extra_info(ie_result, ie, url)
        if process:
            self._wait_for_video(ie_result)
            return self.process_ie_result(ie_result, download, extra_info)
        else:
            return ie_result
    def add_default_extra_info(self, ie_result, ie, url):
        if url is not None:
            self.add_extra_info(ie_result, {
                'webpage_url': url,
                'original_url': url,
            })
        webpage_url = ie_result.get('webpage_url')
        if webpage_url:
            self.add_extra_info(ie_result, {
                'webpage_url_basename': url_basename(webpage_url),
                'webpage_url_domain': get_domain(webpage_url),
            })
        if ie is not None:
            self.add_extra_info(ie_result, {
                'extractor': ie.IE_NAME,
                'extractor_key': ie.ie_key(),
            })
    def process_ie_result(self, ie_result, download=True, extra_info=None):
        """
        Take the result of the ie (may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.
        """
        if extra_info is None:
            extra_info = {}
        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            ie_result['url'] = sanitize_url(
                ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
            if ie_result.get('original_url') and not extra_info.get('original_url'):
                extra_info = {'original_url': ie_result['original_url'], **extra_info}

            extract_flat = self.params.get('extract_flat', False)
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                    or extract_flat is True):
                info_copy = ie_result.copy()
                ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
                if ie and not ie_result.get('id'):
                    info_copy['id'] = ie.get_temp_id(ie_result['url'])
                self.add_default_extra_info(info_copy, ie, ie_result['url'])
                self.add_extra_info(info_copy, extra_info)
                info_copy, _ = self.pre_process(info_copy)
                self._fill_common_fields(info_copy, False)
                self.__forced_printings(info_copy)
                self._raise_pending_errors(info_copy)
                if self.params.get('force_write_download_archive', False):
                    self.record_download_archive(info_copy)
                return ie_result

        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            ie_result = self.process_video_result(ie_result, download=download)
            self._raise_pending_errors(ie_result)
            additional_urls = (ie_result or {}).get('additional_urls')
            if additional_urls:
                # TODO: Improve MetadataParserPP to allow setting a list
                if isinstance(additional_urls, str):
                    additional_urls = [additional_urls]
                self.to_screen(
                    '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
                self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
                ie_result['additional_entries'] = [
                    self.extract_info(
                        url, download, extra_info=extra_info,
                        force_generic_extractor=self.params.get('force_generic_extractor'))
                    for url in additional_urls
                ]
            return ie_result
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(
                ie_result['url'], download,
                ie_key=ie_result.get('ie_key'),
                extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # extract_info may return None when ignoreerrors is enabled and
            # extraction failed with an error; don't crash and return early
            # in this case
            if not info:
                return info

            exempted_fields = {'_type', 'url', 'ie_key'}
            if not ie_result.get('section_end') and ie_result.get('section_start') is None:
                # For video clips, the id etc of the clip extractor should be used
                exempted_fields |= {'id', 'extractor', 'extractor_key'}

            new_result = info.copy()
            new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))

            # Extracted info may not be a video result (i.e.
            # info.get('_type', 'video') != video) but rather an url or
            # url_transparent. In such cases outer metadata (from ie_result)
            # should be propagated to inner one (info). For this to happen
            # _type of info should be overridden with url_transparent. This
            # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
            if new_result.get('_type') == 'url':
                new_result['_type'] = 'url_transparent'

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type in ('playlist', 'multi_video'):
            # Protect from infinite recursion due to recursively nested playlists
            # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
            webpage_url = ie_result.get('webpage_url')  # Playlists may not have a webpage_url
            if webpage_url and webpage_url in self._playlist_urls:
                self.to_screen(
                    '[download] Skipping already downloaded playlist: %s'
                    % ie_result.get('title') or ie_result.get('id'))
                return

            self._playlist_level += 1
            self._playlist_urls.add(webpage_url)
            self._fill_common_fields(ie_result, False)
            self._sanitize_thumbnails(ie_result)
            try:
                return self.__process_playlist(ie_result, download)
            finally:
                self._playlist_level -= 1
                if not self._playlist_level:
                    self._playlist_urls.clear()
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                self.add_extra_info(r, {
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'webpage_url_domain': get_domain(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                })
                return r

            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)
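    # Illustrative sketch (not part of this module): flat playlist extraction,
    # which exercises the `extract_flat` branch above; the URL is an example:
    #
    #   import yt_dlp
    #   with yt_dlp.YoutubeDL({'extract_flat': 'in_playlist'}) as ydl:
    #       playlist = ydl.extract_info('https://example.com/playlist?list=PL123',
    #                                   download=False)
    #       for entry in playlist.get('entries') or []:
    #           print(entry.get('url'))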
    def _ensure_dir_exists(self, path):
        return make_dir(path, self.report_error)
    @staticmethod
    def _playlist_infodict(ie_result, strict=False, **kwargs):
        info = {
            'playlist_count': ie_result.get('playlist_count'),
            'playlist': ie_result.get('title') or ie_result.get('id'),
            'playlist_id': ie_result.get('id'),
            'playlist_title': ie_result.get('title'),
            'playlist_uploader': ie_result.get('uploader'),
            'playlist_uploader_id': ie_result.get('uploader_id'),
            **kwargs,
        }
        if strict:
            return info
        if ie_result.get('webpage_url'):
            info.update({
                'webpage_url': ie_result['webpage_url'],
                'webpage_url_basename': url_basename(ie_result['webpage_url']),
                'webpage_url_domain': get_domain(ie_result['webpage_url']),
            })
        return {
            **info,
            'playlist_index': 0,
            '__last_playlist_index': max(ie_result.get('requested_entries') or (0, 0)),
            'extractor': ie_result['extractor'],
            'extractor_key': ie_result['extractor_key'],
        }
    def __process_playlist(self, ie_result, download):
        """Process each entry in the playlist"""
        assert ie_result['_type'] in ('playlist', 'multi_video')

        common_info = self._playlist_infodict(ie_result, strict=True)
        title = common_info.get('playlist') or '<Untitled>'
        if self._match_entry(common_info, incomplete=True) is not None:
            return
        self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')

        all_entries = PlaylistEntries(self, ie_result)
        entries = orderedSet(all_entries.get_requested_items(), lazy=True)

        lazy = self.params.get('lazy_playlist')
        if lazy:
            resolved_entries, n_entries = [], 'N/A'
            ie_result['requested_entries'], ie_result['entries'] = None, None
        else:
            entries = resolved_entries = list(entries)
            n_entries = len(resolved_entries)
            ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
            if not ie_result.get('playlist_count'):
                # Better to do this after potentially exhausting entries
                ie_result['playlist_count'] = all_entries.get_full_count()

        extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
        ie_copy = collections.ChainMap(ie_result, extra)

        _infojson_written = False
        write_playlist_files = self.params.get('allow_playlist_files', True)
        if write_playlist_files and self.params.get('list_thumbnails'):
            self.list_thumbnails(ie_result)
        if write_playlist_files and not self.params.get('simulate'):
            _infojson_written = self._write_info_json(
                'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
            if _infojson_written is None:
                return
            if self._write_description('playlist', ie_result,
                                       self.prepare_filename(ie_copy, 'pl_description')) is None:
                return
            # TODO: This should be passed to ThumbnailsConvertor if necessary
            self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))

        if lazy:
            if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
                self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
        elif self.params.get('playlistreverse'):
            entries.reverse()
        elif self.params.get('playlistrandom'):
            random.shuffle(entries)

        self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} items'
                       f'{format_field(ie_result, "playlist_count", " of %s")}')

        keep_resolved_entries = self.params.get('extract_flat') != 'discard'
        if self.params.get('extract_flat') == 'discard_in_playlist':
            keep_resolved_entries = ie_result['_type'] != 'playlist'
        if keep_resolved_entries:
            self.write_debug('The information of all playlist entries will be held in memory')

        failures = 0
        max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
        for i, (playlist_index, entry) in enumerate(entries):
            if lazy:
                resolved_entries.append((playlist_index, entry))
            if not entry:
                continue

            entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
            if not lazy and 'playlist-index' in self.params['compat_opts']:
                playlist_index = ie_result['requested_entries'][i]

            entry_copy = collections.ChainMap(entry, {
                **common_info,
                'n_entries': int_or_none(n_entries),
                'playlist_index': playlist_index,
                'playlist_autonumber': i + 1,
            })

            if self._match_entry(entry_copy, incomplete=True) is not None:
                # For compatibility with youtube-dl. See https://github.com/yt-dlp/yt-dlp/issues/4369
                resolved_entries[i] = (playlist_index, NO_DEFAULT)
                continue

            self.to_screen('[download] Downloading item %s of %s' % (
                self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))

            entry_result = self.__process_iterable_entry(entry, download, collections.ChainMap({
                'playlist_index': playlist_index,
                'playlist_autonumber': i + 1,
            }, extra))
            if not entry_result:
                failures += 1
            if failures >= max_failures:
                self.report_error(
                    f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
                break
            if keep_resolved_entries:
                resolved_entries[i] = (playlist_index, entry_result)

        # Update with processed data
        ie_result['entries'] = [e for _, e in resolved_entries if e is not NO_DEFAULT]
        ie_result['requested_entries'] = [i for i, e in resolved_entries if e is not NO_DEFAULT]
        if ie_result['requested_entries'] == try_call(lambda: list(range(1, ie_result['playlist_count'] + 1))):
            # Do not set for full playlist
            ie_result.pop('requested_entries')

        # Write the updated info to json
        if _infojson_written is True and self._write_info_json(
                'updated playlist', ie_result,
                self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
            return

        ie_result = self.run_all_pps('playlist', ie_result)
        self.to_screen(f'[download] Finished downloading playlist: {title}')
        return ie_result
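    # Illustrative sketch (not part of this module): lazily resolving selected
    # playlist items, which drives the `lazy` branch above; values are examples:
    #
    #   import yt_dlp
    #   ydl = yt_dlp.YoutubeDL({'lazy_playlist': True, 'playlist_items': '1-3,7'})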
    @_handle_extraction_exceptions
    def __process_iterable_entry(self, entry, download, extra_info):
        return self.process_ie_result(
            entry, download=download, extra_info=extra_info)
    def _build_format_filter(self, filter_spec):
        """Returns a function to filter the formats according to the filter_spec"""

        OPERATORS = {
            '<': operator.lt,
            '<=': operator.le,
            '>': operator.gt,
            '>=': operator.ge,
            '=': operator.eq,
            '!=': operator.ne,
        }
        operator_rex = re.compile(r'''(?x)\s*
            (?P<key>[\w.-]+)\s*
            (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
            ''' % '|'.join(map(re.escape, OPERATORS.keys())))
        m = operator_rex.fullmatch(filter_spec)
        if m:
            try:
                comparison_value = int(m.group('value'))
            except ValueError:
                comparison_value = parse_filesize(m.group('value'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('value') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid value %r in format specification %r' % (
                            m.group('value'), filter_spec))
            op = OPERATORS[m.group('op')]

        if not m:
            STR_OPERATORS = {
                '=': operator.eq,
                '^=': lambda attr, value: attr.startswith(value),
                '$=': lambda attr, value: attr.endswith(value),
                '*=': lambda attr, value: value in attr,
                '~=': lambda attr, value: value.search(attr) is not None,
            }
            str_operator_rex = re.compile(r'''(?x)\s*
                (?P<key>[a-zA-Z0-9._-]+)\s*
                (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
                (?P<quote>["'])?
                (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
                (?(quote)(?P=quote))\s*
                ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
            m = str_operator_rex.fullmatch(filter_spec)
            if m:
                if m.group('op') == '~=':
                    comparison_value = re.compile(m.group('value'))
                else:
                    comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
                str_op = STR_OPERATORS[m.group('op')]
                if m.group('negation'):
                    op = lambda attr, value: not str_op(attr, value)
                else:
                    op = str_op

        if not m:
            raise SyntaxError('Invalid filter specification %r' % filter_spec)

        def _filter(f):
            actual_value = f.get(m.group('key'))
            if actual_value is None:
                return m.group('none_inclusive')
            return op(actual_value, comparison_value)
        return _filter
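    # Illustrative sketch (not part of this module): filter specifications as
    # used inside `-f`/`format` selectors, parsed by the method above:
    #
    #   import yt_dlp
    #   ydl = yt_dlp.YoutubeDL({'format': 'best[height<=720][filesize<100M]'})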
    def _check_formats(self, formats):
        for f in formats:
            self.to_screen('[info] Testing format %s' % f['format_id'])
            path = self.get_output_path('temp')
            if not self._ensure_dir_exists(f'{path}/'):
                continue
            temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
            temp_file.close()
            try:
                success, _ = self.dl(temp_file.name, f, test=True)
            except (DownloadError, OSError, ValueError) + network_exceptions:
                success = False
            finally:
                if os.path.exists(temp_file.name):
                    try:
                        os.remove(temp_file.name)
                    except OSError:
                        self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
            f['__working'] = success
            if success:
                yield f
            else:
                self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
    def _default_format_spec(self, info_dict, download=True):
        def can_merge():
            merger = FFmpegMergerPP(self)
            return merger.available and merger.can_merge()

        prefer_best = (
            not self.params.get('simulate')
            and download
            and (
                not can_merge()
                or info_dict.get('is_live') and not self.params.get('live_from_start')
                or self.params['outtmpl']['default'] == '-'))
        compat = (
            prefer_best
            or self.params.get('allow_multiple_audio_streams', False)
            or 'format-spec' in self.params['compat_opts'])

        return (
            'best/bestvideo+bestaudio' if prefer_best
            else 'bestvideo*+bestaudio/best' if not compat
            else 'bestvideo+bestaudio/best')
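    # For reference (illustrative, not part of this module): an explicit
    # `format` parameter overrides the default spec computed above:
    #
    #   import yt_dlp
    #   ydl = yt_dlp.YoutubeDL({'format': 'bestvideo*+bestaudio/best'})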
    def build_format_selector(self, format_spec):
        def syntax_error(note, start):
            message = (
                'Invalid format specification: '
                '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
            return SyntaxError(message)

        PICKFIRST = 'PICKFIRST'
        MERGE = 'MERGE'
        SINGLE = 'SINGLE'
        GROUP = 'GROUP'
        FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])

        allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
                                  'video': self.params.get('allow_multiple_video_streams', False)}

        def _parse_filter(tokens):
            filter_parts = []
            for type, string_, start, _, _ in tokens:
                if type == tokenize.OP and string_ == ']':
                    return ''.join(filter_parts)
                else:
                    filter_parts.append(string_)

        def _remove_unused_ops(tokens):
            # Remove operators that we don't use and join them with the surrounding strings.
            # E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
            ALLOWED_OPS = ('/', '+', ',', '(', ')')
            last_string, last_start, last_end, last_line = None, None, None, None
            for type, string_, start, end, line in tokens:
                if type == tokenize.OP and string_ == '[':
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string_, start, end, line
                    # everything inside brackets will be handled by _parse_filter
                    for type, string_, start, end, line in tokens:
                        yield type, string_, start, end, line
                        if type == tokenize.OP and string_ == ']':
                            break
                elif type == tokenize.OP and string_ in ALLOWED_OPS:
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string_, start, end, line
                elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
                    if not last_string:
                        last_string = string_
                        last_start = start
                        last_end = end
                    else:
                        last_string += string_
            if last_string:
                yield tokenize.NAME, last_string, last_start, last_end, last_line

        def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
            selectors = []
            current_selector = None
            for type, string_, start, _, _ in tokens:
                # ENCODING is only defined in python 3.x
                if type == getattr(tokenize, 'ENCODING', None):
                    continue
                elif type in [tokenize.NAME, tokenize.NUMBER]:
                    current_selector = FormatSelector(SINGLE, string_, [])
                elif type == tokenize.OP:
                    if string_ == ')':
                        if not inside_group:
                            # ')' will be handled by the parentheses group
                            tokens.restore_last_token()
                        break
                    elif inside_merge and string_ in ['/', ',']:
                        tokens.restore_last_token()
                        break
                    elif inside_choice and string_ == ',':
                        tokens.restore_last_token()
                        break
                    elif string_ == ',':
                        if not current_selector:
                            raise syntax_error('"," must follow a format selector', start)
                        selectors.append(current_selector)
                        current_selector = None
                    elif string_ == '/':
                        if not current_selector:
                            raise syntax_error('"/" must follow a format selector', start)
                        first_choice = current_selector
                        second_choice = _parse_format_selection(tokens, inside_choice=True)
                        current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
                    elif string_ == '[':
                        if not current_selector:
                            current_selector = FormatSelector(SINGLE, 'best', [])
                        format_filter = _parse_filter(tokens)
                        current_selector.filters.append(format_filter)
                    elif string_ == '(':
                        if current_selector:
                            raise syntax_error('Unexpected "("', start)
                        group = _parse_format_selection(tokens, inside_group=True)
                        current_selector = FormatSelector(GROUP, group, [])
                    elif string_ == '+':
                        if not current_selector:
                            raise syntax_error('Unexpected "+"', start)
                        selector_1 = current_selector
                        selector_2 = _parse_format_selection(tokens, inside_merge=True)
                        if not selector_2:
                            raise syntax_error('Expected a selector', start)
                        current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
                    else:
                        raise syntax_error(f'Operator not recognized: "{string_}"', start)
                elif type == tokenize.ENDMARKER:
                    break
            if current_selector:
                selectors.append(current_selector)
            return selectors

        def _merge(formats_pair):
            format_1, format_2 = formats_pair

            formats_info = []
            formats_info.extend(format_1.get('requested_formats', (format_1,)))
            formats_info.extend(format_2.get('requested_formats', (format_2,)))

            if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
                get_no_more = {'video': False, 'audio': False}
                for (i, fmt_info) in enumerate(formats_info):
                    if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
                        formats_info.pop(i)
                        continue
                    for aud_vid in ['audio', 'video']:
                        if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
                            if get_no_more[aud_vid]:
                                formats_info.pop(i)
                                break
                            get_no_more[aud_vid] = True

            if len(formats_info) == 1:
                return formats_info[0]

            video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
            audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']

            the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
            the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None

            output_ext = get_compatible_ext(
                vcodecs=[f.get('vcodec') for f in video_fmts],
                acodecs=[f.get('acodec') for f in audio_fmts],
                vexts=[f['ext'] for f in video_fmts],
                aexts=[f['ext'] for f in audio_fmts],
                preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
                             or self.params.get('prefer_free_formats') and ('webm', 'mkv')))

            filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))

            new_dict = {
                'requested_formats': formats_info,
                'format': '+'.join(filtered('format')),
                'format_id': '+'.join(filtered('format_id')),
                'ext': output_ext,
                'protocol': '+'.join(map(determine_protocol, formats_info)),
                'language': '+'.join(orderedSet(filtered('language'))) or None,
                'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
                'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
                'tbr': sum(filtered('tbr', 'vbr', 'abr')),
            }

            if the_only_video:
                new_dict.update({
                    'width': the_only_video.get('width'),
                    'height': the_only_video.get('height'),
                    'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
                    'fps': the_only_video.get('fps'),
                    'dynamic_range': the_only_video.get('dynamic_range'),
                    'vcodec': the_only_video.get('vcodec'),
                    'vbr': the_only_video.get('vbr'),
                    'stretched_ratio': the_only_video.get('stretched_ratio'),
                    'aspect_ratio': the_only_video.get('aspect_ratio'),
                })

            if the_only_audio:
                new_dict.update({
                    'acodec': the_only_audio.get('acodec'),
                    'abr': the_only_audio.get('abr'),
                    'asr': the_only_audio.get('asr'),
                    'audio_channels': the_only_audio.get('audio_channels'),
                })

            return new_dict

        def _check_formats(formats):
            if self.params.get('check_formats') == 'selected':
                yield from self._check_formats(formats)
                return
            elif (self.params.get('check_formats') is not None
                    or self.params.get('allow_unplayable_formats')):
                yield from formats
                return

            for f in formats:
                if f.get('has_drm') or f.get('__needs_testing'):
                    yield from self._check_formats([f])
                else:
                    yield f

        def _build_selector_function(selector):
            if isinstance(selector, list):  # ,
                fs = [_build_selector_function(s) for s in selector]

                def selector_function(ctx):
                    for f in fs:
                        yield from f(ctx)
                return selector_function

            elif selector.type == GROUP:  # ()
                selector_function = _build_selector_function(selector.selector)

            elif selector.type == PICKFIRST:  # /
                fs = [_build_selector_function(s) for s in selector.selector]

                def selector_function(ctx):
                    for f in fs:
                        picked_formats = list(f(ctx))
                        if picked_formats:
                            return picked_formats
                    return []

            elif selector.type == MERGE:  # +
                selector_1, selector_2 = map(_build_selector_function, selector.selector)

                def selector_function(ctx):
                    for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
                        yield _merge(pair)

            elif selector.type == SINGLE:  # atom
                format_spec = selector.selector or 'best'

                # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
                if format_spec == 'all':
                    def selector_function(ctx):
                        yield from _check_formats(ctx['formats'][::-1])
                elif format_spec == 'mergeall':
                    def selector_function(ctx):
                        formats = list(_check_formats(
                            f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
                        if not formats:
                            return
                        merged_format = formats[-1]
                        for f in formats[-2::-1]:
                            merged_format = _merge((merged_format, f))
                        yield merged_format

                else:
                    format_fallback, seperate_fallback, format_reverse, format_idx = False, None, True, 1
                    mobj = re.match(
                        r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
                        format_spec)
                    if mobj is not None:
                        format_idx = int_or_none(mobj.group('n'), default=1)
                        format_reverse = mobj.group('bw')[0] == 'b'
                        format_type = (mobj.group('type') or [None])[0]
                        not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
                        format_modified = mobj.group('mod') is not None

                        format_fallback = not format_type and not format_modified  # for b, w
                        _filter_f = (
                            (lambda f: f.get('%scodec' % format_type) != 'none')
                            if format_type and format_modified  # bv*, ba*, wv*, wa*
                            else (lambda f: f.get('%scodec' % not_format_type) == 'none')
                            if format_type  # bv, ba, wv, wa
                            else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
                            if not format_modified  # b, w
                            else lambda f: True)  # b*, w*
                        filter_f = lambda f: _filter_f(f) and (
                            f.get('vcodec') != 'none' or f.get('acodec') != 'none')
                    else:
                        if format_spec in self._format_selection_exts['audio']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
                        elif format_spec in self._format_selection_exts['video']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
                            seperate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
                        elif format_spec in self._format_selection_exts['storyboards']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
                        else:
                            filter_f = lambda f: f.get('format_id') == format_spec  # id

                    def selector_function(ctx):
                        formats = list(ctx['formats'])
                        matches = list(filter(filter_f, formats)) if filter_f is not None else formats
                        if not matches:
                            if format_fallback and ctx['incomplete_formats']:
                                # for extractors with incomplete formats (audio only (soundcloud)
                                # or video only (imgur)) best/worst will fall back to
                                # best/worst {video,audio}-only format
                                matches = list(filter(lambda f: f.get('vcodec') != 'none' or f.get('acodec') != 'none', formats))
                            elif seperate_fallback and not ctx['has_merged_format']:
                                # for compatibility with youtube-dl when there is no pre-merged format
                                matches = list(filter(seperate_fallback, formats))
                        matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
                        try:
                            yield matches[format_idx - 1]
                        except LazyList.IndexError:
                            return

            filters = [self._build_format_filter(f) for f in selector.filters]

            def final_selector(ctx):
                ctx_copy = dict(ctx)
                for _filter in filters:
                    ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
                return selector_function(ctx_copy)
            return final_selector

        # HACK: Python 3.12 changed the underlying parser, rendering '7_a' invalid
        #  Prefix numbers with random letters to avoid it being classified as a number
        # See: https://github.com/yt-dlp/yt-dlp/pulls/8797
        # TODO: Implement parser not reliant on tokenize.tokenize
        prefix = ''.join(random.choices(string.ascii_letters, k=32))
        stream = io.BytesIO(re.sub(r'\d[_\d]*', rf'{prefix}\g<0>', format_spec).encode())
        try:
            tokens = list(_remove_unused_ops(
                token._replace(string=token.string.replace(prefix, ''))
                for token in tokenize.tokenize(stream.readline)))
        except tokenize.TokenError:
            raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))

        class TokenIterator:
            def __init__(self, tokens):
                self.tokens = tokens
                self.counter = 0

            def __iter__(self):
                return self

            def __next__(self):
                if self.counter >= len(self.tokens):
                    raise StopIteration()
                value = self.tokens[self.counter]
                self.counter += 1
                return value

            next = __next__

            def restore_last_token(self):
                self.counter -= 1

        parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
        return _build_selector_function(parsed_selector)
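    # Illustrative sketch (not part of this module): exercising the selector
    # builder directly; the ctx keys mirror those passed by
    # process_video_result, and `formats` is a list of format dicts:
    #
    #   selector = ydl.build_format_selector('bv*+ba/b')
    #   chosen = list(selector({
    #       'formats': formats,
    #       'has_merged_format': False,
    #       'incomplete_formats': False,
    #   }))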
    def _calc_headers(self, info_dict, load_cookies=False):
        res = HTTPHeaderDict(self.params['http_headers'], info_dict.get('http_headers'))

        if load_cookies:  # For --load-info-json
            self._load_cookies(res.get('Cookie'), autoscope=info_dict['url'])  # compat
            self._load_cookies(info_dict.get('cookies'), autoscope=False)
        # The `Cookie` header is removed to prevent leaks and unscoped cookies.
        # See: https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-v8mc-9377-rwjj
        res.pop('Cookie', None)
        cookies = self.cookiejar.get_cookies_for_url(info_dict['url'])
        if cookies:
            encoder = LenientSimpleCookie()
            values = []
            for cookie in cookies:
                _, value = encoder.value_encode(cookie.value)
                values.append(f'{cookie.name}={value}')
                if cookie.domain:
                    values.append(f'Domain={cookie.domain}')
                if cookie.path:
                    values.append(f'Path={cookie.path}')
                if cookie.secure:
                    values.append('Secure')
                if cookie.expires:
                    values.append(f'Expires={cookie.expires}')
                if cookie.version:
                    values.append(f'Version={cookie.version}')
            info_dict['cookies'] = '; '.join(values)

        if 'X-Forwarded-For' not in res:
            x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
            if x_forwarded_for_ip:
                res['X-Forwarded-For'] = x_forwarded_for_ip

        return res
    def _calc_cookies(self, url):
        self.deprecation_warning('"YoutubeDL._calc_cookies" is deprecated and may be removed in a future version')
        return self.cookiejar.get_cookie_header(url)
    def _sort_thumbnails(self, thumbnails):
        thumbnails.sort(key=lambda t: (
            t.get('preference') if t.get('preference') is not None else -1,
            t.get('width') if t.get('width') is not None else -1,
            t.get('height') if t.get('height') is not None else -1,
            t.get('id') if t.get('id') is not None else '',
            t.get('url')))
    def _sanitize_thumbnails(self, info_dict):
        thumbnails = info_dict.get('thumbnails')
        if thumbnails is None:
            thumbnail = info_dict.get('thumbnail')
            if thumbnail:
                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
        if not thumbnails:
            return

        def check_thumbnails(thumbnails):
            for t in thumbnails:
                self.to_screen(f'[info] Testing thumbnail {t["id"]}')
                try:
                    self.urlopen(HEADRequest(t['url']))
                except network_exceptions as err:
                    self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
                    continue
                yield t

        self._sort_thumbnails(thumbnails)
        for i, t in enumerate(thumbnails):
            if t.get('id') is None:
                t['id'] = '%d' % i
            if t.get('width') and t.get('height'):
                t['resolution'] = '%dx%d' % (t['width'], t['height'])
            t['url'] = sanitize_url(t['url'])

        if self.params.get('check_formats') is True:
            info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
        else:
            info_dict['thumbnails'] = thumbnails
, info_dict
, final
=True):
2591 # TODO: move sanitization here
2593 title
= info_dict
['fulltitle'] = info_dict
.get('title')
2596 self
.write_debug('Extractor gave empty title. Creating a generic title')
2598 self
.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
2599 info_dict
['title'] = f
'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'
2601 if info_dict
.get('duration') is not None:
2602 info_dict
['duration_string'] = formatSeconds(info_dict
['duration'])
2604 for ts_key
, date_key
in (
2605 ('timestamp', 'upload_date'),
2606 ('release_timestamp', 'release_date'),
2607 ('modified_timestamp', 'modified_date'),
2609 if info_dict
.get(date_key
) is None and info_dict
.get(ts_key
) is not None:
2610 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2611 # see http://bugs.python.org/issue1646728)
2612 with contextlib
.suppress(ValueError, OverflowError, OSError):
2613 upload_date
= datetime
.datetime
.fromtimestamp(info_dict
[ts_key
], datetime
.timezone
.utc
)
2614 info_dict
[date_key
] = upload_date
.strftime('%Y%m%d')
2616 if not info_dict
.get('release_year'):
2617 info_dict
['release_year'] = traverse_obj(info_dict
, ('release_date', {lambda x: int(x[:4])}
))
2619 live_keys
= ('is_live', 'was_live')
2620 live_status
= info_dict
.get('live_status')
2621 if live_status
is None:
2622 for key
in live_keys
:
2623 if info_dict
.get(key
) is False:
2625 if info_dict
.get(key
):
2628 if all(info_dict
.get(key
) is False for key
in live_keys
):
2629 live_status
= 'not_live'
2631 info_dict
['live_status'] = live_status
2632 for key
in live_keys
:
2633 if info_dict
.get(key
) is None:
2634 info_dict
[key
] = (live_status
== key
)
2635 if live_status
== 'post_live':
2636 info_dict
['was_live'] = True
2638 # Auto generate title fields corresponding to the *_number fields when missing
2639 # in order to always have clean titles. This is very common for TV series.
2640 for field
in ('chapter', 'season', 'episode'):
2641 if final
and info_dict
.get('%s_number' % field
) is not None and not info_dict
.get(field
):
2642 info_dict
[field
] = '%s %d' % (field
.capitalize(), info_dict
['%s_number' % field
])
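    # Illustrative sketch (not part of this module): the timestamp-to-date
    # conversion used above, shown standalone:
    #
    #   import datetime
    #   ts = 1700000000
    #   datetime.datetime.fromtimestamp(
    #       ts, datetime.timezone.utc).strftime('%Y%m%d')  # -> '20231114'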
    def _raise_pending_errors(self, info):
        err = info.pop('__pending_error', None)
        if err:
            self.report_error(err, tb=False)
    def sort_formats(self, info_dict):
        formats = self._get_formats(info_dict)
        formats.sort(key=FormatSorter(
            self, info_dict.get('_format_sort_fields') or []).calculate_preference)
    def process_video_result(self, info_dict, download=True):
        assert info_dict.get('_type', 'video') == 'video'
        self._num_videos += 1

        if 'id' not in info_dict:
            raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
        elif not info_dict.get('id'):
            raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])

        def report_force_conversion(field, field_not, conversion):
            self.report_warning(
                '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
                % (field, field_not, conversion))

        def sanitize_string_field(info, string_field):
            field = info.get(string_field)
            if field is None or isinstance(field, str):
                return
            report_force_conversion(string_field, 'a string', 'string')
            info[string_field] = str(field)

        def sanitize_numeric_fields(info):
            for numeric_field in self._NUMERIC_FIELDS:
                field = info.get(numeric_field)
                if field is None or isinstance(field, (int, float)):
                    continue
                report_force_conversion(numeric_field, 'numeric', 'int')
                info[numeric_field] = int_or_none(field)

        sanitize_string_field(info_dict, 'id')
        sanitize_numeric_fields(info_dict)
        if info_dict.get('section_end') and info_dict.get('section_start') is not None:
            info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
        if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
            self.report_warning('"duration" field is negative, there is an error in extractor')

        chapters = info_dict.get('chapters') or []
        if chapters and chapters[0].get('start_time'):
            chapters.insert(0, {'start_time': 0})

        dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
        for idx, (prev, current, next_) in enumerate(zip(
                (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
            if current.get('start_time') is None:
                current['start_time'] = prev.get('end_time')
            if not current.get('end_time'):
                current['end_time'] = next_.get('start_time')
            if not current.get('title'):
                current['title'] = f'<Untitled Chapter {idx}>'

        if 'playlist' not in info_dict:
            # It isn't part of a playlist
            info_dict['playlist'] = None
            info_dict['playlist_index'] = None

        self._sanitize_thumbnails(info_dict)

        thumbnail = info_dict.get('thumbnail')
        thumbnails = info_dict.get('thumbnails')
        if thumbnail:
            info_dict['thumbnail'] = sanitize_url(thumbnail)
        elif thumbnails:
            info_dict['thumbnail'] = thumbnails[-1]['url']

        if info_dict.get('display_id') is None and 'id' in info_dict:
            info_dict['display_id'] = info_dict['id']

        self._fill_common_fields(info_dict)

        for cc_kind in ('subtitles', 'automatic_captions'):
            cc = info_dict.get(cc_kind)
            if cc:
                for _, subtitle in cc.items():
                    for subtitle_format in subtitle:
                        if subtitle_format.get('url'):
                            subtitle_format['url'] = sanitize_url(subtitle_format['url'])
                        if subtitle_format.get('ext') is None:
                            subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()

        automatic_captions = info_dict.get('automatic_captions')
        subtitles = info_dict.get('subtitles')

        info_dict['requested_subtitles'] = self.process_subtitles(
            info_dict['id'], subtitles, automatic_captions)

        formats = self._get_formats(info_dict)

        # Backward compatibility with InfoExtractor._sort_formats
        field_preference = (formats or [{}])[0].pop('__sort_fields', None)
        if field_preference:
            info_dict['_format_sort_fields'] = field_preference

        info_dict['_has_drm'] = any(  # or None ensures --clean-infojson removes it
            f.get('has_drm') and f['has_drm'] != 'maybe' for f in formats) or None
        if not self.params.get('allow_unplayable_formats'):
            formats = [f for f in formats if not f.get('has_drm') or f['has_drm'] == 'maybe']

        if formats and all(f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
            self.report_warning(
                f'{"This video is DRM protected and " if info_dict["_has_drm"] else ""}'
                'only images are available for download. Use --list-formats to see them'.capitalize())

        get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
        if not get_from_start:
            info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
        if info_dict.get('is_live') and formats:
            formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
            if get_from_start and not formats:
                self.raise_no_formats(info_dict, msg=(
                    '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
                    'If you want to download from the current time, use --no-live-from-start'))

        def is_wellformed(f):
            url = f.get('url')
            if not url:
                self.report_warning(
                    '"url" field is missing or empty - skipping format, '
                    'there is an error in extractor')
                return False
            if isinstance(url, bytes):
                sanitize_string_field(f, 'url')
            return True

        # Filter out malformed formats for better extraction robustness
        formats = list(filter(is_wellformed, formats or []))

        if not formats:
            self.raise_no_formats(info_dict)

        for format in formats:
            sanitize_string_field(format, 'format_id')
            sanitize_numeric_fields(format)
            format['url'] = sanitize_url(format['url'])
            if format.get('ext') is None:
                format['ext'] = determine_ext(format['url']).lower()
            if format.get('protocol') is None:
                format['protocol'] = determine_protocol(format)
            if format.get('resolution') is None:
                format['resolution'] = self.format_resolution(format, default=None)
            if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
                format['dynamic_range'] = 'SDR'
            if format.get('aspect_ratio') is None:
                format['aspect_ratio'] = try_call(lambda: round(format['width'] / format['height'], 2))
            # For fragmented formats, "tbr" is often max bitrate and not average
            if (('manifest-filesize-approx' in self.params['compat_opts'] or not format.get('manifest_url'))
                    and info_dict.get('duration') and format.get('tbr')
                    and not format.get('filesize') and not format.get('filesize_approx')):
                format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
            format['http_headers'] = self._calc_headers(collections.ChainMap(format, info_dict), load_cookies=True)

        # Safeguard against old/insecure infojson when using --load-info-json
        if info_dict.get('http_headers'):
            info_dict['http_headers'] = HTTPHeaderDict(info_dict['http_headers'])
            info_dict['http_headers'].pop('Cookie', None)

        # This is copied to http_headers by the above _calc_headers and can now be removed
        if '__x_forwarded_for_ip' in info_dict:
            del info_dict['__x_forwarded_for_ip']

        self.sort_formats({
            'formats': formats,
            '_format_sort_fields': info_dict.get('_format_sort_fields'),
        })

        # Sanitize and group by format_id
        formats_dict = {}
        for i, format in enumerate(formats):
            if not format.get('format_id'):
                format['format_id'] = str(i)
            else:
                # Sanitize format_id from characters used in format selector expression
                format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
            formats_dict.setdefault(format['format_id'], []).append(format)

        # Make sure all formats have unique format_id
        common_exts = set(itertools.chain(*self._format_selection_exts.values()))
        for format_id, ambiguous_formats in formats_dict.items():
            ambigious_id = len(ambiguous_formats) > 1
            for i, format in enumerate(ambiguous_formats):
                if ambigious_id:
                    format['format_id'] = '%s-%d' % (format_id, i)
                # Ensure there is no conflict between id and ext in format selection
                # See https://github.com/yt-dlp/yt-dlp/issues/1282
                if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
                    format['format_id'] = 'f%s' % format['format_id']

                if format.get('format') is None:
                    format['format'] = '{id} - {res}{note}'.format(
                        id=format['format_id'],
                        res=self.format_resolution(format),
                        note=format_field(format, 'format_note', ' (%s)'),
                    )

        if self.params.get('check_formats') is True:
            formats = LazyList(self._check_formats(formats[::-1]), reverse=True)

        if not formats or formats[0] is not info_dict:
            # only set the 'formats' fields if the original info_dict list them
            # otherwise we end up with a circular reference, the first (and unique)
            # element in the 'formats' field in info_dict is info_dict itself,
            # which can't be exported to json
            info_dict['formats'] = formats

        info_dict, _ = self.pre_process(info_dict)

        if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
            return info_dict

        self.post_extract(info_dict)
        info_dict, _ = self.pre_process(info_dict, 'after_filter')

        # The pre-processors may have modified the formats
        formats = self._get_formats(info_dict)

        list_only = self.params.get('simulate') == 'list_only'
        interactive_format_selection = not list_only and self.format_selector == '-'
        if self.params.get('list_thumbnails'):
            self.list_thumbnails(info_dict)
        if self.params.get('listsubtitles'):
            if 'automatic_captions' in info_dict:
                self.list_subtitles(
                    info_dict['id'], automatic_captions, 'automatic captions')
            self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
        if self.params.get('listformats') or interactive_format_selection:
            self.list_formats(info_dict)
        if list_only:
            # Without this printing, -F --print-json will not work
            self.__forced_printings(info_dict)
            return info_dict

        format_selector = self.format_selector
        while True:
            if interactive_format_selection:
                req_format = input(self._format_screen('\nEnter format selector ', self.Styles.EMPHASIS)
                                   + '(Press ENTER for default, or Ctrl+C to quit)'
                                   + self._format_screen(': ', self.Styles.EMPHASIS))
                try:
                    format_selector = self.build_format_selector(req_format) if req_format else None
                except SyntaxError as err:
                    self.report_error(err, tb=False, is_error=False)
                    continue

            if format_selector is None:
                req_format = self._default_format_spec(info_dict, download=download)
                self.write_debug(f'Default format spec: {req_format}')
                format_selector = self.build_format_selector(req_format)

            formats_to_download = list(format_selector({
                'formats': formats,
                'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
                'incomplete_formats': (all(f.get('vcodec') == 'none' for f in formats)  # No formats with video
                                       or all(f.get('acodec') == 'none' for f in formats)),  # OR, No formats with audio
            }))
            if interactive_format_selection and not formats_to_download:
                self.report_error('Requested format is not available', tb=False, is_error=False)
                continue
            break

        if not formats_to_download:
            if not self.params.get('ignore_no_formats_error'):
                raise ExtractorError(
                    'Requested format is not available. Use --list-formats for a list of available formats',
                    expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
            self.report_warning('Requested format is not available')
            # Process what we can, even without any available formats.
            formats_to_download = [{}]

        requested_ranges = tuple(self.params.get('download_ranges', lambda *_: [{}])(info_dict, self))
        best_format, downloaded_formats = formats_to_download[-1], []
        if download:
            if best_format and requested_ranges:
                def to_screen(*msg):
                    self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')

                to_screen(f'Downloading {len(formats_to_download)} format(s):',
                          (f['format_id'] for f in formats_to_download))
                if requested_ranges != ({}, ):
                    to_screen(f'Downloading {len(requested_ranges)} time ranges:',
                              (f'{c["start_time"]:.1f}-{c["end_time"]:.1f}' for c in requested_ranges))
            max_downloads_reached = False

            for fmt, chapter in itertools.product(formats_to_download, requested_ranges):
                new_info = self._copy_infodict(info_dict)
                new_info.update(fmt)
                offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
                end_time = offset + min(chapter.get('end_time', duration), duration)
                # duration may not be accurate. So allow deviations <1sec
                if end_time == float('inf') or end_time > offset + duration + 1:
                    end_time = None
                if chapter or offset:
                    new_info.update({
                        'section_start': offset + chapter.get('start_time', 0),
                        'section_end': end_time,
                        'section_title': chapter.get('title'),
                        'section_number': chapter.get('index'),
                    })
                downloaded_formats.append(new_info)
                try:
                    self.process_info(new_info)
                except MaxDownloadsReached:
                    max_downloads_reached = True
                self._raise_pending_errors(new_info)
                # Remove copied info
                for key, val in tuple(new_info.items()):
                    if info_dict.get(key) == val:
                        new_info.pop(key)
                if max_downloads_reached:
                    break

            write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
            assert write_archive.issubset({True, False, 'ignore'})
            if True in write_archive and False not in write_archive:
                self.record_download_archive(info_dict)

            info_dict['requested_downloads'] = downloaded_formats
            info_dict = self.run_all_pps('after_video', info_dict)
            if max_downloads_reached:
                raise MaxDownloadsReached()

        # We update the info dict with the selected best quality format (backwards compatibility)
        info_dict.update(best_format)
        return info_dict
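    # Illustrative sketch (not part of this module): the `download_ranges`
    # hook consumed above, built with the documented
    # `yt_dlp.utils.download_range_func` helper (downloads 10s-20s only):
    #
    #   import yt_dlp
    #   ydl = yt_dlp.YoutubeDL({
    #       'download_ranges': yt_dlp.utils.download_range_func(None, [(10, 20)]),
    #   })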

    def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
        """Select the requested subtitles and their format"""
        available_subs, normal_sub_langs = {}, []
        if normal_subtitles and self.params.get('writesubtitles'):
            available_subs.update(normal_subtitles)
            normal_sub_langs = tuple(normal_subtitles.keys())
        if automatic_captions and self.params.get('writeautomaticsub'):
            for lang, cap_info in automatic_captions.items():
                if lang not in available_subs:
                    available_subs[lang] = cap_info

        if not available_subs or (
                not self.params.get('writesubtitles')
                and not self.params.get('writeautomaticsub')):
            return None

        all_sub_langs = tuple(available_subs.keys())
        if self.params.get('allsubtitles', False):
            requested_langs = all_sub_langs
        elif self.params.get('subtitleslangs', False):
            try:
                requested_langs = orderedSet_from_options(
                    self.params.get('subtitleslangs'), {'all': all_sub_langs}, use_regex=True)
            except re.error as e:
                raise ValueError(f'Wrong regex for subtitleslangs: {e.pattern}')
        else:
            requested_langs = LazyList(itertools.chain(
                ['en'] if 'en' in normal_sub_langs else [],
                filter(lambda f: f.startswith('en'), normal_sub_langs),
                ['en'] if 'en' in all_sub_langs else [],
                filter(lambda f: f.startswith('en'), all_sub_langs),
                normal_sub_langs, all_sub_langs,
            ))[:1]
        if requested_langs:
            self.to_screen(f'[info] {video_id}: Downloading subtitles: {", ".join(requested_langs)}')

        formats_query = self.params.get('subtitlesformat', 'best')
        formats_preference = formats_query.split('/') if formats_query else []
        subs = {}
        for lang in requested_langs:
            formats = available_subs.get(lang)
            if formats is None:
                self.report_warning(f'{lang} subtitles not available for {video_id}')
                continue
            for ext in formats_preference:
                if ext == 'best':
                    f = formats[-1]
                    break
                matches = list(filter(lambda f: f['ext'] == ext, formats))
                if matches:
                    f = matches[-1]
                    break
            else:
                f = formats[-1]
                self.report_warning(
                    'No subtitle format found matching "%s" for language %s, '
                    'using %s' % (formats_query, lang, f['ext']))
            subs[lang] = f
        return subs
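
    # Example (editor's sketch, not part of the original file): with hypothetical
    # params {'writesubtitles': True, 'subtitleslangs': ['en.*', 'ja'],
    # 'subtitlesformat': 'srt/best'}, the regex 'en.*' selects every English
    # variant the extractor offers, and the first matching entry of the
    # 'srt/best' preference list is picked per language:
    #
    #   subs = ydl.process_subtitles(
    #       'xyz', normal_subtitles={'en': [{'ext': 'vtt'}, {'ext': 'srt'}]},
    #       automatic_captions={})
    #   # -> {'en': {'ext': 'srt'}}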

    def _forceprint(self, key, info_dict):
        if info_dict is None:
            return
        info_copy = info_dict.copy()
        info_copy.setdefault('filename', self.prepare_filename(info_dict))
        if info_dict.get('requested_formats') is not None:
            # For RTMP URLs, also include the playpath
            info_copy['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
        elif info_dict.get('url'):
            info_copy['urls'] = info_dict['url'] + info_dict.get('play_path', '')
        info_copy['formats_table'] = self.render_formats_table(info_dict)
        info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
        info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
        info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))

        def format_tmpl(tmpl):
            mobj = re.fullmatch(r'([\w.:,]|-\d|(?P<dict>{([\w.:,]|-\d)+}))+=?', tmpl)
            if not mobj:
                return tmpl

            fmt = '%({})s'
            if tmpl.startswith('{'):
                tmpl, fmt = f'.{tmpl}', '%({})j'
            if tmpl.endswith('='):
                tmpl, fmt = tmpl[:-1], '{0} = %({0})#j'
            return '\n'.join(map(fmt.format, [tmpl] if mobj.group('dict') else tmpl.split(',')))

        for tmpl in self.params['forceprint'].get(key, []):
            self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))

        for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
            filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
            tmpl = format_tmpl(tmpl)
            self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
            if self._ensure_dir_exists(filename):
                with open(filename, 'a', encoding='utf-8', newline='') as f:
                    f.write(self.evaluate_outtmpl(tmpl, info_copy) + os.linesep)

        return info_copy
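
    # How format_tmpl above expands the shorthand accepted by --print (derived
    # from the regex and fmt rules in _forceprint; the examples are the editor's
    # illustration, not part of the original file):
    #
    #   format_tmpl('id,title')   -> '%(id)s\n%(title)s'      # comma list: one field per line
    #   format_tmpl('{id,title}') -> '%(.{id,title})j'        # dict syntax: JSON output
    #   format_tmpl('formats=')   -> 'formats = %(formats)#j' # trailing '=': key = value
    #   format_tmpl('%(id)s')     -> '%(id)s'                 # full templates pass through unchanged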

    def __forced_printings(self, info_dict, filename=None, incomplete=True):
        if (self.params.get('forcejson')
                or self.params['forceprint'].get('video')
                or self.params['print_to_file'].get('video')):
            self.post_extract(info_dict)
        if filename:
            info_dict['filename'] = filename
        info_copy = self._forceprint('video', info_dict)

        def print_field(field, actual_field=None, optional=False):
            if actual_field is None:
                actual_field = field
            if self.params.get(f'force{field}') and (
                    info_copy.get(field) is not None or (not optional and not incomplete)):
                self.to_stdout(info_copy[actual_field])

        print_field('title')
        print_field('id')
        print_field('url', 'urls')
        print_field('thumbnail', optional=True)
        print_field('description', optional=True)
        print_field('filename')
        if self.params.get('forceduration') and info_copy.get('duration') is not None:
            self.to_stdout(formatSeconds(info_copy['duration']))
        print_field('format')

        if self.params.get('forcejson'):
            self.to_stdout(json.dumps(self.sanitize_info(info_dict)))

    def dl(self, name, info, subtitle=False, test=False):
        if not info.get('url'):
            self.raise_no_formats(info, True)

        if test:
            verbose = self.params.get('verbose')
            params = {
                'test': True,
                'quiet': self.params.get('quiet') or not verbose,
                'verbose': verbose,
                'noprogress': not verbose,
                'nopart': True,
                'skip_unavailable_fragments': False,
                'keep_fragments': False,
                'overwrites': True,
                '_no_ytdl_file': True,
            }
        else:
            params = self.params
        fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
        if not test:
            for ph in self._progress_hooks:
                fd.add_progress_hook(ph)
            urls = '", "'.join(
                (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
                for f in info.get('requested_formats', []) or [info])
            self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')

        # Note: Ideally info should be deep-copied so that hooks cannot modify it.
        # But it may contain objects that are not deep-copyable
        new_info = self._copy_infodict(info)
        if new_info.get('http_headers') is None:
            new_info['http_headers'] = self._calc_headers(new_info)
        return fd.download(name, new_info, subtitle)

    def existing_file(self, filepaths, *, default_overwrite=True):
        existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
        if existing_files and not self.params.get('overwrites', default_overwrite):
            return existing_files[0]

        for file in existing_files:
            self.report_file_delete(file)
            os.remove(file)
        return None

    def process_info(self, info_dict):
        """Process a single resolved IE result. (Modifies it in-place)"""

        assert info_dict.get('_type', 'video') == 'video'
        original_infodict = info_dict

        if 'format' not in info_dict and 'ext' in info_dict:
            info_dict['format'] = info_dict['ext']

        if self._match_entry(info_dict) is not None:
            info_dict['__write_download_archive'] = 'ignore'
            return

        # Does nothing under normal operation - for backward compatibility of process_info
        self.post_extract(info_dict)

        def replace_info_dict(new_info):
            nonlocal info_dict
            if new_info == info_dict:
                return
            info_dict.clear()
            info_dict.update(new_info)

        new_info, _ = self.pre_process(info_dict, 'video')
        replace_info_dict(new_info)
        self._num_downloads += 1

        # info_dict['_filename'] needs to be set for backward compatibility
        info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
        temp_filename = self.prepare_filename(info_dict, 'temp')
        files_to_move = {}

        self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))

        def check_max_downloads():
            if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
                raise MaxDownloadsReached()

        if self.params.get('simulate'):
            info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
            check_max_downloads()
            return

        if full_filename is None:
            return
        if not self._ensure_dir_exists(encodeFilename(full_filename)):
            return
        if not self._ensure_dir_exists(encodeFilename(temp_filename)):
            return

        if self._write_description('video', info_dict,
                                   self.prepare_filename(info_dict, 'description')) is None:
            return

        sub_files = self._write_subtitles(info_dict, temp_filename)
        if sub_files is None:
            return
        files_to_move.update(dict(sub_files))
        thumb_files = self._write_thumbnails(
            'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
        if thumb_files is None:
            return
        files_to_move.update(dict(thumb_files))

        infofn = self.prepare_filename(info_dict, 'infojson')
        _infojson_written = self._write_info_json('video', info_dict, infofn)
        if _infojson_written:
            info_dict['infojson_filename'] = infofn
            # For backward compatibility, even though it was a private field
            info_dict['__infojson_filename'] = infofn
        elif _infojson_written is None:
            return

        # Note: Annotations are deprecated
        annofn = None
        if self.params.get('writeannotations', False):
            annofn = self.prepare_filename(info_dict, 'annotation')
        if annofn:
            if not self._ensure_dir_exists(encodeFilename(annofn)):
                return
            if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
                self.to_screen('[info] Video annotations are already present')
            elif not info_dict.get('annotations'):
                self.report_warning('There are no annotations to write.')
            else:
                try:
                    self.to_screen('[info] Writing video annotations to: ' + annofn)
                    with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                        annofile.write(info_dict['annotations'])
                except (KeyError, TypeError):
                    self.report_warning('There are no annotations to write.')
                except OSError:
                    self.report_error('Cannot write annotations file: ' + annofn)
                    return

        # Write internet shortcut files
        def _write_link_file(link_type):
            url = try_get(info_dict['webpage_url'], iri_to_uri)
            if not url:
                self.report_warning(
                    f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
                return True
            linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
            if not self._ensure_dir_exists(encodeFilename(linkfn)):
                return False
            if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
                self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
                return True
            try:
                self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
                with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
                          newline='\r\n' if link_type == 'url' else '\n') as linkfile:
                    template_vars = {'url': url}
                    if link_type == 'desktop':
                        template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
                    linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
            except OSError:
                self.report_error(f'Cannot write internet shortcut {linkfn}')
                return False
            return True

        write_links = {
            'url': self.params.get('writeurllink'),
            'webloc': self.params.get('writewebloclink'),
            'desktop': self.params.get('writedesktoplink'),
        }
        if self.params.get('writelink'):
            link_type = ('webloc' if sys.platform == 'darwin'
                         else 'desktop' if sys.platform.startswith('linux')
                         else 'url')
            write_links[link_type] = True

        if any(should_write and not _write_link_file(link_type)
               for link_type, should_write in write_links.items()):
            return

        new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
        replace_info_dict(new_info)

        if self.params.get('skip_download'):
            info_dict['filepath'] = temp_filename
            info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
            info_dict['__files_to_move'] = files_to_move
            replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
            info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
        else:
            # Download
            info_dict.setdefault('__postprocessors', [])
            try:
                def existing_video_file(*filepaths):
                    ext = info_dict.get('ext')
                    converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
                    file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
                                              default_overwrite=False)
                    if file:
                        info_dict['ext'] = os.path.splitext(file)[1][1:]
                    return file

                fd, success = None, True
                if info_dict.get('protocol') or info_dict.get('url'):
                    fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
                    if fd != FFmpegFD and 'no-direct-merge' not in self.params['compat_opts'] and (
                            info_dict.get('section_start') or info_dict.get('section_end')):
                        msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
                               else 'You have requested downloading the video partially, but ffmpeg is not installed')
                        self.report_error(f'{msg}. Aborting')
                        return

                if info_dict.get('requested_formats') is not None:
                    old_ext = info_dict['ext']
                    if self.params.get('merge_output_format') is None:
                        if (info_dict['ext'] == 'webm'
                                and info_dict.get('thumbnails')
                                # check with type instead of pp_key, __name__, or isinstance
                                # since we don't want any custom PPs to trigger this
                                and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])):  # noqa: E721
                            info_dict['ext'] = 'mkv'
                            self.report_warning(
                                'webm doesn\'t support embedding a thumbnail, mkv will be used')
                    new_ext = info_dict['ext']

                    def correct_ext(filename, ext=new_ext):
                        if filename == '-':
                            return filename
                        filename_real_ext = os.path.splitext(filename)[1][1:]
                        filename_wo_ext = (
                            os.path.splitext(filename)[0]
                            if filename_real_ext in (old_ext, new_ext)
                            else filename)
                        return f'{filename_wo_ext}.{ext}'

                    # Ensure filename always has a correct extension for successful merge
                    full_filename = correct_ext(full_filename)
                    temp_filename = correct_ext(temp_filename)
                    dl_filename = existing_video_file(full_filename, temp_filename)

                    info_dict['__real_download'] = False
                    # NOTE: Copy so that original format dicts are not modified
                    info_dict['requested_formats'] = list(map(dict, info_dict['requested_formats']))

                    merger = FFmpegMergerPP(self)
                    downloaded = []
                    if dl_filename is not None:
                        self.report_file_already_downloaded(dl_filename)
                    elif fd:
                        for f in info_dict['requested_formats'] if fd != FFmpegFD else []:
                            f['filepath'] = fname = prepend_extension(
                                correct_ext(temp_filename, info_dict['ext']),
                                'f%s' % f['format_id'], info_dict['ext'])
                            downloaded.append(fname)
                        info_dict['url'] = '\n'.join(f['url'] for f in info_dict['requested_formats'])
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        if self.params.get('allow_unplayable_formats'):
                            self.report_warning(
                                'You have requested merging of multiple formats '
                                'while also allowing unplayable formats to be downloaded. '
                                'The formats won\'t be merged to prevent data corruption.')
                        elif not merger.available:
                            msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
                            if not self.params.get('ignoreerrors'):
                                self.report_error(f'{msg}. Aborting due to --abort-on-error')
                                return
                            self.report_warning(f'{msg}. The formats won\'t be merged')

                        if temp_filename == '-':
                            reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
                                      else 'but the formats are incompatible for simultaneous download' if merger.available
                                      else 'but ffmpeg is not installed')
                            self.report_warning(
                                f'You have requested downloading multiple formats to stdout {reason}. '
                                'The formats will be streamed one after the other')
                            fname = temp_filename
                        for f in info_dict['requested_formats']:
                            new_info = dict(info_dict)
                            del new_info['requested_formats']
                            new_info.update(f)
                            if temp_filename != '-':
                                fname = prepend_extension(
                                    correct_ext(temp_filename, new_info['ext']),
                                    'f%s' % f['format_id'], new_info['ext'])
                                if not self._ensure_dir_exists(fname):
                                    return
                                f['filepath'] = fname
                                downloaded.append(fname)
                            partial_success, real_download = self.dl(fname, new_info)
                            info_dict['__real_download'] = info_dict['__real_download'] or real_download
                            success = success and partial_success

                    if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
                        info_dict['__postprocessors'].append(merger)
                        info_dict['__files_to_merge'] = downloaded
                        # Even if there were no downloads, it is being merged only now
                        info_dict['__real_download'] = True
                    else:
                        for file in downloaded:
                            files_to_move[file] = None
                else:
                    # Just a single file
                    dl_filename = existing_video_file(full_filename, temp_filename)
                    if dl_filename is None or dl_filename == temp_filename:
                        # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
                        # So we should try to resume the download
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        self.report_file_already_downloaded(dl_filename)

                dl_filename = dl_filename or temp_filename
                info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
            except network_exceptions as err:
                self.report_error('unable to download video data: %s' % error_to_compat_str(err))
                return
            except OSError as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
                return

            self._raise_pending_errors(info_dict)
            if success and full_filename != '-':

                def fixup():
                    do_fixup = True
                    fixup_policy = self.params.get('fixup')
                    vid = info_dict['id']

                    if fixup_policy in ('ignore', 'never'):
                        return
                    elif fixup_policy == 'warn':
                        do_fixup = 'warn'
                    elif fixup_policy != 'force':
                        assert fixup_policy in ('detect_or_warn', None)
                        if not info_dict.get('__real_download'):
                            do_fixup = False

                    def ffmpeg_fixup(cndn, msg, cls):
                        if not (do_fixup and cndn):
                            return
                        elif do_fixup == 'warn':
                            self.report_warning(f'{vid}: {msg}')
                            return
                        pp = cls(self)
                        if pp.available:
                            info_dict['__postprocessors'].append(pp)
                        else:
                            self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')

                    stretched_ratio = info_dict.get('stretched_ratio')
                    ffmpeg_fixup(stretched_ratio not in (1, None),
                                 f'Non-uniform pixel ratio {stretched_ratio}',
                                 FFmpegFixupStretchedPP)

                    downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
                    downloader = downloader.FD_NAME if downloader else None

                    ext = info_dict.get('ext')
                    postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
                        isinstance(pp, FFmpegVideoConvertorPP)
                        and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
                    ) for pp in self._pps['post_process'])

                    if not postprocessed_by_ffmpeg:
                        ffmpeg_fixup(fd != FFmpegFD and ext == 'm4a'
                                     and info_dict.get('container') == 'm4a_dash',
                                     'writing DASH m4a. Only some players support this container',
                                     FFmpegFixupM4aPP)
                        ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
                                     or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
                                     'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
                                     FFmpegFixupM3u8PP)
                        ffmpeg_fixup(info_dict.get('is_live') and downloader == 'dashsegments',
                                     'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)

                    ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
                    ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)

                fixup()
                try:
                    replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
                except PostProcessingError as err:
                    self.report_error('Postprocessing: %s' % str(err))
                    return
                try:
                    for ph in self._post_hooks:
                        ph(info_dict['filepath'])
                except Exception as err:
                    self.report_error('post hooks: %s' % str(err))
                    return
                info_dict['__write_download_archive'] = True

        assert info_dict is original_infodict  # Make sure the info_dict was modified in-place
        if self.params.get('force_write_download_archive'):
            info_dict['__write_download_archive'] = True
        check_max_downloads()

    def __download_wrapper(self, func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                res = func(*args, **kwargs)
            except UnavailableVideoError as e:
                self.report_error(e)
            except DownloadCancelled as e:
                self.to_screen(f'[info] {e}')
                if not self.params.get('break_per_url'):
                    raise
                self._num_downloads = 0
            else:
                if self.params.get('dump_single_json', False):
                    self.post_extract(res)
                    self.to_stdout(json.dumps(self.sanitize_info(res)))
        return wrapper

    def download(self, url_list):
        """Download a given list of URLs."""
        url_list = variadic(url_list)  # Passing a single URL is a common mistake
        outtmpl = self.params['outtmpl']['default']
        if (len(url_list) > 1
                and outtmpl != '-'
                and '%' not in outtmpl
                and self.params.get('max_downloads') != 1):
            raise SameFileError(outtmpl)

        for url in url_list:
            self.__download_wrapper(self.extract_info)(
                url, force_generic_extractor=self.params.get('force_generic_extractor', False))

        return self._download_retcode
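
    # Minimal API usage sketch for download() (editor's illustration; the URL is
    # a placeholder). One YoutubeDL instance should normally handle all URLs:
    #
    #   from yt_dlp import YoutubeDL
    #   with YoutubeDL({'format': 'best'}) as ydl:
    #       retcode = ydl.download(['https://example.com/watch?v=xxxx'])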

    def download_with_info_file(self, info_filename):
        with contextlib.closing(fileinput.FileInput(
                [info_filename], mode='r',
                openhook=fileinput.hook_encoded('utf-8'))) as f:
            # FileInput doesn't have a read method, we can't call json.load
            infos = [self.sanitize_info(info, self.params.get('clean_infojson', True))
                     for info in variadic(json.loads('\n'.join(f)))]
        for info in infos:
            try:
                self.__download_wrapper(self.process_ie_result)(info, download=True)
            except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
                if not isinstance(e, EntryNotInPlaylist):
                    self.to_stderr('\r')
                webpage_url = info.get('webpage_url')
                if webpage_url is None:
                    raise
                self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
                self.download([webpage_url])
        return self._download_retcode
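
    # Sketch of the round trip this enables (editor's illustration with a
    # placeholder filename): an infojson written by _write_info_json can be fed
    # back in to re-run the download without re-extraction:
    #
    #   with YoutubeDL(params) as ydl:
    #       ydl.download_with_info_file('example.info.json')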

    @staticmethod
    def sanitize_info(info_dict, remove_private_keys=False):
        ''' Sanitize the infodict for converting to json '''
        if info_dict is None:
            return info_dict
        info_dict.setdefault('epoch', int(time.time()))
        info_dict.setdefault('_type', 'video')
        info_dict.setdefault('_version', {
            'version': __version__,
            'current_git_head': current_git_head(),
            'release_git_head': RELEASE_GIT_HEAD,
            'repository': ORIGIN,
        })

        if remove_private_keys:
            reject = lambda k, v: v is None or k.startswith('__') or k in {
                'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
                'entries', 'filepath', '_filename', 'filename', 'infojson_filename', 'original_url',
                'playlist_autonumber',
            }
        else:
            reject = lambda k, v: False

        def filter_fn(obj):
            if isinstance(obj, dict):
                return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
            elif isinstance(obj, (list, tuple, set, LazyList)):
                return list(map(filter_fn, obj))
            elif obj is None or isinstance(obj, (str, int, float, bool)):
                return obj
            else:
                return repr(obj)

        return filter_fn(info_dict)
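
    # Example of what sanitize_info guarantees (editor's illustration): the
    # result is always json-serializable, since values that are not primitives,
    # dicts, or sequences fall back to repr():
    #
    #   clean = YoutubeDL.sanitize_info(info, remove_private_keys=True)
    #   json.dumps(clean)  # safe: '__'-prefixed and None-valued keys are gone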

    @staticmethod
    def filter_requested_info(info_dict, actually_filter=True):
        ''' Alias of sanitize_info for backward compatibility '''
        return YoutubeDL.sanitize_info(info_dict, actually_filter)

    def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
        for filename in set(filter(None, files_to_delete)):
            if msg:
                self.to_screen(msg % filename)
            try:
                os.remove(filename)
            except OSError:
                self.report_warning(f'Unable to delete file {filename}')
            if filename in info.get('__files_to_move', []):  # NB: Delete even if None
                del info['__files_to_move'][filename]

    @staticmethod
    def post_extract(info_dict):
        def actual_post_extract(info_dict):
            if info_dict.get('_type') in ('playlist', 'multi_video'):
                for video_dict in info_dict.get('entries', {}):
                    actual_post_extract(video_dict or {})
                return

            post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
            info_dict.update(post_extractor())

        actual_post_extract(info_dict or {})

    def run_pp(self, pp, infodict):
        files_to_delete = []
        if '__files_to_move' not in infodict:
            infodict['__files_to_move'] = {}
        try:
            files_to_delete, infodict = pp.run(infodict)
        except PostProcessingError as e:
            # Must be True and not 'only_download'
            if self.params.get('ignoreerrors') is True:
                self.report_error(e)
                return infodict
            raise

        if not files_to_delete:
            return infodict
        if self.params.get('keepvideo', False):
            for f in files_to_delete:
                infodict['__files_to_move'].setdefault(f, '')
        else:
            self._delete_downloaded_files(
                *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
        return infodict

    def run_all_pps(self, key, info, *, additional_pps=None):
        if key != 'video':
            self._forceprint(key, info)
        for pp in (additional_pps or []) + self._pps[key]:
            info = self.run_pp(pp, info)
        return info

    def pre_process(self, ie_info, key='pre_process', files_to_move=None):
        info = dict(ie_info)
        info['__files_to_move'] = files_to_move or {}
        try:
            info = self.run_all_pps(key, info)
        except PostProcessingError as err:
            msg = f'Preprocessing: {err}'
            info.setdefault('__pending_error', msg)
            self.report_error(msg, is_error=False)
        return info, info.pop('__files_to_move', None)

    def post_process(self, filename, info, files_to_move=None):
        """Run all the postprocessors on the given file."""
        info['filepath'] = filename
        info['__files_to_move'] = files_to_move or {}
        info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
        info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
        del info['__files_to_move']
        return self.run_all_pps('after_move', info)
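
    # The postprocessor lifecycle implemented by the three methods above, in the
    # order the keys run for a single video (editor's summary):
    #
    #   'pre_process' -> 'before_dl' -> download -> 'post_process'
    #       -> MoveFilesAfterDownloadPP -> 'after_move'
    #
    # and run_all_pps('after_video', ...) runs once all requested formats of a
    # video have been processed (see process_video_result above).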

    def _make_archive_id(self, info_dict):
        video_id = info_dict.get('id')
        if not video_id:
            return
        # Future-proof against any change in case
        # and backwards compatibility with prior versions
        extractor = info_dict.get('extractor_key') or info_dict.get('ie_key')  # key in a playlist
        if extractor is None:
            url = str_or_none(info_dict.get('url'))
            if not url:
                return
            # Try to find matching extractor for the URL and take its ie_key
            for ie_key, ie in self._ies.items():
                if ie.suitable(url):
                    extractor = ie_key
                    break
            else:
                return
        return make_archive_id(extractor, video_id)

    def in_download_archive(self, info_dict):
        if not self.archive:
            return False

        vid_ids = [self._make_archive_id(info_dict)]
        vid_ids.extend(info_dict.get('_old_archive_ids') or [])
        return any(id_ in self.archive for id_ in vid_ids)

    def record_download_archive(self, info_dict):
        fn = self.params.get('download_archive')
        if fn is None:
            return
        vid_id = self._make_archive_id(info_dict)
        assert vid_id

        self.write_debug(f'Adding to archive: {vid_id}')
        if is_path_like(fn):
            with locked_file(fn, 'a', encoding='utf-8') as archive_file:
                archive_file.write(vid_id + '\n')
        self.archive.add(vid_id)
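
    # Archive entries are single lines of the form '<lowercased extractor key> <video id>',
    # e.g. 'youtube dQw4w9WgXcQ' (the id here is illustrative). This is the string
    # in_download_archive() matches against and record_download_archive() appends.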

    @staticmethod
    def format_resolution(format, default='unknown'):
        if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
            return 'audio only'
        if format.get('resolution') is not None:
            return format['resolution']
        if format.get('width') and format.get('height'):
            return '%dx%d' % (format['width'], format['height'])
        elif format.get('height'):
            return '%sp' % format['height']
        elif format.get('width'):
            return '%dx?' % format['width']
        return default
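
    # Sample outputs of format_resolution (editor's illustration):
    #
    #   format_resolution({'vcodec': 'none', 'acodec': 'mp4a'})  -> 'audio only'
    #   format_resolution({'width': 1920, 'height': 1080})       -> '1920x1080'
    #   format_resolution({'height': 720})                       -> '720p'
    #   format_resolution({'width': 640})                        -> '640x?'
    #   format_resolution({})                                    -> 'unknown'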

    def _list_format_headers(self, *headers):
        if self.params.get('listformats_table', True) is not False:
            return [self._format_out(header, self.Styles.HEADERS) for header in headers]
        return headers

    def _format_note(self, fdict):
        res = ''
        if fdict.get('ext') in ['f4f', 'f4m']:
            res += '(unsupported)'
        if fdict.get('language'):
            if res:
                res += ' '
            res += '[%s]' % fdict['language']
        if fdict.get('format_note') is not None:
            if res:
                res += ' '
            res += fdict['format_note']
        if fdict.get('tbr') is not None:
            if res:
                res += ', '
            res += '%4dk' % fdict['tbr']
        if fdict.get('container') is not None:
            if res:
                res += ', '
            res += '%s container' % fdict['container']
        if (fdict.get('vcodec') is not None
                and fdict.get('vcodec') != 'none'):
            if res:
                res += ', '
            res += fdict['vcodec']
            if fdict.get('vbr') is not None:
                res += '@'
        elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
            res += 'video@'
        if fdict.get('vbr') is not None:
            res += '%4dk' % fdict['vbr']
        if fdict.get('fps') is not None:
            if res:
                res += ', '
            res += '%sfps' % fdict['fps']
        if fdict.get('acodec') is not None:
            if res:
                res += ', '
            if fdict['acodec'] == 'none':
                res += 'video only'
            else:
                res += '%-5s' % fdict['acodec']
        elif fdict.get('abr') is not None:
            if res:
                res += ', '
            res += 'audio'
        if fdict.get('abr') is not None:
            res += '@%3dk' % fdict['abr']
        if fdict.get('asr') is not None:
            res += ' (%5dHz)' % fdict['asr']
        if fdict.get('filesize') is not None:
            if res:
                res += ', '
            res += format_bytes(fdict['filesize'])
        elif fdict.get('filesize_approx') is not None:
            if res:
                res += ', '
            res += '~' + format_bytes(fdict['filesize_approx'])
        return res
, info_dict
):
3801 if info_dict
.get('formats') is None:
3802 if info_dict
.get('url') and info_dict
.get('_type', 'video') == 'video':
3805 return info_dict
['formats']

    def render_formats_table(self, info_dict):
        formats = self._get_formats(info_dict)
        if not formats:
            return
        # Legacy (non-table) listing
        if self.params.get('listformats_table', True) is False:
            table = [
                [
                    format_field(f, 'format_id'),
                    format_field(f, 'ext'),
                    self.format_resolution(f),
                    self._format_note(f),
                ] for f in formats if (f.get('preference') or 0) >= -1000]
            return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)

        def simplified_codec(f, field):
            assert field in ('acodec', 'vcodec')
            codec = f.get(field)
            if not codec:
                return 'unknown'
            elif codec != 'none':
                return '.'.join(codec.split('.')[:4])

            if field == 'vcodec' and f.get('acodec') == 'none':
                return 'images'
            elif field == 'acodec' and f.get('vcodec') == 'none':
                return ''
            return self._format_out('audio only' if field == 'vcodec' else 'video only',
                                    self.Styles.SUPPRESS)

        delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
        table = [
            [
                self._format_out(format_field(f, 'format_id'), self.Styles.ID),
                format_field(f, 'ext'),
                format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
                format_field(f, 'fps', '\t%d', func=round),
                format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
                format_field(f, 'audio_channels', '\t%s'),
                delim, (
                    format_field(f, 'filesize', ' \t%s', func=format_bytes)
                    or format_field(f, 'filesize_approx', '≈\t%s', func=format_bytes)
                    or format_field(try_call(lambda: format_bytes(int(info_dict['duration'] * f['tbr'] * (1024 / 8)))),
                                    None, self._format_out('~\t%s', self.Styles.SUPPRESS))),
                format_field(f, 'tbr', '\t%dk', func=round),
                shorten_protocol_name(f.get('protocol', '')),
                delim,
                simplified_codec(f, 'vcodec'),
                format_field(f, 'vbr', '\t%dk', func=round),
                simplified_codec(f, 'acodec'),
                format_field(f, 'abr', '\t%dk', func=round),
                format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
                join_nonempty(format_field(f, 'language', '[%s]'), join_nonempty(
                    self._format_out('UNSUPPORTED', self.Styles.BAD_FORMAT) if f.get('ext') in ('f4f', 'f4m') else None,
                    (self._format_out('Maybe DRM', self.Styles.WARNING) if f.get('has_drm') == 'maybe'
                     else self._format_out('DRM', self.Styles.BAD_FORMAT) if f.get('has_drm') else None),
                    format_field(f, 'format_note'),
                    format_field(f, 'container', ignore=(None, f.get('ext'))),
                    delim=', '), delim=' '),
            ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
        header_line = self._list_format_headers(
            'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', 'CH', delim, '\tFILESIZE', '\tTBR', 'PROTO',
            delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')

        return render_table(
            header_line, table, hide_empty=True,
            delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))

    def render_thumbnails_table(self, info_dict):
        thumbnails = list(info_dict.get('thumbnails') or [])
        if not thumbnails:
            return None
        return render_table(
            self._list_format_headers('ID', 'Width', 'Height', 'URL'),
            [[t.get('id'), t.get('width') or 'unknown', t.get('height') or 'unknown', t['url']] for t in thumbnails])

    def render_subtitles_table(self, video_id, subtitles):
        def _row(lang, formats):
            exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
            if len(set(names)) == 1:
                names = [] if names[0] == 'unknown' else names[:1]
            return [lang, ', '.join(names), ', '.join(exts)]

        if not subtitles:
            return None
        return render_table(
            self._list_format_headers('Language', 'Name', 'Formats'),
            [_row(lang, formats) for lang, formats in subtitles.items()],
            hide_empty=True)

    def __list_table(self, video_id, name, func, *args):
        table = func(*args)
        if not table:
            self.to_screen(f'{video_id} has no {name}')
            return
        self.to_screen(f'[info] Available {name} for {video_id}:')
        self.to_stdout(table)

    def list_formats(self, info_dict):
        self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)

    def list_thumbnails(self, info_dict):
        self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)

    def list_subtitles(self, video_id, subtitles, name='subtitles'):
        self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)

    def print_debug_header(self):
        if not self.params.get('verbose'):
            return

        from . import _IN_CLI  # Must be delayed import

        # These imports can be slow. So import them only as needed
        from .extractor.extractors import _LAZY_LOADER
        from .extractor.extractors import (
            _PLUGIN_CLASSES as plugin_ies,
            _PLUGIN_OVERRIDES as plugin_ie_overrides,
        )

        def get_encoding(stream):
            ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
            additional_info = []
            if os.environ.get('TERM', '').lower() == 'dumb':
                additional_info.append('dumb')
            if not supports_terminal_sequences(stream):
                from .utils import WINDOWS_VT_MODE  # Must be imported locally
                additional_info.append('No VT' if WINDOWS_VT_MODE is False else 'No ANSI')
            if additional_info:
                ret = f'{ret} ({",".join(additional_info)})'
            return ret

        encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            self.get_encoding(),
            ', '.join(
                f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
                if stream is not None and key != 'console'))

        logger = self.params.get('logger')
        if logger:
            write_debug = lambda msg: logger.debug(f'[debug] {msg}')
            write_debug(encoding_str)
        else:
            write_string(f'[debug] {encoding_str}\n', encoding=None)
            write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')

        source = detect_variant()
        if VARIANT not in (None, 'pip'):
            source += '*'
        klass = type(self)
        write_debug(join_nonempty(
            f'{REPOSITORY.rpartition("/")[2]} version',
            _make_label(ORIGIN, CHANNEL.partition('@')[2] or __version__, __version__),
            f'[{RELEASE_GIT_HEAD[:9]}]' if RELEASE_GIT_HEAD else '',
            '' if source == 'unknown' else f'({source})',
            '' if _IN_CLI else 'API' if klass == YoutubeDL else f'API:{self.__module__}.{klass.__qualname__}',
            delim=' '))

        if not _IN_CLI:
            write_debug(f'params: {self.params}')

        if not _LAZY_LOADER:
            if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
                write_debug('Lazy loading extractors is forcibly disabled')
            else:
                write_debug('Lazy loading extractors is disabled')
        if self.params['compat_opts']:
            write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))

        if current_git_head():
            write_debug(f'Git HEAD: {current_git_head()}')
        write_debug(system_identifier())

        exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
        ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
        if ffmpeg_features:
            exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))

        exe_versions['rtmpdump'] = rtmpdump_version()
        exe_versions['phantomjs'] = PhantomJSwrapper._version()
        exe_str = ', '.join(
            f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
        ) or 'none'
        write_debug('exe versions: %s' % exe_str)

        from .compat.compat_utils import get_package_info
        from .dependencies import available_dependencies

        write_debug('Optional libraries: %s' % (', '.join(sorted({
            join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
        })) or 'none'))

        write_debug(f'Proxy map: {self.proxies}')
        write_debug(f'Request Handlers: {", ".join(rh.RH_NAME for rh in self._request_director.handlers.values())}')
        for plugin_type, plugins in {'Extractor': plugin_ies, 'Post-Processor': plugin_pps}.items():
            display_list = ['%s%s' % (
                klass.__name__, '' if klass.__name__ == name else f' as {name}')
                for name, klass in plugins.items()]
            if plugin_type == 'Extractor':
                display_list.extend(f'{plugins[-1].IE_NAME.partition("+")[2]} ({parent.__name__})'
                                    for parent, plugins in plugin_ie_overrides.items())
            if not display_list:
                continue
            write_debug(f'{plugin_type} Plugins: {", ".join(sorted(display_list))}')

        plugin_dirs = plugin_directories()
        if plugin_dirs:
            write_debug(f'Plugin directories: {plugin_dirs}')

        # Not implemented
        if False and self.params.get('call_home'):
            ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
            write_debug('Public IP address: %s' % ipaddr)
            latest_version = self.urlopen(
                'https://yt-dl.org/latest/version').read().decode()
            if version_tuple(latest_version) > version_tuple(__version__):
                self.report_warning(
                    'You are using an outdated version (newest version: %s)! '
                    'See https://yt-dl.org/update if you need help updating.' %
                    latest_version)

    @functools.cached_property
    def proxies(self):
        """Global proxy configuration"""
        opts_proxy = self.params.get('proxy')
        if opts_proxy is not None:
            if opts_proxy == '':
                opts_proxy = '__noproxy__'
            proxies = {'all': opts_proxy}
        else:
            proxies = urllib.request.getproxies()
            # compat. Set HTTPS_PROXY to __noproxy__ to revert
            if 'http' in proxies and 'https' not in proxies:
                proxies['https'] = proxies['http']

        return proxies

    @functools.cached_property
    def cookiejar(self):
        """Global cookiejar instance"""
        return load_cookies(
            self.params.get('cookiefile'), self.params.get('cookiesfrombrowser'), self)

    @property
    def _opener(self):
        """
        Get a urllib OpenerDirector from the Urllib handler (deprecated).
        """
        self.deprecation_warning('YoutubeDL._opener is deprecated, use YoutubeDL.urlopen()')
        handler = self._request_director.handlers['Urllib']
        return handler._get_instance(cookiejar=self.cookiejar, proxies=self.proxies)

    def urlopen(self, req):
        """ Start an HTTP download """
        if isinstance(req, str):
            req = Request(req)
        elif isinstance(req, urllib.request.Request):
            self.deprecation_warning(
                'Passing a urllib.request.Request object to YoutubeDL.urlopen() is deprecated. '
                'Use yt_dlp.networking.common.Request instead.')
            req = urllib_req_to_req(req)
        assert isinstance(req, Request)

        # compat: Assume user:pass url params are basic auth
        url, basic_auth_header = extract_basic_auth(req.url)
        if basic_auth_header:
            req.headers['Authorization'] = basic_auth_header
        req.url = sanitize_url(url)

        clean_proxies(proxies=req.proxies, headers=req.headers)
        clean_headers(req.headers)

        try:
            return self._request_director.send(req)
        except NoSupportingHandlers as e:
            for ue in e.unsupported_errors:
                # FIXME: This depends on the order of errors.
                if not (ue.handler and ue.msg):
                    continue
                if ue.handler.RH_KEY == 'Urllib' and 'unsupported url scheme: "file"' in ue.msg.lower():
                    raise RequestError(
                        'file:// URLs are disabled by default in yt-dlp for security reasons. '
                        'Use --enable-file-urls to enable at your own risk.', cause=ue) from ue
                if 'unsupported proxy type: "https"' in ue.msg.lower():
                    raise RequestError(
                        'To use an HTTPS proxy for this request, one of the following dependencies needs to be installed: requests')
                elif (
                    re.match(r'unsupported url scheme: "wss?"', ue.msg.lower())
                    and 'websockets' not in self._request_director.handlers
                ):
                    raise RequestError(
                        'This request requires WebSocket support. '
                        'Ensure one of the following dependencies are installed: websockets',
                        cause=ue) from ue
            raise
        except SSLError as e:
            if 'UNSAFE_LEGACY_RENEGOTIATION_DISABLED' in str(e):
                raise RequestError('UNSAFE_LEGACY_RENEGOTIATION_DISABLED: Try using --legacy-server-connect', cause=e) from e
            elif 'SSLV3_ALERT_HANDSHAKE_FAILURE' in str(e):
                raise RequestError(
                    'SSLV3_ALERT_HANDSHAKE_FAILURE: The server may not support the current cipher list. '
                    'Try using --legacy-server-connect', cause=e) from e
            raise
        except HTTPError as e:  # TODO: Remove in a future release
            raise _CompatHTTPError(e) from e
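
    # Sketch of calling urlopen() through the networking stack (editor's
    # illustration with a placeholder URL). Strings are wrapped into a Request,
    # and the configured RequestDirector picks a suitable handler:
    #
    #   response = ydl.urlopen(Request('https://example.com', headers={'X-Test': '1'}))
    #   data = response.read()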

    def build_request_director(self, handlers, preferences=None):
        logger = _YDLLogger(self)
        headers = self.params['http_headers'].copy()
        proxies = self.proxies.copy()
        clean_headers(headers)
        clean_proxies(proxies, headers)

        director = RequestDirector(logger=logger, verbose=self.params.get('debug_printtraffic'))
        for handler in handlers:
            director.add_handler(handler(
                logger=logger,
                headers=headers,
                cookiejar=self.cookiejar,
                proxies=proxies,
                prefer_system_certs='no-certifi' in self.params['compat_opts'],
                verify=not self.params.get('nocheckcertificate'),
                **traverse_obj(self.params, {
                    'verbose': 'debug_printtraffic',
                    'source_address': 'source_address',
                    'timeout': 'socket_timeout',
                    'legacy_ssl_support': 'legacyserverconnect',
                    'enable_file_urls': 'enable_file_urls',
                    'client_cert': {
                        'client_certificate': 'client_certificate',
                        'client_certificate_key': 'client_certificate_key',
                        'client_certificate_password': 'client_certificate_password',
                    },
                }),
            ))
        director.preferences.update(preferences or [])
        if 'prefer-legacy-http-handler' in self.params['compat_opts']:
            director.preferences.add(lambda rh, _: 500 if rh.RH_KEY == 'Urllib' else 0)
        return director

    def encode(self, s):
        if isinstance(s, bytes):
            return s  # Already encoded

        try:
            return s.encode(self.get_encoding())
        except UnicodeEncodeError as err:
            err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
            raise

    def get_encoding(self):
        encoding = self.params.get('encoding')
        if encoding is None:
            encoding = preferredencoding()
        return encoding

    def _write_info_json(self, label, ie_result, infofn, overwrite=None):
        ''' Write infojson and return True = written, 'exists' = already exists, False = skip, None = error '''
        if overwrite is None:
            overwrite = self.params.get('overwrites', True)
        if not self.params.get('writeinfojson'):
            return False
        elif not infofn:
            self.write_debug(f'Skipping writing {label} infojson')
            return False
        elif not self._ensure_dir_exists(infofn):
            return None
        elif not overwrite and os.path.exists(infofn):
            self.to_screen(f'[info] {label.title()} metadata is already present')
            return 'exists'

        self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
        try:
            write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
            return True
        except OSError:
            self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
            return None

    def _write_description(self, label, ie_result, descfn):
        ''' Write description and return True = written, False = skip, None = error '''
        if not self.params.get('writedescription'):
            return False
        elif not descfn:
            self.write_debug(f'Skipping writing {label} description')
            return False
        elif not self._ensure_dir_exists(descfn):
            return None
        elif not self.params.get('overwrites', True) and os.path.exists(descfn):
            self.to_screen(f'[info] {label.title()} description is already present')
        elif ie_result.get('description') is None:
            self.to_screen(f'[info] There\'s no {label} description to write')
            return False
        else:
            try:
                self.to_screen(f'[info] Writing {label} description to: {descfn}')
                with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                    descfile.write(ie_result['description'])
            except OSError:
                self.report_error(f'Cannot write {label} description file {descfn}')
                return None
        return True

    def _write_subtitles(self, info_dict, filename):
        ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error '''
        ret = []
        subtitles = info_dict.get('requested_subtitles')
        if not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
            # subtitles download errors are already managed as troubles in relevant IE
            # that way it will silently go on when used with unsupporting IE
            return ret
        elif not subtitles:
            self.to_screen('[info] There are no subtitles for the requested languages')
            return ret
        sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
        if not sub_filename_base:
            self.to_screen('[info] Skipping writing video subtitles')
            return ret

        for sub_lang, sub_info in subtitles.items():
            sub_format = sub_info['ext']
            sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
            sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
            existing_sub = self.existing_file((sub_filename_final, sub_filename))
            if existing_sub:
                self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
                sub_info['filepath'] = existing_sub
                ret.append((existing_sub, sub_filename_final))
                continue

            self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
            if sub_info.get('data') is not None:
                try:
                    # Use newline='' to prevent conversion of newline characters
                    # See https://github.com/ytdl-org/youtube-dl/issues/10268
                    with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
                        subfile.write(sub_info['data'])
                    sub_info['filepath'] = sub_filename
                    ret.append((sub_filename, sub_filename_final))
                    continue
                except OSError:
                    self.report_error(f'Cannot write video subtitles file {sub_filename}')
                    return None

            try:
                sub_copy = sub_info.copy()
                sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
                self.dl(sub_filename, sub_copy, subtitle=True)
                sub_info['filepath'] = sub_filename
                ret.append((sub_filename, sub_filename_final))
            except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
                msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
                if self.params.get('ignoreerrors') is not True:  # False or 'only_download'
                    if not self.params.get('ignoreerrors'):
                        self.report_error(msg)
                        raise DownloadError(msg)
                self.report_warning(msg)
        return ret

    def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
        ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename); or None if error '''
        write_all = self.params.get('write_all_thumbnails', False)
        thumbnails, ret = [], []
        if write_all or self.params.get('writethumbnail', False):
            thumbnails = info_dict.get('thumbnails') or []
            if not thumbnails:
                self.to_screen(f'[info] There are no {label} thumbnails to download')
                return ret
        multiple = write_all and len(thumbnails) > 1

        if thumb_filename_base is None:
            thumb_filename_base = filename
        if thumbnails and not thumb_filename_base:
            self.write_debug(f'Skipping writing {label} thumbnail')
            return ret

        if thumbnails and not self._ensure_dir_exists(filename):
            return None

        for idx, t in list(enumerate(thumbnails))[::-1]:
            thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
            thumb_display_id = f'{label} thumbnail {t["id"]}'
            thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
            thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))

            existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
            if existing_thumb:
                self.to_screen('[info] %s is already present' % (
                    thumb_display_id if multiple else f'{label} thumbnail').capitalize())
                t['filepath'] = existing_thumb
                ret.append((existing_thumb, thumb_filename_final))
            else:
                self.to_screen(f'[info] Downloading {thumb_display_id} ...')
                try:
                    uf = self.urlopen(Request(t['url'], headers=t.get('http_headers', {})))
                    self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
                    with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    ret.append((thumb_filename, thumb_filename_final))
                    t['filepath'] = thumb_filename
                except network_exceptions as err:
                    if isinstance(err, HTTPError) and err.status == 404:
                        self.to_screen(f'[info] {thumb_display_id.title()} does not exist')
                    else:
                        self.report_warning(f'Unable to download {thumb_display_id}: {err}')
                    thumbnails.pop(idx)
            if ret and not write_all: