from .cache import Cache
from .compat import functools, urllib  # isort: split
from .compat import compat_os_name, compat_shlex_quote, urllib_req_to_req
from .cookies import LenientSimpleCookie, load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.common import UnsupportedURLIE
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .networking import HEADRequest, Request, RequestDirector
from .networking.common import _REQUEST_HANDLERS, _RH_PREFERENCES
from .networking.exceptions import (
from .plugins import directories as plugin_directories
from .postprocessor import _PLUGIN_CLASSES as plugin_pps
from .postprocessor import (
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegVideoConvertorPP,
    MoveFilesAfterDownloadPP,
)
from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
from .update import REPOSITORY, _get_system_deprecation, current_git_head, detect_variant
from .utils import (
    UnavailableVideoError,
    format_decimal_suffix,
    orderedSet_from_options,
    remove_terminal_sequences,
    supports_terminal_sequences,
    windows_enable_vt_mode,
)
from .utils._utils import _YDLLogger
from .utils.networking import (
from .version import CHANNEL, RELEASE_GIT_HEAD, VARIANT, __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL:
    """YoutubeDL class.

    YoutubeDL objects are responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Since, given a video URL, the downloader doesn't know how to
    extract all the needed information (that is the InfoExtractors' job), it
    has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge of the InfoExtractors
    that are added to it, so this is a "mutual registration".
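
    A minimal usage sketch (the option values are illustrative, not defaults):

        ydl_opts = {
            'format': 'bestvideo*+bestaudio/best',
            'outtmpl': {'default': '%(title)s [%(id)s].%(ext)s'},
        }
        with YoutubeDL(ydl_opts) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])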

    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    netrc_location:    Location of the netrc file. Defaults to ~/.netrc.
    netrc_cmd:         Use a shell command to get credentials
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A dict with keys WHEN mapped to a list of templates to
                       print to stdout. The allowed keys are video or any of the
                       items in utils.POSTPROCESS_WHEN.
                       For compatibility, a single list is also accepted
    print_to_file:     A dict with keys WHEN (same as forceprint) mapped to
                       a list of tuples with (template, filename)
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or list_thumbnails is used
    format:            Video format code. See "FORMAT SELECTION" for more details.
                       You can also pass a function. The function takes 'ctx' as
                       argument and returns the formats to download.
                       See "build_format_selector" for an implementation
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental)
    format_sort:       A list of fields by which to sort the video formats.
                       See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
    prefer_free_formats: Whether to prefer video formats with free containers
                       over non-free ones of the same quality.
    allow_multiple_video_streams: Allow multiple video streams to be merged
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
    check_formats      Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none),
                       'selected' (check selected formats),
                       or None (check only if requested by extractor)
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils/_utils.py)
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils/_utils.py).
                       For compatibility with youtube-dl, a single string can also be used
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be Windows-compatible
    ignoreerrors:      Do not stop on download/postprocessing errors.
                       Can be 'only_download' to ignore only download errors.
                       Default is 'only_download' for CLI, but False for API
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped
    allowed_extractors: List of regexes to match against extractor names that are allowed
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False
    playlist_items:    Specific indices of playlist to download.
    playlistrandom:    Download playlist items in random order.
    lazy_playlist:     Process playlist entries as they are received.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Print everything to stderr instead of stdout.
    consoletitle:      Display progress in console window's titlebar.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video metadata to a .info.json file
    clean_infojson:    Remove internal metadata from the infojson
    getcomments:       Extract video comments. This will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc
                       also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. The language can be prefixed with a "-" to
                       exclude it from the requested languages, e.g. ['all', '-live_chat']
    keepvideo:         Keep the video file after post-processing
    daterange:         A utils.DateRange object, download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Videos unsuitable for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  A set, or the name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_per_url:     Whether break_on_reject and break_on_existing
                       should act on each input URL as opposed to for the entire queue
    cookiefile:        File name or text stream from where cookies should be read and dumped to
    cookiesfrombrowser: A tuple containing the name of the browser, the profile
                       name/path from where cookies are loaded, the name of the keyring,
                       and the container name, e.g. ('chrome', ) or
                       ('vivaldi', 'default', 'BASICTEXT') or ('firefox', 'default', None, 'Meta')
    legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
                       support RFC 5746 secure renegotiation
    nocheckcertificate: Do not verify SSL certificates
    client_certificate: Path to client certificate file in PEM format. May include the private key
    client_certificate_key: Path to private key file for client certificate
    client_certificate_password: Password for client certificate private key, if encrypted.
                       If not provided and the key is encrypted, yt-dlp will ask interactively
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       (Only supported by some extractors)
    enable_file_urls:  Enable file:// URLs. This is disabled by default for security reasons.
    http_headers:      A dictionary of custom headers to be used for all requests
    proxy:             URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic: Print out sent and received HTTP traffic
    default_search:    Prepend this string if an input URL is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Whether to resolve and process url_results further
                       * False: Always process. Default for API
                       * True: Never process
                       * 'in_playlist': Do not process inside playlist/multi_video
                       * 'discard': Always process, but don't return the result
                         from inside playlist/multi_video
                       * 'discard_in_playlist': Same as "discard", but only for
                         playlists (not multi_video). Default for CLI
    wait_for_video:    If given, wait for scheduled streams to become available.
                       The value should be a tuple containing the range
                       (min_secs, max_secs) to wait between retries
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Allowed values are
                               the entries of utils.POSTPROCESS_WHEN.
                               Assumed to be 'post_process' if not given
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading", or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
                       (See the usage sketch after this option list.)
    postprocessor_hooks: A list of functions that get called on postprocessing
                       progress, with a dictionary with the entries
                       * status: One of "started", "processing", or "finished".
                                 Check this first and ignore unknown values.
                       * postprocessor: Name of the postprocessor
                       * info_dict: The extracted info_dict

                       Progress hooks are guaranteed to be called at least twice
                       (with status "started" and "finished") if the processing is successful.
    merge_output_format: "/" separated list of extensions to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted
    fixup:             Automatically correct known faults of the file.
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                         about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    sleep_interval_requests: Number of seconds to sleep between requests
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called for every video with the signature
                       (info_dict, *, incomplete: bool) -> Optional[str]
                       For backward compatibility with youtube-dl, the signature
                       (info_dict) -> Optional[str] is also allowed.
                       - If it returns a message, the video is ignored.
                       - If it returns None, the video is downloaded.
                       - If it returns utils.NO_DEFAULT, the user is interactively
                         asked whether to download the video.
                       - Raise utils.DownloadCancelled(msg) to abort remaining
                         downloads when a video is rejected.
                       match_filter_func in utils/_utils.py is one example for this.
                       (See the usage sketch after this option list.)
    color:             A dictionary with output stream names as keys
                       and their respective color policy as values.
                       Can also just be a single color policy,
                       in which case it applies to all outputs.
                       Valid stream names are 'stdout' and 'stderr'.
                       Valid color policies are one of 'always', 'auto', 'no_color' or 'never'.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat, format-sort,
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
                       Refer to __init__.py for their implementation
    progress_template: Dictionary of templates for progress outputs.
                       Allowed keys are 'download', 'postprocess',
                       'download-title' (console title) and 'postprocess-title'.
                       The template is mapped on a dictionary with keys 'progress' and 'info'
    retry_sleep_functions: Dictionary of functions that take the number of attempts
                       as argument and return the time to sleep in seconds.
                       Allowed keys are 'http', 'fragment', 'file_access'
    download_ranges:   A callback function that gets called for every video with
                       the signature (info_dict, ydl) -> Iterable[Section].
                       Only the returned sections will be downloaded.
                       Each Section is a dict with the following keys:
                       * start_time: Start time of the section in seconds
                       * end_time: End time of the section in seconds
                       * title: Section title (Optional)
                       * index: Section number (Optional)
    force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
    noprogress:        Do not print the progress bar
    live_from_start:   Whether to download livestreams from the start
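
    An illustrative sketch of the callable-valued options above (the hook and
    filter bodies are examples, not defaults):

        def my_hook(d):
            if d['status'] == 'finished':
                print('Downloaded', d['filename'])

        def my_filter(info_dict, *, incomplete):
            if (info_dict.get('duration') or 0) > 3600:
                return 'Skipping video longer than an hour'
            return None  # proceed with the download

        ydl_opts = {
            'progress_hooks': [my_hook],
            'match_filter': my_filter,
            'postprocessors': [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3'}],
        }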

    The following parameters are not used by YoutubeDL itself; they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP.
                       For compatibility with youtube-dl, a single list of args
                       can also be used

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors (default: 3)
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists into different formats at
                       discontinuities such as ad breaks (default: False)
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       E.g. {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched:      Mark videos watched (even with --simulate). Only for YouTube

    The following options are deprecated and may be removed in the future:

    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
                       - `raise DownloadCancelled(msg)` in match_filter instead
    force_generic_extractor: Force downloader to use the generic extractor
                       - Use allowed_extractors = ['generic', 'default']
    playliststart:     - Use playlist_items
                       Playlist item to start at.
    playlistend:       - Use playlist_items
                       Playlist item to end at.
    playlistreverse:   - Use playlist_items
                       Download playlist items in reverse order.
    forceurl:          - Use forceprint
                       Force printing final URL.
    forcetitle:        - Use forceprint
                       Force printing title.
    forceid:           - Use forceprint
                       Force printing ID.
    forcethumbnail:    - Use forceprint
                       Force printing thumbnail URL.
    forcedescription:  - Use forceprint
                       Force printing description.
    forcefilename:     - Use forceprint
                       Force printing final filename.
    forceduration:     - Use forceprint
                       Force printing duration.
    allsubtitles:      - Use subtitleslangs = ['all']
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    include_ads:       - Doesn't work
    call_home:         - Not implemented
                       Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging.
    post_hooks:        - Register a custom postprocessor
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.
    prefer_ffmpeg:     - avconv support is deprecated
                       If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: - Use extractor_args
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
    no_color:          Same as `color='no_color'`
    no_overwrites:     Same as `overwrites=False`
    """

    _NUMERIC_FIELDS = {
        'width', 'height', 'asr', 'audio_channels', 'fps',
        'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
    }

    _format_fields = {
        # NB: Keep in sync with the docstring of extractor/common.py
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
        'preference', 'language', 'language_preference', 'quality', 'source_preference', 'cookies',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'hls_aes', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time',
    }

    _format_selection_exts = {
        'audio': set(MEDIA_EXTENSIONS.common_audio),
        'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
        'storyboards': set(MEDIA_EXTENSIONS.storyboards),
    }

    def __init__(self, params=None, auto_init=True):
        """Create a YoutubeDL object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)
        self.__header_cookies = []

        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            out=stdout,
            error=sys.stderr,
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None),
        )

        try:
            windows_enable_vt_mode()
        except Exception as e:
            self.write_debug(f'Failed to enable VT mode: {e}')

        if self.params.get('no_color'):
            if self.params.get('color') is not None:
                self.params.setdefault('_warnings', []).append(
                    'Overwriting params from "color" with "no_color"')
            self.params['color'] = 'no_color'

        term_allow_color = os.environ.get('TERM', '').lower() != 'dumb'

        def process_color_policy(stream):
            stream_name = {sys.stdout: 'stdout', sys.stderr: 'stderr'}[stream]
            policy = traverse_obj(self.params, ('color', (stream_name, None), {str}), get_all=False)
            if policy in ('auto', None):
                return term_allow_color and supports_terminal_sequences(stream)
            assert policy in ('always', 'never', 'no_color'), policy
            return {'always': True, 'never': False}.get(policy, policy)

        self._allow_colors = Namespace(**{
            name: process_color_policy(stream)
            for name, stream in self._out_files.items_ if name != 'console'
        })

        system_deprecation = _get_system_deprecation()
        if system_deprecation:
            self.deprecated_feature(system_deprecation.replace('\n', '\n '))

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                ' If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        if self.params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = shutil.get_terminal_size().columns
                width_args = [] if width is None else ['-w', str(width)]
                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        self.params['http_headers'] = HTTPHeaderDict(std_headers, self.params.get('http_headers'))
        self._load_cookies(self.params['http_headers'].get('Cookie'))  # compat
        self.params['http_headers'].pop('Cookie', None)
        self._request_director = self.build_request_director(_REQUEST_HANDLERS.values(), _RH_PREFERENCES)

        if auto_init and auto_init != 'no_verbose_header':
            self.print_debug_header()

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecated_feature(msg)

        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        if self.params.get('simulate') is None and any((
            self.params.get('list_thumbnails'),
            self.params.get('listformats'),
            self.params.get('listsubtitles'),
        )):
            self.params['simulate'] = 'list_only'

        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}

        self.add_default_info_extractors()

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))

        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                when=when)

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            archive = set()
            if fn is None:
                return archive
            elif not is_path_like(fn):
                return fn

            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
            return archive

        self.archive = preload_download_archive(self.params.get('download_archive'))

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs])
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key. It will try to get one from
        the _ies list; if there's no instance, it will create a new one and add
        it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        all_ies = {ie.IE_NAME.lower(): ie for ie in gen_extractor_classes()}
        all_ies['end'] = UnsupportedURLIE()
        try:
            ie_names = orderedSet_from_options(
                self.params.get('allowed_extractors', ['default']), {
                    'all': list(all_ies),
                    'default': [name for name, ie in all_ies.items() if ie._ENABLED],
                }, use_regex=True)
        except re.error as e:
            raise ValueError(f'Wrong regex for allowed_extractors: {e.pattern}')
        for name in ie_names:
            self.add_info_extractor(all_ies[name])
        self.write_debug(f'Loaded {len(ie_names)} extractors')

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)
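
    # Illustrative only: embedding code could attach an additional postprocessor
    # at runtime; `MyCustomPP` here is a hypothetical PostProcessor subclass.
    #   ydl.add_post_processor(MyCustomPP(), when='after_move')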

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
            for pp in pps:
                pp.add_progress_hook(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. '
                                     'Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. '
                                     'Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)

    def to_screen(self, message, skip_eol=False, quiet=None, only_once=False):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            return
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            return
        self._write_string(
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen, only_once=only_once)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)

    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
            return
        self._write_string(code, self._out_files.console)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        else:
            self._send_console_code(f'\033]0;{message}\007')

    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[22;0t')  # Save the title on stack

    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[23;0t')  # Restore the title from stack

    def __enter__(self):
        self.save_console_title()
        return self

    def save_cookies(self):
        if self.params.get('cookiefile') is not None:
            self.cookiejar.save()

    def __exit__(self, *args):
        self.restore_console_title()
        self.close()

    def close(self):
        self.save_cookies()
        self._request_director.close()

    def trouble(self, message=None, tb=None, is_error=True):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        @param tb          If given, is additional traceback information
        @param is_error    Whether to raise error according to ignoreerrors
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not is_error:
            return
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    Styles = Namespace(
        EMPHASIS='light blue',
        BAD_FORMAT='light red',
        SUPPRESS='light black',
    )

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        text = str(text)
        if test_encoding:
            original_text = text
            # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
                return fallback
        return format_text(text, f) if allow_colors is True else text if fallback is None else fallback

    def _format_out(self, *args, **kwargs):
        return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)

    def _format_screen(self, *args, **kwargs):
        return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)

    def _format_err(self, *args, **kwargs):
        return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)

    def report_warning(self, message, only_once=False):
        """
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
        """
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message, *, stacklevel=0):
        deprecation_warning(
            message, stacklevel=stacklevel + 1, printer=self.report_error, is_error=False)

    def deprecated_feature(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'Deprecated Feature: {message}')
        self.to_stderr(f'{self._format_err("Deprecated Feature:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, *args, **kwargs):
        """
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        """
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

    def write_debug(self, message, only_once=False):
        '''Log debug message to the logger, or print it to stderr'''
        if not self.params.get('verbose', False):
            return
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']

    def _parse_outtmpl(self):
        sanitize = IDENTITY
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict), '"paths" parameter must be a dictionary'
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$'
        # correspondingly that is not what we want since we need to keep
        # '%%' intact for template dict substitution step. Working around
        # with boundary-alike separator hack.
        sep = ''.join(random.choices(string.ascii_letters, k=32))
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. E.g. for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)
, outtmpl
):
1130 ''' @return None or Exception object '''
1132 STR_FORMAT_RE_TMPL
.format('[^)]*', '[ljhqBUDS]'),
1133 lambda mobj
: f
'{mobj.group(0)[:-1]}s',
1134 cls
._outtmpl
_expandpath
(outtmpl
))
1136 cls
.escape_outtmpl(outtmpl
) % collections
.defaultdict(int)
1138 except ValueError as err
:
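
    # Illustrative use of the return convention above (None on success, an
    # exception object on failure):
    #   err = YoutubeDL.validate_outtmpl('%(title)s.%(ext)s')
    #   if err:
    #       raise err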

    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)
        return info_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed
        """

        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))

        # Field is of the form key1.key2...
        # where keys (except first) can be string, int, slice or "{field, ...}"
        FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}
        FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {
            'inner': FIELD_INNER_RE,
            'field': rf'\w*(?:\.{FIELD_INNER_RE})*',
        }
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?xs)
            (?P<negate>-)?
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<remaining>
                (?P<alternate>(?<!\\),[^|&)]+)?
                (?:&(?P<replacement>.*?))?
                (?:\|(?P<default>.*?))?
            )$''')

        def _traverse_infodict(fields):
            fields = [f for x in re.split(r'\.({.+?})\.?', fields)
                      for f in ([x] if x.startswith('{') else x.split('.'))]
            for i in (0, -1):
                if fields and not fields[i]:
                    fields.pop(i)

            for i, f in enumerate(fields):
                if not f.startswith('{'):
                    continue
                assert f.endswith('}'), f'No closing brace for {f} in {fields}'
                fields[i] = {k: k.split('.') for k in f[1:-1].split(',')}

            return traverse_obj(info_dict, fields, is_user_input=True, traverse_string=True)

        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None  # Reset operator
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
            if sanitize and value == '':
                value = None
            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']
                else NO_DEFAULT))

        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)

        class _ReplacementFormatter(string.Formatter):
            def get_field(self, field_name, args, kwargs):
                if field_name.isdigit():
                    return args[0], -1
                raise ValueError('Unsupported field')

        replacement_formatter = _ReplacementFormatter()

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            value, replacement, default, last_field = None, None, na, ''
            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                last_field, replacement = mobj['fields'], mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
                else:
                    break

            if None not in (value, replacement):
                try:
                    value = replacement_formatter.format(replacement, value)
                except ValueError:
                    value, default = None, na

            fmt = outer_mobj.group('format')
            if fmt == 's' and last_field in field_size_compat_map.keys() and isinstance(value, int):
                fmt = f'0{field_size_compat_map[last_field]:d}d'

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
            if value is None:
                value, fmt = default, 's'
            elif fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(
                    value, default=_dumpjson_default,
                    indent=4 if '#' in flags else None, ensure_ascii='+' not in flags), str_fmt
            elif fmt[-1] == 'h':  # html
                value, fmt = escapeHTML(str(value)), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode() % str(value).encode()
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                    value), str_fmt
            elif fmt[-1] == 'D':  # decimal suffix
                num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
                value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
                                              factor=1024 if '#' in flags else 1000)
            elif fmt[-1] == 'S':  # filename sanitization
                value, fmt = filename_sanitizer(last_field, value, restricted='#' in flags), str_fmt
            elif fmt[-1] == 'c':
                if value:
                    value = str(value)[0]
                else:
                    fmt = str_fmt
            elif fmt[-1] not in 'rsa':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'

            if sanitize:
                # If value is an object, sanitize might convert it to a string
                # So we convert it to repr first
                if fmt[-1] == 'r':
                    value, fmt = repr(value), str_fmt
                elif fmt[-1] == 'a':
                    value, fmt = ascii(value), str_fmt
                if fmt[-1] in 'csra':
                    value = sanitizer(last_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT

    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict
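
    # A rough sketch of the template pipeline above (assuming a populated info_dict):
    #   tmpl, tmpl_dict = ydl.prepare_outtmpl('%(title)s [%(id)s].%(ext)s', info_dict)
    #   ydl.escape_outtmpl(tmpl) % tmpl_dict    # this is what evaluate_outtmpl() returns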

    def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
        assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
        if outtmpl is None:
            outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
        try:
            outtmpl = self._outtmpl_expandpath(outtmpl)
            filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
            if not filename:
                return None

            if tmpl_type in ('', 'temp'):
                final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
                if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
                    filename = replace_extension(filename, ext, final_ext)
            elif tmpl_type:
                force_ext = OUTTMPL_TYPES[tmpl_type]
                if force_ext:
                    filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                no_ext, *ext = filename.rsplit('.', 2)
                filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

    def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
        """Generate the output filename"""
        if outtmpl:
            assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
            dir_type = None
        filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
        if not filename and dir_type not in ('', 'temp'):
            return ''

        if warn:
            if not self.params.get('paths'):
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
        if filename == '-' or not filename:
            return filename

        return self.get_output_path(dir_type, filename)

    def _match_entry(self, info_dict, incomplete=False, silent=False):
        """Returns None if the file should be downloaded"""
        _type = 'video' if 'playlist-match-filter' in self.params['compat_opts'] else info_dict.get('_type', 'video')
        assert incomplete or _type == 'video', 'Only video result can be considered complete'

        video_title = info_dict.get('title', info_dict.get('id', 'entry'))

        def check_filter():
            if _type in ('playlist', 'multi_video'):
                return
            elif _type in ('url', 'url_transparent') and not try_call(
                    lambda: self.get_info_extractor(info_dict['ie_key']).is_single_video(info_dict['url'])):
                return

            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'

            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title

            match_filter = self.params.get('match_filter')
            if match_filter is None:
                return None

            cancelled = None
            try:
                try:
                    ret = match_filter(info_dict, incomplete=incomplete)
                except TypeError:
                    # For backward compatibility
                    ret = None if incomplete else match_filter(info_dict)
            except DownloadCancelled as err:
                if err.msg is not NO_DEFAULT:
                    raise
                ret, cancelled = err.msg, err

            if ret is NO_DEFAULT:
                while True:
                    filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
                    reply = input(self._format_screen(
                        f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
                    if reply in {'y', ''}:
                        return None
                    elif reply == 'n':
                        if cancelled:
                            raise type(cancelled)(f'Skipping {video_title}')
                        return f'Skipping {video_title}'
            return ret
1486 if self
.in_download_archive(info_dict
):
1488 format_field(info_dict
, 'id', f
'{self._format_screen("%s", self.Styles.ID)}: '),
1489 format_field(info_dict
, 'title', f
'{self._format_screen("%s", self.Styles.EMPHASIS)} '),
1490 'has already been recorded in the archive'))
1491 break_opt
, break_err
= 'break_on_existing', ExistingVideoReached
1494 reason
= check_filter()
1495 except DownloadCancelled
as e
:
1496 reason
, break_opt
, break_err
= e
.msg
, 'match_filter', type(e
)
1498 break_opt
, break_err
= 'break_on_reject', RejectedVideoReached
1499 if reason
is not None:
1501 self
.to_screen('[download] ' + reason
)
1502 if self
.params
.get(break_opt
, False):
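    # NOTE (editorial, illustrative only - not part of the original module):
    # params['match_filter'] may be any callable taking (info_dict, *, incomplete)
    # and returning None to allow the download or a string explaining the skip;
    # yt-dlp normally builds one with utils.match_filter_func, but a minimal
    # hand-rolled sketch (names here are hypothetical) looks like this:
    #
    #   def only_long_videos(info_dict, *, incomplete=False):
    #       if incomplete:  # playlist entry still being evaluated
    #           return None
    #       if (info_dict.get('duration') or 0) < 60:
    #           return 'Skipping short video'  # printed as "[download] <reason>"
    #       return None  # None means "download it"
    #
    #   ydl = YoutubeDL({'match_filter': only_long_videos})
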
    @staticmethod
    def add_extra_info(info_dict, extra_info):
        '''Set the keys from extra_info in info dict if they are missing'''
        for key, value in extra_info.items():
            info_dict.setdefault(key, value)

    def extract_info(self, url, download=True, ie_key=None, extra_info=None,
                     process=True, force_generic_extractor=False):
        """
        Extract and return the information dictionary of the URL

        @param url          URL to extract

        @param download     Whether to download videos
        @param process      Whether to resolve all unresolved references (URLs, playlist items).
                            Must be True for download to work
        @param ie_key       Use only the extractor with this key

        @param extra_info   Dictionary containing the extra values to add to the info (For internal use only)
        @force_generic_extractor  Force using the generic extractor (Deprecated; use ie_key='Generic')
        """
        if extra_info is None:
            extra_info = {}

        if not ie_key and force_generic_extractor:
            ie_key = 'Generic'

        if ie_key:
            ies = {ie_key: self._ies[ie_key]} if ie_key in self._ies else {}
        else:
            ies = self._ies

        for key, ie in ies.items():
            if not ie.suitable(url):
                continue

            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')

            temp_id = ie.get_temp_id(url)
            if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': key}):
                self.to_screen(f'[download] {self._format_screen(temp_id, self.Styles.ID)}: '
                               'has already been recorded in the archive')
                if self.params.get('break_on_existing', False):
                    raise ExistingVideoReached()
                break
            return self.__extract_info(url, self.get_info_extractor(key), download, extra_info, process)
        else:
            extractors_restricted = self.params.get('allowed_extractors') not in (None, ['default'])
            self.report_error(f'No suitable extractor{format_field(ie_key, None, " (%s)")} found for URL {url}',
                              tb=False if extractors_restricted else None)

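    # NOTE (editorial, illustrative only - not part of the original module):
    # a minimal sketch of how calling code typically drives extract_info; the URL
    # below is hypothetical. ie_key limits the search to a single extractor and
    # process=False returns the unresolved (possibly 'url'/'playlist') result.
    #
    #   with YoutubeDL({'quiet': True}) as ydl:
    #       info = ydl.extract_info('https://example.com/watch?v=abc', download=False)
    #       print(info.get('title'))
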
    def _handle_extraction_exceptions(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            while True:
                try:
                    return func(self, *args, **kwargs)
                except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
                    raise
                except ReExtractInfo as e:
                    if e.expected:
                        self.to_screen(f'{e}; Re-extracting data')
                    else:
                        self.to_stderr('\r')
                        self.report_warning(f'{e}; Re-extracting data')
                    continue
                except GeoRestrictedError as e:
                    msg = e.msg
                    if e.countries:
                        msg += '\nThis video is available in %s.' % ', '.join(
                            map(ISO3166Utils.short2full, e.countries))
                    msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
                    self.report_error(msg)
                except ExtractorError as e:  # An error we somewhat expected
                    self.report_error(str(e), e.format_traceback())
                except Exception as e:
                    if self.params.get('ignoreerrors'):
                        self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
                    else:
                        raise
                break
        return wrapper

    def _wait_for_video(self, ie_result={}):
        if (not self.params.get('wait_for_video')
                or ie_result.get('_type', 'video') != 'video'
                or ie_result.get('formats') or ie_result.get('url')):
            return

        format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
        last_msg = ''

        def progress(msg):
            nonlocal last_msg
            full_msg = f'{msg}\n'
            if not self.params.get('noprogress'):
                full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
            elif last_msg:
                return
            self.to_screen(full_msg, skip_eol=True)
            last_msg = msg

        min_wait, max_wait = self.params.get('wait_for_video')
        diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
        if diff is None and ie_result.get('live_status') == 'is_upcoming':
            diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
            self.report_warning('Release time of video is not known')
        elif ie_result and (diff or 0) <= 0:
            self.report_warning('Video should already be available according to extracted info')
        diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
        self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')

        wait_till = time.time() + diff
        try:
            while True:
                diff = wait_till - time.time()
                if diff <= 0:
                    progress('')
                    raise ReExtractInfo('[wait] Wait period ended', expected=True)
                progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
                time.sleep(1)
        except KeyboardInterrupt:
            progress('')
            raise ReExtractInfo('[wait] Interrupted by user', expected=True)
        except BaseException as e:
            if not isinstance(e, ReExtractInfo):
                raise

    def _load_cookies(self, data, *, autoscope=True):
        """Loads cookies from a `Cookie` header

        This tries to work around the security vulnerability of passing cookies to every domain.
        See: https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-v8mc-9377-rwjj

        @param data         The Cookie header as string to load the cookies from
        @param autoscope    If `False`, scope cookies using Set-Cookie syntax and error for cookie without domains
                            If `True`, save cookies for later to be stored in the jar with a limited scope
                            If a URL, save cookies in the jar with the domain of the URL
        """
        for cookie in LenientSimpleCookie(data).values():
            if autoscope and any(cookie.values()):
                raise ValueError('Invalid syntax in Cookie Header')

            domain = cookie.get('domain') or ''
            expiry = cookie.get('expires')
            if expiry == '':  # 0 is valid
                expiry = None
            prepared_cookie = http.cookiejar.Cookie(
                cookie.get('version') or 0, cookie.key, cookie.value, None, False,
                domain, True, True, cookie.get('path') or '', bool(cookie.get('path')),
                cookie.get('secure') or False, expiry, False, None, None, {})

            if domain:
                self.cookiejar.set_cookie(prepared_cookie)
            elif autoscope is True:
                self.deprecated_feature(
                    'Passing cookies as a header is a potential security risk; '
                    'they will be scoped to the domain of the downloaded urls. '
                    'Please consider loading cookies from a file or browser instead.')
                self.__header_cookies.append(prepared_cookie)
            elif autoscope:
                self.report_warning(
                    'The extractor result contains an unscoped cookie as an HTTP header. '
                    f'If you are using yt-dlp with an input URL{bug_reports_message(before=",")}',
                    only_once=True)
                self._apply_header_cookies(autoscope, [prepared_cookie])
            else:
                self.report_error('Unscoped cookies are not allowed; please specify some sort of scoping',
                                  tb=False, is_error=False)

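    # NOTE (editorial, illustrative only - not part of the original module):
    # a sketch of the three scoping modes described in the docstring above
    # (cookie values and URL are hypothetical):
    #
    #   ydl._load_cookies('sid=abc123', autoscope='https://example.com')  # scoped to .example.com right away
    #   ydl._load_cookies('sid=abc123; Domain=example.com', autoscope=False)  # explicit Set-Cookie style scope
    #   ydl._load_cookies('sid=abc123', autoscope=True)  # deferred; scoped later by _apply_header_cookies(url)
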
    def _apply_header_cookies(self, url, cookies=None):
        """Applies stray header cookies to the provided url

        This loads header cookies and scopes them to the domain provided in `url`.
        While this is not ideal, it helps reduce the risk of them being sent
        to an unintended destination while mostly maintaining compatibility.
        """
        parsed = urllib.parse.urlparse(url)
        if not parsed.hostname:
            return

        for cookie in map(copy.copy, cookies or self.__header_cookies):
            cookie.domain = f'.{parsed.hostname}'
            self.cookiejar.set_cookie(cookie)

    @_handle_extraction_exceptions
    def __extract_info(self, url, ie, download, extra_info, process):
        self._apply_header_cookies(url)

        try:
            ie_result = ie.extract(url)
        except UserNotLive as e:
            if process:
                if self.params.get('wait_for_video'):
                    self.report_warning(e)
                    self._wait_for_video()
            raise
        if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
            self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
            return
        if isinstance(ie_result, list):
            # Backwards compatibility: old IE result format
            ie_result = {
                '_type': 'compat_list',
                'entries': ie_result,
            }
        if extra_info.get('original_url'):
            ie_result.setdefault('original_url', extra_info['original_url'])
        self.add_default_extra_info(ie_result, ie, url)
        if process:
            self._wait_for_video(ie_result)
            return self.process_ie_result(ie_result, download, extra_info)

    def add_default_extra_info(self, ie_result, ie, url):
        if url is not None:
            self.add_extra_info(ie_result, {
                'webpage_url': url,
                'original_url': url,
            })
        webpage_url = ie_result.get('webpage_url')
        if webpage_url:
            self.add_extra_info(ie_result, {
                'webpage_url_basename': url_basename(webpage_url),
                'webpage_url_domain': get_domain(webpage_url),
            })
        self.add_extra_info(ie_result, {
            'extractor': ie.IE_NAME,
            'extractor_key': ie.ie_key(),
        })

    def process_ie_result(self, ie_result, download=True, extra_info=None):
        """
        Take the result of the ie (may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.
        """
        if extra_info is None:
            extra_info = {}
        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            ie_result['url'] = sanitize_url(
                ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
            if ie_result.get('original_url') and not extra_info.get('original_url'):
                extra_info = {'original_url': ie_result['original_url'], **extra_info}

            extract_flat = self.params.get('extract_flat', False)
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                    or extract_flat is True):
                info_copy = ie_result.copy()
                ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
                if ie and not ie_result.get('id'):
                    info_copy['id'] = ie.get_temp_id(ie_result['url'])
                self.add_default_extra_info(info_copy, ie, ie_result['url'])
                self.add_extra_info(info_copy, extra_info)
                info_copy, _ = self.pre_process(info_copy)
                self._fill_common_fields(info_copy, False)
                self.__forced_printings(info_copy)
                self._raise_pending_errors(info_copy)
                if self.params.get('force_write_download_archive', False):
                    self.record_download_archive(info_copy)
                return ie_result

        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            ie_result = self.process_video_result(ie_result, download=download)
            self._raise_pending_errors(ie_result)
            additional_urls = (ie_result or {}).get('additional_urls')
            if additional_urls:
                # TODO: Improve MetadataParserPP to allow setting a list
                if isinstance(additional_urls, str):
                    additional_urls = [additional_urls]
                self.to_screen(
                    '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
                self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
                ie_result['additional_entries'] = [
                    self.extract_info(
                        url, download, extra_info=extra_info,
                        force_generic_extractor=self.params.get('force_generic_extractor'))
                    for url in additional_urls
                ]
            return ie_result
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(
                ie_result['url'], download,
                ie_key=ie_result.get('ie_key'),
                extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # extract_info may return None when ignoreerrors is enabled and
            # extraction failed with an error, don't crash and return early

            exempted_fields = {'_type', 'url', 'ie_key'}
            if not ie_result.get('section_end') and ie_result.get('section_start') is None:
                # For video clips, the id etc of the clip extractor should be used
                exempted_fields |= {'id', 'extractor', 'extractor_key'}

            new_result = info.copy()
            new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))

            # Extracted info may not be a video result (i.e.
            # info.get('_type', 'video') != video) but rather an url or
            # url_transparent. In such cases outer metadata (from ie_result)
            # should be propagated to inner one (info). For this to happen
            # _type of info should be overridden with url_transparent. This
            # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
            if new_result.get('_type') == 'url':
                new_result['_type'] = 'url_transparent'

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type in ('playlist', 'multi_video'):
            # Protect from infinite recursion due to recursively nested playlists
            # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
            webpage_url = ie_result.get('webpage_url')  # Playlists may not have webpage_url
            if webpage_url and webpage_url in self._playlist_urls:
                self.to_screen(
                    '[download] Skipping already downloaded playlist: %s'
                    % ie_result.get('title') or ie_result.get('id'))
                return

            self._playlist_level += 1
            self._playlist_urls.add(webpage_url)
            self._fill_common_fields(ie_result, False)
            self._sanitize_thumbnails(ie_result)
            try:
                return self.__process_playlist(ie_result, download)
            finally:
                self._playlist_level -= 1
                if not self._playlist_level:
                    self._playlist_urls.clear()
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                self.add_extra_info(r, {
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'webpage_url_domain': get_domain(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                })
                return r

            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)

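    # NOTE (editorial, illustrative only - not part of the original module):
    # a 'url_transparent' result is resolved in two steps: the embedded URL is
    # extracted with process=False, then the outer metadata overrides the inner
    # result (everything except _type/url/ie_key). A hypothetical
    #   {'_type': 'url_transparent', 'url': 'https://example.com/v/1', 'title': 'Outer title'}
    # therefore keeps 'Outer title' even if the embedded extractor supplies its own.
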
    def _ensure_dir_exists(self, path):
        return make_dir(path, self.report_error)

    @staticmethod
    def _playlist_infodict(ie_result, strict=False, **kwargs):
        info = {
            'playlist_count': ie_result.get('playlist_count'),
            'playlist': ie_result.get('title') or ie_result.get('id'),
            'playlist_id': ie_result.get('id'),
            'playlist_title': ie_result.get('title'),
            'playlist_uploader': ie_result.get('uploader'),
            'playlist_uploader_id': ie_result.get('uploader_id'),
            **kwargs,
        }
        if strict:
            return info
        if ie_result.get('webpage_url'):
            info.update({
                'webpage_url': ie_result['webpage_url'],
                'webpage_url_basename': url_basename(ie_result['webpage_url']),
                'webpage_url_domain': get_domain(ie_result['webpage_url']),
            })
        return {
            **info,
            'playlist_index': 0,
            '__last_playlist_index': max(ie_result.get('requested_entries') or (0, 0)),
            'extractor': ie_result['extractor'],
            'extractor_key': ie_result['extractor_key'],
        }

    def __process_playlist(self, ie_result, download):
        """Process each entry in the playlist"""
        assert ie_result['_type'] in ('playlist', 'multi_video')

        common_info = self._playlist_infodict(ie_result, strict=True)
        title = common_info.get('playlist') or '<Untitled>'
        if self._match_entry(common_info, incomplete=True) is not None:
            return
        self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')

        all_entries = PlaylistEntries(self, ie_result)
        entries = orderedSet(all_entries.get_requested_items(), lazy=True)

        lazy = self.params.get('lazy_playlist')
        if lazy:
            resolved_entries, n_entries = [], 'N/A'
            ie_result['requested_entries'], ie_result['entries'] = None, None
        else:
            entries = resolved_entries = list(entries)
            n_entries = len(resolved_entries)
            ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
            if not ie_result.get('playlist_count'):
                # Better to do this after potentially exhausting entries
                ie_result['playlist_count'] = all_entries.get_full_count()

        extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
        ie_copy = collections.ChainMap(ie_result, extra)

        _infojson_written = False
        write_playlist_files = self.params.get('allow_playlist_files', True)
        if write_playlist_files and self.params.get('list_thumbnails'):
            self.list_thumbnails(ie_result)
        if write_playlist_files and not self.params.get('simulate'):
            _infojson_written = self._write_info_json(
                'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
            if _infojson_written is None:
                return
            if self._write_description('playlist', ie_result,
                                       self.prepare_filename(ie_copy, 'pl_description')) is None:
                return
            # TODO: This should be passed to ThumbnailsConvertor if necessary
            self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))

        if lazy:
            if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
                self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
        elif self.params.get('playlistreverse'):
            entries.reverse()
        elif self.params.get('playlistrandom'):
            random.shuffle(entries)

        self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} items'
                       f'{format_field(ie_result, "playlist_count", " of %s")}')

        keep_resolved_entries = self.params.get('extract_flat') != 'discard'
        if self.params.get('extract_flat') == 'discard_in_playlist':
            keep_resolved_entries = ie_result['_type'] != 'playlist'
        if keep_resolved_entries:
            self.write_debug('The information of all playlist entries will be held in memory')

        failures = 0
        max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
        for i, (playlist_index, entry) in enumerate(entries):
            if lazy:
                resolved_entries.append((playlist_index, entry))

            entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
            if not lazy and 'playlist-index' in self.params['compat_opts']:
                playlist_index = ie_result['requested_entries'][i]

            entry_copy = collections.ChainMap(entry, {
                **common_info,
                'n_entries': int_or_none(n_entries),
                'playlist_index': playlist_index,
                'playlist_autonumber': i + 1,
            })

            if self._match_entry(entry_copy, incomplete=True) is not None:
                # For compatibility with youtube-dl. See https://github.com/yt-dlp/yt-dlp/issues/4369
                resolved_entries[i] = (playlist_index, NO_DEFAULT)
                continue

            self.to_screen('[download] Downloading item %s of %s' % (
                self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))

            entry_result = self.__process_iterable_entry(entry, download, collections.ChainMap({
                'playlist_index': playlist_index,
                'playlist_autonumber': i + 1,
            }, extra))
            if not entry_result:
                failures += 1
                if failures >= max_failures:
                    self.report_error(
                        f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
                    break
            if keep_resolved_entries:
                resolved_entries[i] = (playlist_index, entry_result)

        # Update with processed data
        ie_result['entries'] = [e for _, e in resolved_entries if e is not NO_DEFAULT]
        ie_result['requested_entries'] = [i for i, e in resolved_entries if e is not NO_DEFAULT]
        if ie_result['requested_entries'] == try_call(lambda: list(range(1, ie_result['playlist_count'] + 1))):
            # Do not set for full playlist
            ie_result.pop('requested_entries')

        # Write the updated info to json
        if _infojson_written is True and self._write_info_json(
                'updated playlist', ie_result,
                self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
            return

        ie_result = self.run_all_pps('playlist', ie_result)
        self.to_screen(f'[download] Finished downloading playlist: {title}')
        return ie_result

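    # NOTE (editorial, illustrative only - not part of the original module):
    # entries arrive as (playlist_index, entry) pairs, so a request such as
    # {'playlist_items': '2,5-6'} typically resolves to requested_entries=(2, 5, 6)
    # with a matching 'entries' tuple; with 'lazy_playlist' the count stays 'N/A'
    # until the iterator is exhausted, and items rejected or failed are dropped
    # from both lists afterwards.
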
    @_handle_extraction_exceptions
    def __process_iterable_entry(self, entry, download, extra_info):
        return self.process_ie_result(
            entry, download=download, extra_info=extra_info)

    def _build_format_filter(self, filter_spec):
        " Returns a function to filter the formats according to the filter_spec "

        operator_rex = re.compile(r'''(?x)\s*
            (?P<key>[\w.-]+)\s*
            (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
            ''' % '|'.join(map(re.escape, OPERATORS.keys())))
        m = operator_rex.fullmatch(filter_spec)
        if m:
            try:
                comparison_value = int(m.group('value'))
            except ValueError:
                comparison_value = parse_filesize(m.group('value'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('value') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid value %r in format specification %r' % (
                            m.group('value'), filter_spec))
            op = OPERATORS[m.group('op')]

        if not m:
            STR_OPERATORS = {
                '=': operator.eq,
                '^=': lambda attr, value: attr.startswith(value),
                '$=': lambda attr, value: attr.endswith(value),
                '*=': lambda attr, value: value in attr,
                '~=': lambda attr, value: value.search(attr) is not None,
            }
            str_operator_rex = re.compile(r'''(?x)\s*
                (?P<key>[a-zA-Z0-9._-]+)\s*
                (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
                (?P<quote>["'])?
                (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
                (?(quote)(?P=quote))\s*
                ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
            m = str_operator_rex.fullmatch(filter_spec)
            if m:
                if m.group('op') == '~=':
                    comparison_value = re.compile(m.group('value'))
                else:
                    comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
                str_op = STR_OPERATORS[m.group('op')]
                if m.group('negation'):
                    op = lambda attr, value: not str_op(attr, value)
                else:
                    op = str_op

        if not m:
            raise SyntaxError('Invalid filter specification %r' % filter_spec)

        def _filter(f):
            actual_value = f.get(m.group('key'))
            if actual_value is None:
                return m.group('none_inclusive')
            return op(actual_value, comparison_value)
        return _filter

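    # NOTE (editorial, illustrative only - not part of the original module):
    # the returned predicate implements the bracketed conditions of a format
    # selector such as "bv[height<=1080][vcodec^=avc1]"; numeric values may use
    # size suffixes ("filesize>100M") and a '?' keeps formats missing the field:
    #
    #   accept = self._build_format_filter('height<=?1080')
    #   accept({'height': 720})  # truthy - 720 <= 1080
    #   accept({})               # truthy - '?' tolerates the missing field
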
    def _check_formats(self, formats):
        for f in formats:
            self.to_screen('[info] Testing format %s' % f['format_id'])
            path = self.get_output_path('temp')
            if not self._ensure_dir_exists(f'{path}/'):
                continue
            temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
            temp_file.close()
            try:
                success, _ = self.dl(temp_file.name, f, test=True)
            except (DownloadError, OSError, ValueError) + network_exceptions:
                success = False
            finally:
                if os.path.exists(temp_file.name):
                    try:
                        os.remove(temp_file.name)
                    except OSError:
                        self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
            if success:
                yield f
            else:
                self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])

    def _default_format_spec(self, info_dict, download=True):
        def can_merge():
            merger = FFmpegMergerPP(self)
            return merger.available and merger.can_merge()

        prefer_best = (
            not self.params.get('simulate')
            and download
            and (
                not can_merge()
                or info_dict.get('is_live') and not self.params.get('live_from_start')
                or self.params['outtmpl']['default'] == '-'))
        compat = (
            prefer_best
            or self.params.get('allow_multiple_audio_streams', False)
            or 'format-spec' in self.params['compat_opts'])

        return (
            'best/bestvideo+bestaudio' if prefer_best
            else 'bestvideo*+bestaudio/best' if not compat
            else 'bestvideo+bestaudio/best')

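    # NOTE (editorial, illustrative only - not part of the original module):
    # i.e. with a working ffmpeg and a real output file the default resolves to
    # 'bestvideo*+bestaudio/best'; when merging is unavailable, for -o -, or for
    # live streams not downloaded from the start it falls back to
    # 'best/bestvideo+bestaudio'; the 'format-spec' compat option keeps the old
    # 'bestvideo+bestaudio/best'.
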
    def build_format_selector(self, format_spec):
        def syntax_error(note, start):
            message = (
                'Invalid format specification: '
                '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
            return SyntaxError(message)

        PICKFIRST = 'PICKFIRST'
        MERGE = 'MERGE'
        SINGLE = 'SINGLE'
        GROUP = 'GROUP'
        FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])

        allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
                                  'video': self.params.get('allow_multiple_video_streams', False)}

        def _parse_filter(tokens):
            filter_parts = []
            for type, string_, start, _, _ in tokens:
                if type == tokenize.OP and string_ == ']':
                    return ''.join(filter_parts)
                else:
                    filter_parts.append(string_)

        def _remove_unused_ops(tokens):
            # Remove operators that we don't use and join them with the surrounding strings.
            # E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
            ALLOWED_OPS = ('/', '+', ',', '(', ')')
            last_string, last_start, last_end, last_line = None, None, None, None
            for type, string_, start, end, line in tokens:
                if type == tokenize.OP and string_ == '[':
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string_, start, end, line
                    # everything inside brackets will be handled by _parse_filter
                    for type, string_, start, end, line in tokens:
                        yield type, string_, start, end, line
                        if type == tokenize.OP and string_ == ']':
                            break
                elif type == tokenize.OP and string_ in ALLOWED_OPS:
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string_, start, end, line
                elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
                    if not last_string:
                        last_string = string_
                        last_start = start
                        last_end = end
                    else:
                        last_string += string_
            if last_string:
                yield tokenize.NAME, last_string, last_start, last_end, last_line

        def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
            selectors = []
            current_selector = None
            for type, string_, start, _, _ in tokens:
                # ENCODING is only defined in python 3.x
                if type == getattr(tokenize, 'ENCODING', None):
                    continue
                elif type in [tokenize.NAME, tokenize.NUMBER]:
                    current_selector = FormatSelector(SINGLE, string_, [])
                elif type == tokenize.OP:
                    if string_ == ')':
                        if not inside_group:
                            # ')' will be handled by the parentheses group
                            tokens.restore_last_token()
                        break
                    elif inside_merge and string_ in ['/', ',']:
                        tokens.restore_last_token()
                        break
                    elif inside_choice and string_ == ',':
                        tokens.restore_last_token()
                        break
                    elif string_ == ',':
                        if not current_selector:
                            raise syntax_error('"," must follow a format selector', start)
                        selectors.append(current_selector)
                        current_selector = None
                    elif string_ == '/':
                        if not current_selector:
                            raise syntax_error('"/" must follow a format selector', start)
                        first_choice = current_selector
                        second_choice = _parse_format_selection(tokens, inside_choice=True)
                        current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
                    elif string_ == '[':
                        if not current_selector:
                            current_selector = FormatSelector(SINGLE, 'best', [])
                        format_filter = _parse_filter(tokens)
                        current_selector.filters.append(format_filter)
                    elif string_ == '(':
                        if current_selector:
                            raise syntax_error('Unexpected "("', start)
                        group = _parse_format_selection(tokens, inside_group=True)
                        current_selector = FormatSelector(GROUP, group, [])
                    elif string_ == '+':
                        if not current_selector:
                            raise syntax_error('Unexpected "+"', start)
                        selector_1 = current_selector
                        selector_2 = _parse_format_selection(tokens, inside_merge=True)
                        if not selector_2:
                            raise syntax_error('Expected a selector', start)
                        current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
                    else:
                        raise syntax_error(f'Operator not recognized: "{string_}"', start)
                elif type == tokenize.ENDMARKER:
                    break
            if current_selector:
                selectors.append(current_selector)
            return selectors

        def _merge(formats_pair):
            format_1, format_2 = formats_pair

            formats_info = []
            formats_info.extend(format_1.get('requested_formats', (format_1,)))
            formats_info.extend(format_2.get('requested_formats', (format_2,)))

            if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
                get_no_more = {'video': False, 'audio': False}
                for (i, fmt_info) in enumerate(formats_info):
                    if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
                        continue
                    for aud_vid in ['audio', 'video']:
                        if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
                            if get_no_more[aud_vid]:
                                break
                            get_no_more[aud_vid] = True

            if len(formats_info) == 1:
                return formats_info[0]

            video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
            audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']

            the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
            the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None

            output_ext = get_compatible_ext(
                vcodecs=[f.get('vcodec') for f in video_fmts],
                acodecs=[f.get('acodec') for f in audio_fmts],
                vexts=[f['ext'] for f in video_fmts],
                aexts=[f['ext'] for f in audio_fmts],
                preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
                             or self.params.get('prefer_free_formats') and ('webm', 'mkv')))

            filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))

            new_dict = {
                'requested_formats': formats_info,
                'format': '+'.join(filtered('format')),
                'format_id': '+'.join(filtered('format_id')),
                'ext': output_ext,
                'protocol': '+'.join(map(determine_protocol, formats_info)),
                'language': '+'.join(orderedSet(filtered('language'))) or None,
                'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
                'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
                'tbr': sum(filtered('tbr', 'vbr', 'abr')),
            }

            if the_only_video:
                new_dict.update({
                    'width': the_only_video.get('width'),
                    'height': the_only_video.get('height'),
                    'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
                    'fps': the_only_video.get('fps'),
                    'dynamic_range': the_only_video.get('dynamic_range'),
                    'vcodec': the_only_video.get('vcodec'),
                    'vbr': the_only_video.get('vbr'),
                    'stretched_ratio': the_only_video.get('stretched_ratio'),
                    'aspect_ratio': the_only_video.get('aspect_ratio'),
                })

            if the_only_audio:
                new_dict.update({
                    'acodec': the_only_audio.get('acodec'),
                    'abr': the_only_audio.get('abr'),
                    'asr': the_only_audio.get('asr'),
                    'audio_channels': the_only_audio.get('audio_channels'),
                })

            return new_dict

        def _check_formats(formats):
            if self.params.get('check_formats') == 'selected':
                yield from self._check_formats(formats)
                return
            elif (self.params.get('check_formats') is not None
                    or self.params.get('allow_unplayable_formats')):
                yield from formats
                return

            for f in formats:
                if f.get('has_drm') or f.get('__needs_testing'):
                    yield from self._check_formats([f])
                else:
                    yield f

        def _build_selector_function(selector):
            if isinstance(selector, list):  # ,
                fs = [_build_selector_function(s) for s in selector]

                def selector_function(ctx):
                    for f in fs:
                        yield from f(ctx)
                return selector_function

            elif selector.type == GROUP:  # ()
                selector_function = _build_selector_function(selector.selector)

            elif selector.type == PICKFIRST:  # /
                fs = [_build_selector_function(s) for s in selector.selector]

                def selector_function(ctx):
                    for f in fs:
                        picked_formats = list(f(ctx))
                        if picked_formats:
                            return picked_formats
                    return []

            elif selector.type == MERGE:  # +
                selector_1, selector_2 = map(_build_selector_function, selector.selector)

                def selector_function(ctx):
                    for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
                        yield _merge(pair)

            elif selector.type == SINGLE:  # atom
                format_spec = selector.selector or 'best'

                # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
                if format_spec == 'all':
                    def selector_function(ctx):
                        yield from _check_formats(ctx['formats'][::-1])
                elif format_spec == 'mergeall':
                    def selector_function(ctx):
                        formats = list(_check_formats(
                            f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
                        if not formats:
                            return
                        merged_format = formats[-1]
                        for f in formats[-2::-1]:
                            merged_format = _merge((merged_format, f))
                        yield merged_format

                else:
                    format_fallback, seperate_fallback, format_reverse, format_idx = False, None, True, 1
                    mobj = re.fullmatch(
                        r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
                        format_spec)
                    if mobj is not None:
                        format_idx = int_or_none(mobj.group('n'), default=1)
                        format_reverse = mobj.group('bw')[0] == 'b'
                        format_type = (mobj.group('type') or [None])[0]
                        not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
                        format_modified = mobj.group('mod') is not None

                        format_fallback = not format_type and not format_modified  # for b, w
                        _filter_f = (
                            (lambda f: f.get('%scodec' % format_type) != 'none')
                            if format_type and format_modified  # bv*, ba*, wv*, wa*
                            else (lambda f: f.get('%scodec' % not_format_type) == 'none')
                            if format_type  # bv, ba, wv, wa
                            else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
                            if not format_modified  # b, w
                            else lambda f: True)  # b*, w*
                        filter_f = lambda f: _filter_f(f) and (
                            f.get('vcodec') != 'none' or f.get('acodec') != 'none')
                    else:
                        if format_spec in self._format_selection_exts['audio']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
                        elif format_spec in self._format_selection_exts['video']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
                            seperate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
                        elif format_spec in self._format_selection_exts['storyboards']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
                        else:
                            filter_f = lambda f: f.get('format_id') == format_spec  # id

                    def selector_function(ctx):
                        formats = list(ctx['formats'])
                        matches = list(filter(filter_f, formats)) if filter_f is not None else formats
                        if format_fallback and ctx['incomplete_formats']:
                            # for extractors with incomplete formats (audio only (soundcloud)
                            # or video only (imgur)) best/worst will fall back to
                            # best/worst {video,audio}-only format
                            matches = formats
                        elif seperate_fallback and not ctx['has_merged_format']:
                            # for compatibility with youtube-dl when there is no pre-merged format
                            matches = list(filter(seperate_fallback, formats))
                        matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
                        try:
                            yield matches[format_idx - 1]
                        except LazyList.IndexError:
                            return

            filters = [self._build_format_filter(f) for f in selector.filters]

            def final_selector(ctx):
                ctx_copy = dict(ctx)
                for _filter in filters:
                    ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
                return selector_function(ctx_copy)
            return final_selector

        stream = io.BytesIO(format_spec.encode())
        try:
            tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
        except tokenize.TokenError:
            raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))

        class TokenIterator:
            def __init__(self, tokens):
                self.tokens = tokens
                self.counter = 0

            def __iter__(self):
                return self

            def __next__(self):
                if self.counter >= len(self.tokens):
                    raise StopIteration()
                value = self.tokens[self.counter]
                self.counter += 1
                return value

            next = __next__

            def restore_last_token(self):
                self.counter -= 1

        parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
        return _build_selector_function(parsed_selector)

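    # NOTE (editorial, illustrative only - not part of the original module):
    # the compiled selector is a function of a ctx dict and yields the chosen
    # format dicts; '+' merges video+audio, '/' is a fallback, ',' selects
    # several, '()' groups and '[...]' attaches filters. A hypothetical use:
    #
    #   selector = ydl.build_format_selector('bv*[height<=720]+ba/b[height<=720]')
    #   chosen = list(selector({'formats': formats, 'has_merged_format': False,
    #                           'incomplete_formats': False}))
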
    def _calc_headers(self, info_dict, load_cookies=False):
        res = HTTPHeaderDict(self.params['http_headers'], info_dict.get('http_headers'))

        if load_cookies:  # For --load-info-json
            self._load_cookies(res.get('Cookie'), autoscope=info_dict['url'])  # compat
            self._load_cookies(info_dict.get('cookies'), autoscope=False)
        # The `Cookie` header is removed to prevent leaks and unscoped cookies.
        # See: https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-v8mc-9377-rwjj
        res.pop('Cookie', None)
        cookies = self.cookiejar.get_cookies_for_url(info_dict['url'])
        if cookies:
            encoder = LenientSimpleCookie()
            values = []
            for cookie in cookies:
                _, value = encoder.value_encode(cookie.value)
                values.append(f'{cookie.name}={value}')
                if cookie.domain:
                    values.append(f'Domain={cookie.domain}')
                if cookie.path:
                    values.append(f'Path={cookie.path}')
                if cookie.secure:
                    values.append('Secure')
                if cookie.expires:
                    values.append(f'Expires={cookie.expires}')
                if cookie.version:
                    values.append(f'Version={cookie.version}')
            info_dict['cookies'] = '; '.join(values)

        if 'X-Forwarded-For' not in res:
            x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
            if x_forwarded_for_ip:
                res['X-Forwarded-For'] = x_forwarded_for_ip

        return res

    def _calc_cookies(self, url):
        self.deprecation_warning('"YoutubeDL._calc_cookies" is deprecated and may be removed in a future version')
        return self.cookiejar.get_cookie_header(url)

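    # NOTE (editorial, illustrative only - not part of the original module):
    # instead of forwarding a raw 'Cookie' request header, the cookies matching
    # the URL are serialized back into info_dict['cookies'] together with their
    # scope, e.g. (hypothetical values) 'sid=abc123; Domain=.example.com; Path=/; Secure'.
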
    def _sort_thumbnails(self, thumbnails):
        thumbnails.sort(key=lambda t: (
            t.get('preference') if t.get('preference') is not None else -1,
            t.get('width') if t.get('width') is not None else -1,
            t.get('height') if t.get('height') is not None else -1,
            t.get('id') if t.get('id') is not None else '',
        ))

    def _sanitize_thumbnails(self, info_dict):
        thumbnails = info_dict.get('thumbnails')
        if thumbnails is None:
            thumbnail = info_dict.get('thumbnail')
            if thumbnail:
                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]

        def check_thumbnails(thumbnails):
            for t in thumbnails:
                self.to_screen(f'[info] Testing thumbnail {t["id"]}')
                try:
                    self.urlopen(HEADRequest(t['url']))
                except network_exceptions as err:
                    self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
                    continue
                yield t

        self._sort_thumbnails(thumbnails)
        for i, t in enumerate(thumbnails):
            if t.get('id') is None:
                t['id'] = '%d' % i
            if t.get('width') and t.get('height'):
                t['resolution'] = '%dx%d' % (t['width'], t['height'])
            t['url'] = sanitize_url(t['url'])

        if self.params.get('check_formats') is True:
            info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
        else:
            info_dict['thumbnails'] = thumbnails

    def _fill_common_fields(self, info_dict, final=True):
        # TODO: move sanitization here
        if final:
            title = info_dict['fulltitle'] = info_dict.get('title')
            if not title:
                if title == '':
                    self.write_debug('Extractor gave empty title. Creating a generic title')
                else:
                    self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
                info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'

        if info_dict.get('duration') is not None:
            info_dict['duration_string'] = formatSeconds(info_dict['duration'])

        for ts_key, date_key in (
                ('timestamp', 'upload_date'),
                ('release_timestamp', 'release_date'),
                ('modified_timestamp', 'modified_date'),
        ):
            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
                # see http://bugs.python.org/issue1646728)
                with contextlib.suppress(ValueError, OverflowError, OSError):
                    upload_date = datetime.datetime.fromtimestamp(info_dict[ts_key], datetime.timezone.utc)
                    info_dict[date_key] = upload_date.strftime('%Y%m%d')

        live_keys = ('is_live', 'was_live')
        live_status = info_dict.get('live_status')
        if live_status is None:
            for key in live_keys:
                if info_dict.get(key) is False:
                    continue
                if info_dict.get(key):
                    live_status = 'is_live'
                    break
            if all(info_dict.get(key) is False for key in live_keys):
                live_status = 'not_live'

        info_dict['live_status'] = live_status
        for key in live_keys:
            if info_dict.get(key) is None:
                info_dict[key] = (live_status == key)
        if live_status == 'post_live':
            info_dict['was_live'] = True

        # Auto generate title fields corresponding to the *_number fields when missing
        # in order to always have clean titles. This is very common for TV series.
        for field in ('chapter', 'season', 'episode'):
            if final and info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
                info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])

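    # NOTE (editorial, illustrative only - not part of the original module):
    # e.g. a result carrying only {'timestamp': 1700000000} gains
    # upload_date='20231114' (UTC), and one with {'episode_number': 3} but no
    # 'episode' title gains episode='Episode 3' on the final pass.
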
    def _raise_pending_errors(self, info):
        err = info.pop('__pending_error', None)
        if err:
            self.report_error(err, tb=False)

    def sort_formats(self, info_dict):
        formats = self._get_formats(info_dict)
        formats.sort(key=FormatSorter(
            self, info_dict.get('_format_sort_fields') or []).calculate_preference)

    def process_video_result(self, info_dict, download=True):
        assert info_dict.get('_type', 'video') == 'video'
        self._num_videos += 1

        if 'id' not in info_dict:
            raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
        elif not info_dict.get('id'):
            raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])

        def report_force_conversion(field, field_not, conversion):
            self.report_warning(
                '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
                % (field, field_not, conversion))

        def sanitize_string_field(info, string_field):
            field = info.get(string_field)
            if field is None or isinstance(field, str):
                return
            report_force_conversion(string_field, 'a string', 'string')
            info[string_field] = str(field)

        def sanitize_numeric_fields(info):
            for numeric_field in self._NUMERIC_FIELDS:
                field = info.get(numeric_field)
                if field is None or isinstance(field, (int, float)):
                    continue
                report_force_conversion(numeric_field, 'numeric', 'int')
                info[numeric_field] = int_or_none(field)

        sanitize_string_field(info_dict, 'id')
        sanitize_numeric_fields(info_dict)
        if info_dict.get('section_end') and info_dict.get('section_start') is not None:
            info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
        if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
            self.report_warning('"duration" field is negative, there is an error in extractor')

        chapters = info_dict.get('chapters') or []
        if chapters and chapters[0].get('start_time'):
            chapters.insert(0, {'start_time': 0})

        dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
        for idx, (prev, current, next_) in enumerate(zip(
                (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
            if current.get('start_time') is None:
                current['start_time'] = prev.get('end_time')
            if not current.get('end_time'):
                current['end_time'] = next_.get('start_time')
            if not current.get('title'):
                current['title'] = f'<Untitled Chapter {idx}>'

        if 'playlist' not in info_dict:
            # It isn't part of a playlist
            info_dict['playlist'] = None
            info_dict['playlist_index'] = None

        self._sanitize_thumbnails(info_dict)

        thumbnail = info_dict.get('thumbnail')
        thumbnails = info_dict.get('thumbnails')
        if thumbnail:
            info_dict['thumbnail'] = sanitize_url(thumbnail)
        elif thumbnails:
            info_dict['thumbnail'] = thumbnails[-1]['url']

        if info_dict.get('display_id') is None and 'id' in info_dict:
            info_dict['display_id'] = info_dict['id']

        self._fill_common_fields(info_dict)

        for cc_kind in ('subtitles', 'automatic_captions'):
            cc = info_dict.get(cc_kind)
            if cc:
                for _, subtitle in cc.items():
                    for subtitle_format in subtitle:
                        if subtitle_format.get('url'):
                            subtitle_format['url'] = sanitize_url(subtitle_format['url'])
                        if subtitle_format.get('ext') is None:
                            subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()

        automatic_captions = info_dict.get('automatic_captions')
        subtitles = info_dict.get('subtitles')

        info_dict['requested_subtitles'] = self.process_subtitles(
            info_dict['id'], subtitles, automatic_captions)

        formats = self._get_formats(info_dict)

        # Backward compatibility with InfoExtractor._sort_formats
        field_preference = (formats or [{}])[0].pop('__sort_fields', None)
        if field_preference:
            info_dict['_format_sort_fields'] = field_preference

        info_dict['_has_drm'] = any(  # or None ensures --clean-infojson removes it
            f.get('has_drm') and f['has_drm'] != 'maybe' for f in formats) or None
        if not self.params.get('allow_unplayable_formats'):
            formats = [f for f in formats if not f.get('has_drm') or f['has_drm'] == 'maybe']

        if formats and all(f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
            self.report_warning(
                f'{"This video is DRM protected and " if info_dict["_has_drm"] else ""}'
                'only images are available for download. Use --list-formats to see them'.capitalize())

        get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
        if not get_from_start:
            info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
        if info_dict.get('is_live') and formats:
            formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
            if get_from_start and not formats:
                self.raise_no_formats(info_dict, msg=(
                    '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
                    'If you want to download from the current time, use --no-live-from-start'))

        def is_wellformed(f):
            url = f.get('url')
            if not url:
                self.report_warning(
                    '"url" field is missing or empty - skipping format, '
                    'there is an error in extractor')
                return False
            if isinstance(url, bytes):
                sanitize_string_field(f, 'url')
            return True

        # Filter out malformed formats for better extraction robustness
        formats = list(filter(is_wellformed, formats or []))

        if not formats:
            self.raise_no_formats(info_dict)

        for format in formats:
            sanitize_string_field(format, 'format_id')
            sanitize_numeric_fields(format)
            format['url'] = sanitize_url(format['url'])
            if format.get('ext') is None:
                format['ext'] = determine_ext(format['url']).lower()
            if format.get('protocol') is None:
                format['protocol'] = determine_protocol(format)
            if format.get('resolution') is None:
                format['resolution'] = self.format_resolution(format, default=None)
            if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
                format['dynamic_range'] = 'SDR'
            if format.get('aspect_ratio') is None:
                format['aspect_ratio'] = try_call(lambda: round(format['width'] / format['height'], 2))
            # For fragmented formats, "tbr" is often max bitrate and not average
            if (('manifest-filesize-approx' in self.params['compat_opts'] or not format.get('manifest_url'))
                    and info_dict.get('duration') and format.get('tbr')
                    and not format.get('filesize') and not format.get('filesize_approx')):
                format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
            format['http_headers'] = self._calc_headers(collections.ChainMap(format, info_dict), load_cookies=True)

        # Safeguard against old/insecure infojson when using --load-info-json
        if info_dict.get('http_headers'):
            info_dict['http_headers'] = HTTPHeaderDict(info_dict['http_headers'])
            info_dict['http_headers'].pop('Cookie', None)

        # This is copied to http_headers by the above _calc_headers and can now be removed
        if '__x_forwarded_for_ip' in info_dict:
            del info_dict['__x_forwarded_for_ip']

        self.sort_formats({
            'formats': formats,
            '_format_sort_fields': info_dict.get('_format_sort_fields')})

        # Sanitize and group by format_id
        formats_dict = {}
        for i, format in enumerate(formats):
            if not format.get('format_id'):
                format['format_id'] = str(i)
            else:
                # Sanitize format_id from characters used in format selector expression
                format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
            formats_dict.setdefault(format['format_id'], []).append(format)

        # Make sure all formats have unique format_id
        common_exts = set(itertools.chain(*self._format_selection_exts.values()))
        for format_id, ambiguous_formats in formats_dict.items():
            ambigious_id = len(ambiguous_formats) > 1
            for i, format in enumerate(ambiguous_formats):
                if ambigious_id:
                    format['format_id'] = '%s-%d' % (format_id, i)
                # Ensure there is no conflict between id and ext in format selection
                # See https://github.com/yt-dlp/yt-dlp/issues/1282
                if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
                    format['format_id'] = 'f%s' % format['format_id']

                if format.get('format') is None:
                    format['format'] = '{id} - {res}{note}'.format(
                        id=format['format_id'],
                        res=self.format_resolution(format),
                        note=format_field(format, 'format_note', ' (%s)'),
                    )

        if self.params.get('check_formats') is True:
            formats = LazyList(self._check_formats(formats[::-1]), reverse=True)

        if not formats or formats[0] is not info_dict:
            # only set the 'formats' field if the original info_dict lists them
            # otherwise we end up with a circular reference, the first (and unique)
            # element in the 'formats' field in info_dict is info_dict itself,
            # which can't be exported to json
            info_dict['formats'] = formats

        info_dict, _ = self.pre_process(info_dict)

        if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
            return info_dict

        self.post_extract(info_dict)
        info_dict, _ = self.pre_process(info_dict, 'after_filter')

        # The pre-processors may have modified the formats
        formats = self._get_formats(info_dict)

        list_only = self.params.get('simulate') == 'list_only'
        interactive_format_selection = not list_only and self.format_selector == '-'
        if self.params.get('list_thumbnails'):
            self.list_thumbnails(info_dict)
        if self.params.get('listsubtitles'):
            if 'automatic_captions' in info_dict:
                self.list_subtitles(
                    info_dict['id'], automatic_captions, 'automatic captions')
            self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
        if self.params.get('listformats') or interactive_format_selection:
            self.list_formats(info_dict)
        if list_only:
            # Without this printing, -F --print-json will not work
            self.__forced_printings(info_dict)
            return info_dict

        format_selector = self.format_selector
        while True:
            if interactive_format_selection:
                req_format = input(self._format_screen('\nEnter format selector ', self.Styles.EMPHASIS)
                                   + '(Press ENTER for default, or Ctrl+C to quit)'
                                   + self._format_screen(': ', self.Styles.EMPHASIS))
                try:
                    format_selector = self.build_format_selector(req_format) if req_format else None
                except SyntaxError as err:
                    self.report_error(err, tb=False, is_error=False)
                    continue

            if format_selector is None:
                req_format = self._default_format_spec(info_dict, download=download)
                self.write_debug(f'Default format spec: {req_format}')
                format_selector = self.build_format_selector(req_format)

            formats_to_download = list(format_selector({
                'formats': formats,
                'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
                'incomplete_formats': (all(f.get('vcodec') == 'none' for f in formats)  # No formats with video
                                       or all(f.get('acodec') == 'none' for f in formats)),  # OR, No formats with audio
            }))
            if interactive_format_selection and not formats_to_download:
                self.report_error('Requested format is not available', tb=False, is_error=False)
                continue
            break

        if not formats_to_download:
            if not self.params.get('ignore_no_formats_error'):
                raise ExtractorError(
                    'Requested format is not available. Use --list-formats for a list of available formats',
                    expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
            self.report_warning('Requested format is not available')
            # Process what we can, even without any available formats.
            formats_to_download = [{}]

        requested_ranges = tuple(self.params.get('download_ranges', lambda *_: [{}])(info_dict, self))
        best_format, downloaded_formats = formats_to_download[-1], []
        if download:
            if best_format and requested_ranges:
                def to_screen(*msg):
                    self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')

                to_screen(f'Downloading {len(formats_to_download)} format(s):',
                          (f['format_id'] for f in formats_to_download))
                if requested_ranges != ({}, ):
                    to_screen(f'Downloading {len(requested_ranges)} time ranges:',
                              (f'{c["start_time"]:.1f}-{c["end_time"]:.1f}' for c in requested_ranges))
            max_downloads_reached = False

            for fmt, chapter in itertools.product(formats_to_download, requested_ranges):
                new_info = self._copy_infodict(info_dict)
                new_info.update(fmt)
                offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
                end_time = offset + min(chapter.get('end_time', duration), duration)
                # duration may not be accurate. So allow deviations <1sec
                if end_time == float('inf') or end_time > offset + duration + 1:
                    continue
                if chapter or offset:
                    new_info.update({
                        'section_start': offset + chapter.get('start_time', 0),
                        'section_end': end_time,
                        'section_title': chapter.get('title'),
                        'section_number': chapter.get('index'),
                    })
                downloaded_formats.append(new_info)
                try:
                    self.process_info(new_info)
                except MaxDownloadsReached:
                    max_downloads_reached = True
                self._raise_pending_errors(new_info)
                # Remove copied info
                for key, val in tuple(new_info.items()):
                    if info_dict.get(key) == val:
                        new_info.pop(key)
                if max_downloads_reached:
                    break

            write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
            assert write_archive.issubset({True, False, 'ignore'})
            if True in write_archive and False not in write_archive:
                self.record_download_archive(info_dict)

            info_dict['requested_downloads'] = downloaded_formats
            info_dict = self.run_all_pps('after_video', info_dict)
            if max_downloads_reached:
                raise MaxDownloadsReached()

        # We update the info dict with the selected best quality format (backwards compatibility)
        info_dict.update(best_format)
        return info_dict

    def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
        """Select the requested subtitles and their format"""
        available_subs, normal_sub_langs = {}, []
        if normal_subtitles and self.params.get('writesubtitles'):
            available_subs.update(normal_subtitles)
            normal_sub_langs = tuple(normal_subtitles.keys())
        if automatic_captions and self.params.get('writeautomaticsub'):
            for lang, cap_info in automatic_captions.items():
                if lang not in available_subs:
                    available_subs[lang] = cap_info

        if not available_subs or (
                not self.params.get('writesubtitles')
                and not self.params.get('writeautomaticsub')):
            return None

        all_sub_langs = tuple(available_subs.keys())
        if self.params.get('allsubtitles', False):
            requested_langs = all_sub_langs
        elif self.params.get('subtitleslangs', False):
            try:
                requested_langs = orderedSet_from_options(
                    self.params.get('subtitleslangs'), {'all': all_sub_langs}, use_regex=True)
            except re.error as e:
                raise ValueError(f'Wrong regex for subtitleslangs: {e.pattern}')
        else:
            requested_langs = LazyList(itertools.chain(
                ['en'] if 'en' in normal_sub_langs else [],
                filter(lambda f: f.startswith('en'), normal_sub_langs),
                ['en'] if 'en' in all_sub_langs else [],
                filter(lambda f: f.startswith('en'), all_sub_langs),
                normal_sub_langs, all_sub_langs,
            ))
        if requested_langs:
            self.to_screen(f'[info] {video_id}: Downloading subtitles: {", ".join(requested_langs)}')

        formats_query = self.params.get('subtitlesformat', 'best')
        formats_preference = formats_query.split('/') if formats_query else []
        subs = {}
        for lang in requested_langs:
            formats = available_subs.get(lang)
            if formats is None:
                self.report_warning(f'{lang} subtitles not available for {video_id}')
                continue
            for ext in formats_preference:
                if ext == 'best':
                    f = formats[-1]
                    break
                matches = list(filter(lambda f: f['ext'] == ext, formats))
                if matches:
                    f = matches[-1]
                    break
            else:
                f = formats[-1]
                self.report_warning(
                    'No subtitle format found matching "%s" for language %s, '
                    'using %s' % (formats_query, lang, f['ext']))
            subs[lang] = f
        return subs

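    # NOTE (editorial, illustrative only - not part of the original module):
    # e.g. with {'writesubtitles': True, 'subtitleslangs': ['en.*', '-live_chat']}
    # the list is expanded by orderedSet_from_options using regexes, so 'en',
    # 'en-US' and 'en-GB' are kept while 'live_chat' is dropped; the format is
    # then picked per language from params['subtitlesformat'] (e.g. 'srt/best').
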
    def _forceprint(self, key, info_dict):
        if info_dict is None:
            return
        info_copy = info_dict.copy()
        info_copy.setdefault('filename', self.prepare_filename(info_dict))
        if info_dict.get('requested_formats') is not None:
            # For RTMP URLs, also include the playpath
            info_copy['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
        elif info_dict.get('url'):
            info_copy['urls'] = info_dict['url'] + info_dict.get('play_path', '')
        info_copy['formats_table'] = self.render_formats_table(info_dict)
        info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
        info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
        info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))

        def format_tmpl(tmpl):
            mobj = re.fullmatch(r'([\w.:,]|-\d|(?P<dict>{([\w.:,]|-\d)+}))+=?', tmpl)
            if not mobj:
                return tmpl

            fmt = '%({})s'
            if tmpl.startswith('{'):
                tmpl, fmt = f'.{tmpl}', '%({})j'
            if tmpl.endswith('='):
                tmpl, fmt = tmpl[:-1], '{0} = %({0})#j'
            return '\n'.join(map(fmt.format, [tmpl] if mobj.group('dict') else tmpl.split(',')))

        for tmpl in self.params['forceprint'].get(key, []):
            self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))

        for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
            filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
            tmpl = format_tmpl(tmpl)
            self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
            if self._ensure_dir_exists(filename):
                with open(filename, 'a', encoding='utf-8', newline='') as f:
                    f.write(self.evaluate_outtmpl(tmpl, info_copy) + os.linesep)
        return info_copy

3047 def __forced_printings(self
, info_dict
, filename
=None, incomplete
=True):
3048 if (self
.params
.get('forcejson')
3049 or self
.params
['forceprint'].get('video')
3050 or self
.params
['print_to_file'].get('video')):
3051 self
.post_extract(info_dict
)
3053 info_dict
['filename'] = filename
3054 info_copy
= self
._forceprint
('video', info_dict
)
3056 def print_field(field
, actual_field
=None, optional
=False):
3057 if actual_field
is None:
3058 actual_field
= field
3059 if self
.params
.get(f
'force{field}') and (
3060 info_copy
.get(field
) is not None or (not optional
and not incomplete
)):
3061 self
.to_stdout(info_copy
[actual_field
])
3063 print_field('title')
3065 print_field('url', 'urls')
3066 print_field('thumbnail', optional
=True)
3067 print_field('description', optional
=True)
3068 print_field('filename')
3069 if self
.params
.get('forceduration') and info_copy
.get('duration') is not None:
3070 self
.to_stdout(formatSeconds(info_copy
['duration']))
3071 print_field('format')
3073 if self
.params
.get('forcejson'):
3074 self
.to_stdout(json
.dumps(self
.sanitize_info(info_dict
)))
    def dl(self, name, info, subtitle=False, test=False):
        if not info.get('url'):
            self.raise_no_formats(info, True)

        if test:
            verbose = self.params.get('verbose')
            params = {
                'test': True,
                'quiet': self.params.get('quiet') or not verbose,
                'verbose': verbose,
                'noprogress': not verbose,
                'nopart': True,
                'skip_unavailable_fragments': False,
                'keep_fragments': False,
                'overwrites': True,
                '_no_ytdl_file': True,
            }
        else:
            params = self.params

        fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
        if not test:
            for ph in self._progress_hooks:
                fd.add_progress_hook(ph)
            urls = '", "'.join(
                (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
                for f in info.get('requested_formats', []) or [info])
            self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')

        # Note: Ideally info should be a deep-copied so that hooks cannot modify it.
        # But it may contain objects that are not deep-copyable
        new_info = self._copy_infodict(info)
        if new_info.get('http_headers') is None:
            new_info['http_headers'] = self._calc_headers(new_info)
        return fd.download(name, new_info, subtitle)
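    # Illustrative note (not part of the original source): dl() resolves the concrete
    # downloader via get_suitable_downloader() and returns whatever fd.download() returns,
    # which callers in this file unpack as a (success, real_download) pair, e.g.
    #   success, real_download = self.dl(temp_filename, info_dict)
    # When test=True, a reduced params dict is used so the probe download stays quiet
    # and writes no .ytdl resume file.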
    def existing_file(self, filepaths, *, default_overwrite=True):
        existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
        if existing_files and not self.params.get('overwrites', default_overwrite):
            return existing_files[0]

        for file in existing_files:
            self.report_file_delete(file)
            os.remove(file)
        return None

    def process_info(self, info_dict):
        """Process a single resolved IE result. (Modifies it in-place)"""

        assert info_dict.get('_type', 'video') == 'video'
        original_infodict = info_dict

        if 'format' not in info_dict and 'ext' in info_dict:
            info_dict['format'] = info_dict['ext']

        if self._match_entry(info_dict) is not None:
            info_dict['__write_download_archive'] = 'ignore'
            return

        # Does nothing under normal operation - for backward compatibility of process_info
        self.post_extract(info_dict)

        def replace_info_dict(new_info):
            nonlocal info_dict
            if new_info == info_dict:
                return
            info_dict.clear()
            info_dict.update(new_info)

        new_info, _ = self.pre_process(info_dict, 'video')
        replace_info_dict(new_info)
        self._num_downloads += 1

        # info_dict['_filename'] needs to be set for backward compatibility
        info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
        temp_filename = self.prepare_filename(info_dict, 'temp')
        files_to_move = {}

        # Forced printings
        self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
        def check_max_downloads():
            if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
                raise MaxDownloadsReached()

        if self.params.get('simulate'):
            info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
            check_max_downloads()
            return

        if full_filename is None:
            return
        if not self._ensure_dir_exists(encodeFilename(full_filename)):
            return
        if not self._ensure_dir_exists(encodeFilename(temp_filename)):
            return

        if self._write_description('video', info_dict,
                                   self.prepare_filename(info_dict, 'description')) is None:
            return

        sub_files = self._write_subtitles(info_dict, temp_filename)
        if sub_files is None:
            return
        files_to_move.update(dict(sub_files))

        thumb_files = self._write_thumbnails(
            'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
        if thumb_files is None:
            return
        files_to_move.update(dict(thumb_files))

        infofn = self.prepare_filename(info_dict, 'infojson')
        _infojson_written = self._write_info_json('video', info_dict, infofn)
        if _infojson_written:
            info_dict['infojson_filename'] = infofn
            # For backward compatibility, even though it was a private field
            info_dict['__infojson_filename'] = infofn
        elif _infojson_written is None:
            return

        # Note: Annotations are deprecated
        annofn = None
        if self.params.get('writeannotations', False):
            annofn = self.prepare_filename(info_dict, 'annotation')
        if annofn:
            if not self._ensure_dir_exists(encodeFilename(annofn)):
                return
            if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
                self.to_screen('[info] Video annotations are already present')
            elif not info_dict.get('annotations'):
                self.report_warning('There are no annotations to write.')
            else:
                try:
                    self.to_screen('[info] Writing video annotations to: ' + annofn)
                    with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                        annofile.write(info_dict['annotations'])
                except (KeyError, TypeError):
                    self.report_warning('There are no annotations to write.')
                except OSError:
                    self.report_error('Cannot write annotations file: ' + annofn)
                    return

        # Write internet shortcut files
        def _write_link_file(link_type):
            url = try_get(info_dict['webpage_url'], iri_to_uri)
            if not url:
                self.report_warning(
                    f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
                return True
            linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
            if not self._ensure_dir_exists(encodeFilename(linkfn)):
                return False
            if self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
                self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
                return True
            try:
                self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
                with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
                          newline='\r\n' if link_type == 'url' else '\n') as linkfile:
                    template_vars = {'url': url}
                    if link_type == 'desktop':
                        template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
                    linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
            except OSError:
                self.report_error(f'Cannot write internet shortcut {linkfn}')
                return False
            return True

        write_links = {
            'url': self.params.get('writeurllink'),
            'webloc': self.params.get('writewebloclink'),
            'desktop': self.params.get('writedesktoplink'),
        }
        if self.params.get('writelink'):
            link_type = ('webloc' if sys.platform == 'darwin'
                         else 'desktop' if sys.platform.startswith('linux')
                         else 'url')
            write_links[link_type] = True

        if any(should_write and not _write_link_file(link_type)
               for link_type, should_write in write_links.items()):
            return

        new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
        replace_info_dict(new_info)

        if self.params.get('skip_download'):
            info_dict['filepath'] = temp_filename
            info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
            info_dict['__files_to_move'] = files_to_move
            replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
            info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
        else:
            # Download
            info_dict.setdefault('__postprocessors', [])
            try:

                def existing_video_file(*filepaths):
                    ext = info_dict.get('ext')
                    converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
                    file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
                                              default_overwrite=False)
                    if file:
                        info_dict['ext'] = os.path.splitext(file)[1][1:]
                    return file

                fd, success = None, True
                if info_dict.get('protocol') or info_dict.get('url'):
                    fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
                    if fd != FFmpegFD and 'no-direct-merge' not in self.params['compat_opts'] and (
                            info_dict.get('section_start') or info_dict.get('section_end')):
                        msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
                               else 'You have requested downloading the video partially, but ffmpeg is not installed')
                        self.report_error(f'{msg}. Aborting')
                        return

                if info_dict.get('requested_formats') is not None:
                    old_ext = info_dict['ext']
                    if self.params.get('merge_output_format') is None:
                        if (info_dict['ext'] == 'webm'
                                and info_dict.get('thumbnails')
                                # check with type instead of pp_key, __name__, or isinstance
                                # since we dont want any custom PPs to trigger this
                                and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])):  # noqa: E721
                            info_dict['ext'] = 'mkv'
                            self.report_warning(
                                'webm doesn\'t support embedding a thumbnail, mkv will be used')
                    new_ext = info_dict['ext']

                    def correct_ext(filename, ext=new_ext):
                        if filename == '-':
                            return filename
                        filename_real_ext = os.path.splitext(filename)[1][1:]
                        filename_wo_ext = (
                            os.path.splitext(filename)[0]
                            if filename_real_ext in (old_ext, new_ext)
                            else filename)
                        return f'{filename_wo_ext}.{ext}'

                    # Ensure filename always has a correct extension for successful merge
                    full_filename = correct_ext(full_filename)
                    temp_filename = correct_ext(temp_filename)
                    dl_filename = existing_video_file(full_filename, temp_filename)

                    info_dict['__real_download'] = False
                    # NOTE: Copy so that original format dicts are not modified
                    info_dict['requested_formats'] = list(map(dict, info_dict['requested_formats']))

                    merger = FFmpegMergerPP(self)
                    downloaded = []
                    if dl_filename is not None:
                        self.report_file_already_downloaded(dl_filename)
                    elif fd:
                        for f in info_dict['requested_formats'] if fd != FFmpegFD else []:
                            f['filepath'] = fname = prepend_extension(
                                correct_ext(temp_filename, info_dict['ext']),
                                'f%s' % f['format_id'], info_dict['ext'])
                            downloaded.append(fname)
                        info_dict['url'] = '\n'.join(f['url'] for f in info_dict['requested_formats'])
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        if self.params.get('allow_unplayable_formats'):
                            self.report_warning(
                                'You have requested merging of multiple formats '
                                'while also allowing unplayable formats to be downloaded. '
                                'The formats won\'t be merged to prevent data corruption.')
                        elif not merger.available:
                            msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
                            if not self.params.get('ignoreerrors'):
                                self.report_error(f'{msg}. Aborting due to --abort-on-error')
                                return
                            self.report_warning(f'{msg}. The formats won\'t be merged')

                        if temp_filename == '-':
                            reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
                                      else 'but the formats are incompatible for simultaneous download' if merger.available
                                      else 'but ffmpeg is not installed')
                            self.report_warning(
                                f'You have requested downloading multiple formats to stdout {reason}. '
                                'The formats will be streamed one after the other')
                            fname = temp_filename
                        for f in info_dict['requested_formats']:
                            new_info = dict(info_dict)
                            del new_info['requested_formats']
                            new_info.update(f)
                            if temp_filename != '-':
                                fname = prepend_extension(
                                    correct_ext(temp_filename, new_info['ext']),
                                    'f%s' % f['format_id'], new_info['ext'])
                                if not self._ensure_dir_exists(fname):
                                    return
                                f['filepath'] = fname
                                downloaded.append(fname)
                            partial_success, real_download = self.dl(fname, new_info)
                            info_dict['__real_download'] = info_dict['__real_download'] or real_download
                            success = success and partial_success

                    if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
                        info_dict['__postprocessors'].append(merger)
                        info_dict['__files_to_merge'] = downloaded
                        # Even if there were no downloads, it is being merged only now
                        info_dict['__real_download'] = True
                    else:
                        for file in downloaded:
                            files_to_move[file] = None
                else:
                    # Just a single file
                    dl_filename = existing_video_file(full_filename, temp_filename)
                    if dl_filename is None or dl_filename == temp_filename:
                        # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
                        # So we should try to resume the download
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        self.report_file_already_downloaded(dl_filename)

                dl_filename = dl_filename or temp_filename
                info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))

            except network_exceptions as err:
                self.report_error('unable to download video data: %s' % error_to_compat_str(err))
                return
            except OSError as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
                return

            self._raise_pending_errors(info_dict)
            if success and full_filename != '-':

                def fixup():
                    do_fixup = True
                    fixup_policy = self.params.get('fixup')
                    vid = info_dict['id']

                    if fixup_policy in ('ignore', 'never'):
                        do_fixup = False
                    elif fixup_policy == 'warn':
                        do_fixup = 'warn'
                    elif fixup_policy != 'force':
                        assert fixup_policy in ('detect_or_warn', None)
                        if not info_dict.get('__real_download'):
                            do_fixup = False

                    def ffmpeg_fixup(cndn, msg, cls):
                        if not (do_fixup and cndn):
                            return
                        elif do_fixup == 'warn':
                            self.report_warning(f'{vid}: {msg}')
                            return
                        pp = cls(self)
                        if pp.available:
                            info_dict['__postprocessors'].append(pp)
                        else:
                            self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')

                    stretched_ratio = info_dict.get('stretched_ratio')
                    ffmpeg_fixup(stretched_ratio not in (1, None),
                                 f'Non-uniform pixel ratio {stretched_ratio}',
                                 FFmpegFixupStretchedPP)

                    downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
                    downloader = downloader.FD_NAME if downloader else None

                    ext = info_dict.get('ext')
                    postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
                        isinstance(pp, FFmpegVideoConvertorPP)
                        and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
                    ) for pp in self._pps['post_process'])

                    if not postprocessed_by_ffmpeg:
                        ffmpeg_fixup(fd != FFmpegFD and ext == 'm4a'
                                     and info_dict.get('container') == 'm4a_dash',
                                     'writing DASH m4a. Only some players support this container',
                                     FFmpegFixupM4aPP)
                        ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
                                     or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
                                     'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
                                     FFmpegFixupM3u8PP)
                        ffmpeg_fixup(info_dict.get('is_live') and downloader == 'dashsegments',
                                     'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)

                    ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
                    ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)

                fixup()
                try:
                    replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
                except PostProcessingError as err:
                    self.report_error('Postprocessing: %s' % str(err))
                    return
                try:
                    for ph in self._post_hooks:
                        ph(info_dict['filepath'])
                except Exception as err:
                    self.report_error('post hooks: %s' % str(err))
                    return
                info_dict['__write_download_archive'] = True

        assert info_dict is original_infodict  # Make sure the info_dict was modified in-place
        if self.params.get('force_write_download_archive'):
            info_dict['__write_download_archive'] = True
        check_max_downloads()
    def __download_wrapper(self, func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                res = func(*args, **kwargs)
            except UnavailableVideoError as e:
                self.report_error(e)
            except DownloadCancelled as e:
                self.to_screen(f'[info] {e}')
                if not self.params.get('break_per_url'):
                    raise
                self._num_downloads = 0
            else:
                if self.params.get('dump_single_json', False):
                    self.post_extract(res)
                    self.to_stdout(json.dumps(self.sanitize_info(res)))
        return wrapper

    def download(self, url_list):
        """Download a given list of URLs."""
        url_list = variadic(url_list)  # Passing a single URL is a common mistake
        outtmpl = self.params['outtmpl']['default']
        if (len(url_list) > 1
                and outtmpl != '-'
                and '%' not in outtmpl
                and self.params.get('max_downloads') != 1):
            raise SameFileError(outtmpl)

        for url in url_list:
            self.__download_wrapper(self.extract_info)(
                url, force_generic_extractor=self.params.get('force_generic_extractor', False))

        return self._download_retcode
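    # Minimal usage sketch (illustration only; the URL and options are placeholders):
    #   with YoutubeDL({'outtmpl': '%(title)s.%(ext)s'}) as ydl:
    #       retcode = ydl.download(['https://example.com/watch?v=xxxx'])
    # Passing several URLs while 'outtmpl' is a fixed filename (no '%' template fields)
    # raises SameFileError, as guarded above.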
    def download_with_info_file(self, info_filename):
        with contextlib.closing(fileinput.FileInput(
                [info_filename], mode='r',
                openhook=fileinput.hook_encoded('utf-8'))) as f:
            # FileInput doesn't have a read method, we can't call json.load
            infos = [self.sanitize_info(info, self.params.get('clean_infojson', True))
                     for info in variadic(json.loads('\n'.join(f)))]
        for info in infos:
            try:
                self.__download_wrapper(self.process_ie_result)(info, download=True)
            except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
                if not isinstance(e, EntryNotInPlaylist):
                    self.to_stderr('\r')
                webpage_url = info.get('webpage_url')
                if webpage_url is None:
                    raise
                self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
                self.download([webpage_url])
        return self._download_retcode
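    # Illustrative note (not part of the original source): this is the API counterpart of
    # the --load-info-json option. A hypothetical call would be
    #   ydl.download_with_info_file('video.info.json')
    # where the file contains an info JSON (a single dict or a list of dicts) previously
    # written via 'writeinfojson'; failed entries fall back to re-extracting their
    # 'webpage_url'.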
    @staticmethod
    def sanitize_info(info_dict, remove_private_keys=False):
        ''' Sanitize the infodict for converting to json '''
        if info_dict is None:
            return info_dict
        info_dict.setdefault('epoch', int(time.time()))
        info_dict.setdefault('_type', 'video')
        info_dict.setdefault('_version', {
            'version': __version__,
            'current_git_head': current_git_head(),
            'release_git_head': RELEASE_GIT_HEAD,
            'repository': REPOSITORY,
        })

        if remove_private_keys:
            reject = lambda k, v: v is None or k.startswith('__') or k in {
                'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
                'entries', 'filepath', '_filename', 'filename', 'infojson_filename', 'original_url',
                'playlist_autonumber',
            }
        else:
            reject = lambda k, v: False

        def filter_fn(obj):
            if isinstance(obj, dict):
                return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
            elif isinstance(obj, (list, tuple, set, LazyList)):
                return list(map(filter_fn, obj))
            elif obj is None or isinstance(obj, (str, int, float, bool)):
                return obj
            else:
                return repr(obj)

        return filter_fn(info_dict)
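    # Illustrative note (not part of the original source): sanitize_info() is what makes an
    # info dict JSON-safe, e.g. (hypothetical variable names)
    #   clean = YoutubeDL.sanitize_info(info, remove_private_keys=True)
    #   json.dumps(clean)
    # With remove_private_keys=True, None values, '__'-prefixed keys and the internal keys
    # listed above are dropped; any other non-serialisable object is replaced by its repr().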
    @staticmethod
    def filter_requested_info(info_dict, actually_filter=True):
        ''' Alias of sanitize_info for backward compatibility '''
        return YoutubeDL.sanitize_info(info_dict, actually_filter)

    def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
        for filename in set(filter(None, files_to_delete)):
            if msg:
                self.to_screen(msg % filename)
            try:
                os.remove(filename)
            except OSError:
                self.report_warning(f'Unable to delete file {filename}')
            if filename in info.get('__files_to_move', []):  # NB: Delete even if None
                del info['__files_to_move'][filename]

    @staticmethod
    def post_extract(info_dict):
        def actual_post_extract(info_dict):
            if info_dict.get('_type') in ('playlist', 'multi_video'):
                for video_dict in info_dict.get('entries', {}):
                    actual_post_extract(video_dict or {})
                return

            post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
            info_dict.update(post_extractor())

        actual_post_extract(info_dict or {})

    def run_pp(self, pp, infodict):
        files_to_delete = []
        if '__files_to_move' not in infodict:
            infodict['__files_to_move'] = {}
        try:
            files_to_delete, infodict = pp.run(infodict)
        except PostProcessingError as e:
            # Must be True and not 'only_download'
            if self.params.get('ignoreerrors') is True:
                self.report_error(e)
                return infodict
            raise

        if not files_to_delete:
            return infodict
        if self.params.get('keepvideo', False):
            for f in files_to_delete:
                infodict['__files_to_move'].setdefault(f, '')
        else:
            self._delete_downloaded_files(
                *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
        return infodict

    def run_all_pps(self, key, info, *, additional_pps=None):
        if key != 'video':
            self._forceprint(key, info)
        for pp in (additional_pps or []) + self._pps[key]:
            info = self.run_pp(pp, info)
        return info

    def pre_process(self, ie_info, key='pre_process', files_to_move=None):
        info = dict(ie_info)
        info['__files_to_move'] = files_to_move or {}
        try:
            info = self.run_all_pps(key, info)
        except PostProcessingError as err:
            msg = f'Preprocessing: {err}'
            info.setdefault('__pending_error', msg)
            self.report_error(msg, is_error=False)
        return info, info.pop('__files_to_move', None)
    def post_process(self, filename, info, files_to_move=None):
        """Run all the postprocessors on the given file."""
        info['filepath'] = filename
        info['__files_to_move'] = files_to_move or {}
        info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
        info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
        del info['__files_to_move']
        return self.run_all_pps('after_move', info)
    def _make_archive_id(self, info_dict):
        video_id = info_dict.get('id')
        if not video_id:
            return
        # Future-proof against any change in case
        # and backwards compatibility with prior versions
        extractor = info_dict.get('extractor_key') or info_dict.get('ie_key')  # key in a playlist
        if extractor is None:
            url = str_or_none(info_dict.get('url'))
            if not url:
                return
            # Try to find matching extractor for the URL and take its ie_key
            for ie_key, ie in self._ies.items():
                if ie.suitable(url):
                    extractor = ie_key
                    break
            else:
                return
        return make_archive_id(extractor, video_id)

    def in_download_archive(self, info_dict):
        if not self.archive:
            return False

        vid_ids = [self._make_archive_id(info_dict)]
        vid_ids.extend(info_dict.get('_old_archive_ids') or [])
        return any(id_ in self.archive for id_ in vid_ids)

    def record_download_archive(self, info_dict):
        fn = self.params.get('download_archive')
        if fn is None:
            return
        vid_id = self._make_archive_id(info_dict)
        assert vid_id

        self.write_debug(f'Adding to archive: {vid_id}')
        if is_path_like(fn):
            with locked_file(fn, 'a', encoding='utf-8') as archive_file:
                archive_file.write(vid_id + '\n')
        self.archive.add(vid_id)
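    # Illustrative note (not part of the original source): archive entries are the ids
    # produced by _make_archive_id(), i.e. make_archive_id(extractor, video_id), typically
    # rendered as '<extractor> <id>' with one entry per line in the --download-archive file;
    # in_download_archive() also honours any '_old_archive_ids' carried in the info dict.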
    @staticmethod
    def format_resolution(format, default='unknown'):
        if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
            return 'audio only'
        if format.get('resolution') is not None:
            return format['resolution']
        if format.get('width') and format.get('height'):
            return '%dx%d' % (format['width'], format['height'])
        elif format.get('height'):
            return '%sp' % format['height']
        elif format.get('width'):
            return '%dx?' % format['width']
        return default
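    # Examples of the mapping above (hypothetical inputs): {'width': 1920, 'height': 1080}
    # -> '1920x1080'; {'height': 720} -> '720p'; {'width': 640} -> '640x?'; a format with
    # vcodec == 'none' and a real acodec -> 'audio only'; otherwise the given default.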
    def _list_format_headers(self, *headers):
        if self.params.get('listformats_table', True) is not False:
            return [self._format_out(header, self.Styles.HEADERS) for header in headers]
        return headers

    def _format_note(self, fdict):
        res = ''
        if fdict.get('ext') in ['f4f', 'f4m']:
            res += '(unsupported)'
        if fdict.get('language'):
            if res:
                res += ' '
            res += '[%s]' % fdict['language']
        if fdict.get('format_note') is not None:
            if res:
                res += ' '
            res += fdict['format_note']
        if fdict.get('tbr') is not None:
            if res:
                res += ', '
            res += '%4dk' % fdict['tbr']
        if fdict.get('container') is not None:
            if res:
                res += ', '
            res += '%s container' % fdict['container']
        if (fdict.get('vcodec') is not None
                and fdict.get('vcodec') != 'none'):
            if res:
                res += ', '
            res += fdict['vcodec']
            if fdict.get('vbr') is not None:
                res += '@'
        elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
            res += 'video@'
        if fdict.get('vbr') is not None:
            res += '%4dk' % fdict['vbr']
        if fdict.get('fps') is not None:
            if res:
                res += ', '
            res += '%sfps' % fdict['fps']
        if fdict.get('acodec') is not None:
            if res:
                res += ', '
            if fdict['acodec'] == 'none':
                res += 'video only'
            else:
                res += '%-5s' % fdict['acodec']
        elif fdict.get('abr') is not None:
            if res:
                res += ', '
            res += 'audio'
        if fdict.get('abr') is not None:
            res += '@%3dk' % fdict['abr']
        if fdict.get('asr') is not None:
            res += ' (%5dHz)' % fdict['asr']
        if fdict.get('filesize') is not None:
            if res:
                res += ', '
            res += format_bytes(fdict['filesize'])
        elif fdict.get('filesize_approx') is not None:
            if res:
                res += ', '
            res += '~' + format_bytes(fdict['filesize_approx'])
        return res

    def _get_formats(self, info_dict):
        if info_dict.get('formats') is None:
            if info_dict.get('url') and info_dict.get('_type', 'video') == 'video':
                return [info_dict]
            return []
        return info_dict['formats']
    def render_formats_table(self, info_dict):
        formats = self._get_formats(info_dict)
        if not formats:
            return
        if not self.params.get('listformats_table', True) is not False:
            table = [
                [
                    format_field(f, 'format_id'),
                    format_field(f, 'ext'),
                    self.format_resolution(f),
                    self._format_note(f),
                ] for f in formats if (f.get('preference') or 0) >= -1000]
            return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)

        def simplified_codec(f, field):
            assert field in ('acodec', 'vcodec')
            codec = f.get(field)
            if not codec:
                return 'unknown'
            elif codec != 'none':
                return '.'.join(codec.split('.')[:4])

            if field == 'vcodec' and f.get('acodec') == 'none':
                return 'images'
            elif field == 'acodec' and f.get('vcodec') == 'none':
                return ''
            return self._format_out('audio only' if field == 'vcodec' else 'video only',
                                    self.Styles.SUPPRESS)

        delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
        table = [[
            self._format_out(format_field(f, 'format_id'), self.Styles.ID),
            format_field(f, 'ext'),
            format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
            format_field(f, 'fps', '\t%d', func=round),
            format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
            format_field(f, 'audio_channels', '\t%s'),
            delim, (
                format_field(f, 'filesize', ' \t%s', func=format_bytes)
                or format_field(f, 'filesize_approx', '≈\t%s', func=format_bytes)
                or format_field(try_call(lambda: format_bytes(int(info_dict['duration'] * f['tbr'] * (1024 / 8)))),
                                None, self._format_out('~\t%s', self.Styles.SUPPRESS))),
            format_field(f, 'tbr', '\t%dk', func=round),
            shorten_protocol_name(f.get('protocol', '')),
            delim,
            simplified_codec(f, 'vcodec'),
            format_field(f, 'vbr', '\t%dk', func=round),
            simplified_codec(f, 'acodec'),
            format_field(f, 'abr', '\t%dk', func=round),
            format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
            join_nonempty(format_field(f, 'language', '[%s]'), join_nonempty(
                self._format_out('UNSUPPORTED', self.Styles.BAD_FORMAT) if f.get('ext') in ('f4f', 'f4m') else None,
                (self._format_out('Maybe DRM', self.Styles.WARNING) if f.get('has_drm') == 'maybe'
                 else self._format_out('DRM', self.Styles.BAD_FORMAT) if f.get('has_drm') else None),
                format_field(f, 'format_note'),
                format_field(f, 'container', ignore=(None, f.get('ext'))),
                delim=', '), delim=' '),
        ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
        header_line = self._list_format_headers(
            'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', 'CH', delim, '\tFILESIZE', '\tTBR', 'PROTO',
            delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')

        return render_table(
            header_line, table, hide_empty=True,
            delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
    def render_thumbnails_table(self, info_dict):
        thumbnails = list(info_dict.get('thumbnails') or [])
        if not thumbnails:
            return None
        return render_table(
            self._list_format_headers('ID', 'Width', 'Height', 'URL'),
            [[t.get('id'), t.get('width') or 'unknown', t.get('height') or 'unknown', t['url']] for t in thumbnails])

    def render_subtitles_table(self, video_id, subtitles):
        def _row(lang, formats):
            exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
            if len(set(names)) == 1:
                names = [] if names[0] == 'unknown' else names[:1]
            return [lang, ', '.join(names), ', '.join(exts)]

        if not subtitles:
            return None
        return render_table(
            self._list_format_headers('Language', 'Name', 'Formats'),
            [_row(lang, formats) for lang, formats in subtitles.items()],
            hide_empty=True)

    def __list_table(self, video_id, name, func, *args):
        table = func(*args)
        if not table:
            self.to_screen(f'{video_id} has no {name}')
            return
        self.to_screen(f'[info] Available {name} for {video_id}:')
        self.to_stdout(table)
    def list_formats(self, info_dict):
        self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)

    def list_thumbnails(self, info_dict):
        self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)

    def list_subtitles(self, video_id, subtitles, name='subtitles'):
        self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
    def print_debug_header(self):
        if not self.params.get('verbose'):
            return

        from . import _IN_CLI  # Must be delayed import

        # These imports can be slow. So import them only as needed
        from .extractor.extractors import _LAZY_LOADER
        from .extractor.extractors import (
            _PLUGIN_CLASSES as plugin_ies,
            _PLUGIN_OVERRIDES as plugin_ie_overrides,
        )

        def get_encoding(stream):
            ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
            additional_info = []
            if os.environ.get('TERM', '').lower() == 'dumb':
                additional_info.append('dumb')
            if not supports_terminal_sequences(stream):
                from .utils import WINDOWS_VT_MODE  # Must be imported locally
                additional_info.append('No VT' if WINDOWS_VT_MODE is False else 'No ANSI')
            if additional_info:
                ret = f'{ret} ({",".join(additional_info)})'
            return ret

        encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            self.get_encoding(),
            ', '.join(
                f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
                if stream is not None and key != 'console'))

        logger = self.params.get('logger')
        if logger:
            write_debug = lambda msg: logger.debug(f'[debug] {msg}')
            write_debug(encoding_str)
        else:
            write_string(f'[debug] {encoding_str}\n', encoding=None)
            write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')

        source = detect_variant()
        if VARIANT not in (None, 'pip'):
            source += '*'
        klass = type(self)
        write_debug(join_nonempty(
            f'{"yt-dlp" if REPOSITORY == "yt-dlp/yt-dlp" else REPOSITORY} version',
            f'{CHANNEL}@{__version__}',
            f'[{RELEASE_GIT_HEAD[:9]}]' if RELEASE_GIT_HEAD else '',
            '' if source == 'unknown' else f'({source})',
            '' if _IN_CLI else 'API' if klass == YoutubeDL else f'API:{self.__module__}.{klass.__qualname__}',
            delim=' '))

        if not _IN_CLI:
            write_debug(f'params: {self.params}')

        if not _LAZY_LOADER:
            if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
                write_debug('Lazy loading extractors is forcibly disabled')
            else:
                write_debug('Lazy loading extractors is disabled')
        if self.params['compat_opts']:
            write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))

        if current_git_head():
            write_debug(f'Git HEAD: {current_git_head()}')
        write_debug(system_identifier())

        exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
        ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
        if ffmpeg_features:
            exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))

        exe_versions['rtmpdump'] = rtmpdump_version()
        exe_versions['phantomjs'] = PhantomJSwrapper._version()
        exe_str = ', '.join(
            f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
        ) or 'none'
        write_debug('exe versions: %s' % exe_str)

        from .compat.compat_utils import get_package_info
        from .dependencies import available_dependencies

        write_debug('Optional libraries: %s' % (', '.join(sorted({
            join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
        })) or 'none'))

        write_debug(f'Proxy map: {self.proxies}')
        write_debug(f'Request Handlers: {", ".join(rh.RH_NAME for rh in self._request_director.handlers.values())}')
        for plugin_type, plugins in {'Extractor': plugin_ies, 'Post-Processor': plugin_pps}.items():
            display_list = ['%s%s' % (
                klass.__name__, '' if klass.__name__ == name else f' as {name}')
                for name, klass in plugins.items()]
            if plugin_type == 'Extractor':
                display_list.extend(f'{plugins[-1].IE_NAME.partition("+")[2]} ({parent.__name__})'
                                    for parent, plugins in plugin_ie_overrides.items())
            if not display_list:
                continue
            write_debug(f'{plugin_type} Plugins: {", ".join(sorted(display_list))}')

        plugin_dirs = plugin_directories()
        if plugin_dirs:
            write_debug(f'Plugin directories: {plugin_dirs}')

        # Not implemented
        if False and self.params.get('call_home'):
            ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
            write_debug('Public IP address: %s' % ipaddr)
            latest_version = self.urlopen(
                'https://yt-dl.org/latest/version').read().decode()
            if version_tuple(latest_version) > version_tuple(__version__):
                self.report_warning(
                    'You are using an outdated version (newest version: %s)! '
                    'See https://yt-dl.org/update if you need help updating.' %
                    latest_version)
    @functools.cached_property
    def proxies(self):
        """Global proxy configuration"""
        opts_proxy = self.params.get('proxy')
        if opts_proxy is not None:
            if opts_proxy == '':
                opts_proxy = '__noproxy__'
            proxies = {'all': opts_proxy}
        else:
            proxies = urllib.request.getproxies()
            # compat. Set HTTPS_PROXY to __noproxy__ to revert
            if 'http' in proxies and 'https' not in proxies:
                proxies['https'] = proxies['http']

        return proxies
    @functools.cached_property
    def cookiejar(self):
        """Global cookiejar instance"""
        return load_cookies(
            self.params.get('cookiefile'), self.params.get('cookiesfrombrowser'), self)
    @property
    def _opener(self):
        """
        Get a urllib OpenerDirector from the Urllib handler (deprecated).
        """
        self.deprecation_warning('YoutubeDL._opener is deprecated, use YoutubeDL.urlopen()')
        handler = self._request_director.handlers['Urllib']
        return handler._get_instance(cookiejar=self.cookiejar, proxies=self.proxies)
    def urlopen(self, req):
        """ Start an HTTP download """
        if isinstance(req, str):
            req = Request(req)
        elif isinstance(req, urllib.request.Request):
            self.deprecation_warning(
                'Passing a urllib.request.Request object to YoutubeDL.urlopen() is deprecated. '
                'Use yt_dlp.networking.common.Request instead.')
            req = urllib_req_to_req(req)
        assert isinstance(req, Request)

        # compat: Assume user:pass url params are basic auth
        url, basic_auth_header = extract_basic_auth(req.url)
        if basic_auth_header:
            req.headers['Authorization'] = basic_auth_header
        req.url = sanitize_url(url)

        clean_proxies(proxies=req.proxies, headers=req.headers)
        clean_headers(req.headers)

        try:
            return self._request_director.send(req)
        except NoSupportingHandlers as e:
            for ue in e.unsupported_errors:
                if not (ue.handler and ue.msg):
                    continue
                if ue.handler.RH_KEY == 'Urllib' and 'unsupported url scheme: "file"' in ue.msg.lower():
                    raise RequestError(
                        'file:// URLs are disabled by default in yt-dlp for security reasons. '
                        'Use --enable-file-urls to enable at your own risk.', cause=ue) from ue
                if 'unsupported proxy type: "https"' in ue.msg.lower():
                    raise RequestError(
                        'To use an HTTPS proxy for this request, one of the following dependencies needs to be installed: requests')
            raise
        except SSLError as e:
            if 'UNSAFE_LEGACY_RENEGOTIATION_DISABLED' in str(e):
                raise RequestError('UNSAFE_LEGACY_RENEGOTIATION_DISABLED: Try using --legacy-server-connect', cause=e) from e
            elif 'SSLV3_ALERT_HANDSHAKE_FAILURE' in str(e):
                raise RequestError(
                    'SSLV3_ALERT_HANDSHAKE_FAILURE: The server may not support the current cipher list. '
                    'Try using --legacy-server-connect', cause=e) from e
            raise
        except HTTPError as e:  # TODO: Remove in a future release
            raise _CompatHTTPError(e) from e
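    # Minimal usage sketch (illustration only; the URL is a placeholder):
    #   response = ydl.urlopen(Request('https://example.com', headers={'Accept': 'text/html'}))
    #   body = response.read()
    # Plain strings are also accepted and wrapped into a Request above; urllib Request
    # objects still work but emit the deprecation warning shown.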
    def build_request_director(self, handlers, preferences=None):
        logger = _YDLLogger(self)
        headers = self.params['http_headers'].copy()
        proxies = self.proxies.copy()
        clean_headers(headers)
        clean_proxies(proxies, headers)

        director = RequestDirector(logger=logger, verbose=self.params.get('debug_printtraffic'))
        for handler in handlers:
            director.add_handler(handler(
                logger=logger,
                headers=headers,
                cookiejar=self.cookiejar,
                proxies=proxies,
                prefer_system_certs='no-certifi' in self.params['compat_opts'],
                verify=not self.params.get('nocheckcertificate'),
                **traverse_obj(self.params, {
                    'verbose': 'debug_printtraffic',
                    'source_address': 'source_address',
                    'timeout': 'socket_timeout',
                    'legacy_ssl_support': 'legacyserverconnect',
                    'enable_file_urls': 'enable_file_urls',
                    'client_cert': {
                        'client_certificate': 'client_certificate',
                        'client_certificate_key': 'client_certificate_key',
                        'client_certificate_password': 'client_certificate_password',
                    },
                }),
            ))
        director.preferences.update(preferences or [])
        if 'prefer-legacy-http-handler' in self.params['compat_opts']:
            director.preferences.add(lambda rh, _: 500 if rh.RH_KEY == 'Urllib' else 0)
        return director

    def encode(self, s):
        if isinstance(s, bytes):
            return s  # Already encoded

        try:
            return s.encode(self.get_encoding())
        except UnicodeEncodeError as err:
            err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
            raise

    def get_encoding(self):
        encoding = self.params.get('encoding')
        if encoding is None:
            encoding = preferredencoding()
        return encoding
    def _write_info_json(self, label, ie_result, infofn, overwrite=None):
        ''' Write infojson and returns True = written, 'exists' = Already exists, False = skip, None = error '''
        if overwrite is None:
            overwrite = self.params.get('overwrites', True)
        if not self.params.get('writeinfojson'):
            return False
        elif not infofn:
            self.write_debug(f'Skipping writing {label} infojson')
            return False
        elif not self._ensure_dir_exists(infofn):
            return None
        elif not overwrite and os.path.exists(infofn):
            self.to_screen(f'[info] {label.title()} metadata is already present')
            return 'exists'

        self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
        try:
            write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
            return True
        except OSError:
            self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
            return None

    def _write_description(self, label, ie_result, descfn):
        ''' Write description and returns True = written, False = skip, None = error '''
        if not self.params.get('writedescription'):
            return False
        elif not descfn:
            self.write_debug(f'Skipping writing {label} description')
            return False
        elif not self._ensure_dir_exists(descfn):
            return None
        elif not self.params.get('overwrites', True) and os.path.exists(descfn):
            self.to_screen(f'[info] {label.title()} description is already present')
        elif ie_result.get('description') is None:
            self.to_screen(f'[info] There\'s no {label} description to write')
            return False
        else:
            try:
                self.to_screen(f'[info] Writing {label} description to: {descfn}')
                with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                    descfile.write(ie_result['description'])
            except OSError:
                self.report_error(f'Cannot write {label} description file {descfn}')
                return None
        return True
    def _write_subtitles(self, info_dict, filename):
        ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
        ret = []
        subtitles = info_dict.get('requested_subtitles')
        if not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
            # subtitles download errors are already managed as troubles in relevant IE
            # that way it will silently go on when used with unsupporting IE
            return ret
        elif not subtitles:
            self.to_screen('[info] There are no subtitles for the requested languages')
            return ret
        sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
        if not sub_filename_base:
            self.to_screen('[info] Skipping writing video subtitles')
            return ret

        for sub_lang, sub_info in subtitles.items():
            sub_format = sub_info['ext']
            sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
            sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
            existing_sub = self.existing_file((sub_filename_final, sub_filename))
            if existing_sub:
                self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
                sub_info['filepath'] = existing_sub
                ret.append((existing_sub, sub_filename_final))
                continue

            self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
            if sub_info.get('data') is not None:
                try:
                    # Use newline='' to prevent conversion of newline characters
                    # See https://github.com/ytdl-org/youtube-dl/issues/10268
                    with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
                        subfile.write(sub_info['data'])
                    sub_info['filepath'] = sub_filename
                    ret.append((sub_filename, sub_filename_final))
                    continue
                except OSError:
                    self.report_error(f'Cannot write video subtitles file {sub_filename}')
                    return None

            try:
                sub_copy = sub_info.copy()
                sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
                self.dl(sub_filename, sub_copy, subtitle=True)
                sub_info['filepath'] = sub_filename
                ret.append((sub_filename, sub_filename_final))
            except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
                msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
                if self.params.get('ignoreerrors') is not True:  # False or 'only_download'
                    if not self.params.get('ignoreerrors'):
                        self.report_error(msg)
                    raise DownloadError(msg)
                self.report_warning(msg)
        return ret
    def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
        ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename); or None if error '''
        write_all = self.params.get('write_all_thumbnails', False)
        thumbnails, ret = [], []
        if write_all or self.params.get('writethumbnail', False):
            thumbnails = info_dict.get('thumbnails') or []
            if not thumbnails:
                self.to_screen(f'[info] There are no {label} thumbnails to download')
                return ret
        multiple = write_all and len(thumbnails) > 1

        if thumb_filename_base is None:
            thumb_filename_base = filename
        if thumbnails and not thumb_filename_base:
            self.write_debug(f'Skipping writing {label} thumbnail')
            return ret

        if thumbnails and not self._ensure_dir_exists(filename):
            return None

        for idx, t in list(enumerate(thumbnails))[::-1]:
            thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
            thumb_display_id = f'{label} thumbnail {t["id"]}'
            thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
            thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))

            existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
            if existing_thumb:
                self.to_screen('[info] %s is already present' % (
                    thumb_display_id if multiple else f'{label} thumbnail').capitalize())
                t['filepath'] = existing_thumb
                ret.append((existing_thumb, thumb_filename_final))
            else:
                self.to_screen(f'[info] Downloading {thumb_display_id} ...')
                try:
                    uf = self.urlopen(Request(t['url'], headers=t.get('http_headers', {})))
                    self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
                    with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    ret.append((thumb_filename, thumb_filename_final))
                    t['filepath'] = thumb_filename
                except network_exceptions as err:
                    if isinstance(err, HTTPError) and err.status == 404:
                        self.to_screen(f'[info] {thumb_display_id.title()} does not exist')
                    else:
                        self.report_warning(f'Unable to download {thumb_display_id}: {err}')
                    thumbnails.pop(idx)
            if ret and not write_all: